-rw-r--r--CREDITS8
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-ibm-rtl4
-rw-r--r--Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt4
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt (renamed from Documentation/devicetree/bindings/display/ti/ti,tfp410.txt)9
-rw-r--r--Documentation/devicetree/bindings/display/panel/display-timing.txt8
-rw-r--r--Documentation/devicetree/bindings/display/renesas,du.txt12
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt10
-rw-r--r--Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt6
-rw-r--r--Documentation/devicetree/bindings/display/zte,vou.txt84
-rw-r--r--Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt (renamed from Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt)4
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt5
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt24
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-net.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie.txt11
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt2
-rw-r--r--Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt33
-rw-r--r--Documentation/driver-api/infrastructure.rst8
-rw-r--r--Documentation/filesystems/Locking1
-rw-r--r--Documentation/filesystems/vfs.txt1
-rw-r--r--Documentation/gpu/drm-internals.rst20
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst11
-rw-r--r--Documentation/gpu/drm-kms.rst95
-rw-r--r--Documentation/gpu/drm-uapi.rst6
-rw-r--r--Documentation/gpu/i915.rst17
-rw-r--r--Documentation/i2c/i2c-topology4
-rw-r--r--Documentation/networking/dsa/dsa.txt3
-rw-r--r--Documentation/networking/netdev-FAQ.txt8
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.txt25
-rw-r--r--Documentation/sync_file.txt14
-rw-r--r--Documentation/virtual/kvm/api.txt11
-rw-r--r--Documentation/virtual/kvm/locking.txt12
-rw-r--r--MAINTAINERS131
-rw-r--r--Makefile28
-rw-r--r--arch/arc/Makefile7
-rw-r--r--arch/arc/boot/dts/axc001.dtsi2
-rw-r--r--arch/arc/boot/dts/nsim_700.dts2
-rw-r--r--arch/arc/boot/dts/nsimosci.dts4
-rw-r--r--arch/arc/configs/nsim_700_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig3
-rw-r--r--arch/arc/include/asm/arcregs.h2
-rw-r--r--arch/arc/include/asm/delay.h9
-rw-r--r--arch/arc/include/asm/pgtable.h2
-rw-r--r--arch/arc/include/asm/smp.h4
-rw-r--r--arch/arc/kernel/devtree.c2
-rw-r--r--arch/arc/kernel/mcip.c32
-rw-r--r--arch/arc/kernel/process.c20
-rw-r--r--arch/arc/kernel/smp.c23
-rw-r--r--arch/arc/kernel/time.c19
-rw-r--r--arch/arc/mm/cache.c2
-rw-r--r--arch/arc/mm/dma.c26
-rw-r--r--arch/arc/plat-eznps/smp.c6
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts14
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi5
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi4
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi7
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi16
-rw-r--r--arch/arm/boot/dts/stih410-b2260.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-gr8-evb.dts (renamed from arch/arm/boot/dts/ntc-gr8-evb.dts)2
-rw-r--r--arch/arm/boot/dts/sun5i-gr8.dtsi (renamed from arch/arm/boot/dts/ntc-gr8.dtsi)0
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi4
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/kvm_asm.h1
-rw-r--r--arch/arm/include/asm/kvm_host.h3
-rw-r--r--arch/arm/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h3
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/armksyms.c183
-rw-r--r--arch/arm/kernel/calls.S3
-rw-r--r--arch/arm/kernel/entry-ftrace.S3
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/smccc-call.S3
-rw-r--r--arch/arm/kernel/traps.c20
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S5
-rw-r--r--arch/arm/kvm/arm.c27
-rw-r--r--arch/arm/kvm/hyp/tlb.c15
-rw-r--r--arch/arm/lib/ashldi3.S3
-rw-r--r--arch/arm/lib/ashrdi3.S3
-rw-r--r--arch/arm/lib/backtrace.S37
-rw-r--r--arch/arm/lib/bitops.h5
-rw-r--r--arch/arm/lib/bswapsdi2.S3
-rw-r--r--arch/arm/lib/clear_user.S4
-rw-r--r--arch/arm/lib/copy_from_user.S2
-rw-r--r--arch/arm/lib/copy_page.S2
-rw-r--r--arch/arm/lib/copy_to_user.S4
-rw-r--r--arch/arm/lib/csumipv6.S3
-rw-r--r--arch/arm/lib/csumpartial.S2
-rw-r--r--arch/arm/lib/csumpartialcopy.S1
-rw-r--r--arch/arm/lib/csumpartialcopygeneric.S2
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S1
-rw-r--r--arch/arm/lib/delay.c2
-rw-r--r--arch/arm/lib/div64.S2
-rw-r--r--arch/arm/lib/findbit.S9
-rw-r--r--arch/arm/lib/getuser.S9
-rw-r--r--arch/arm/lib/io-readsb.S2
-rw-r--r--arch/arm/lib/io-readsl.S2
-rw-r--r--arch/arm/lib/io-readsw-armv3.S3
-rw-r--r--arch/arm/lib/io-readsw-armv4.S2
-rw-r--r--arch/arm/lib/io-writesb.S2
-rw-r--r--arch/arm/lib/io-writesl.S2
-rw-r--r--arch/arm/lib/io-writesw-armv3.S2
-rw-r--r--arch/arm/lib/io-writesw-armv4.S2
-rw-r--r--arch/arm/lib/lib1funcs.S9
-rw-r--r--arch/arm/lib/lshrdi3.S3
-rw-r--r--arch/arm/lib/memchr.S2
-rw-r--r--arch/arm/lib/memcpy.S3
-rw-r--r--arch/arm/lib/memmove.S2
-rw-r--r--arch/arm/lib/memset.S3
-rw-r--r--arch/arm/lib/memzero.S2
-rw-r--r--arch/arm/lib/muldi3.S3
-rw-r--r--arch/arm/lib/putuser.S5
-rw-r--r--arch/arm/lib/strchr.S2
-rw-r--r--arch/arm/lib/strrchr.S2
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c3
-rw-r--r--arch/arm/lib/ucmpdi2.S3
-rw-r--r--arch/arm/mach-imx/Makefile1
-rw-r--r--arch/arm/mach-imx/ssi-fiq-ksym.c20
-rw-r--r--arch/arm/mach-imx/ssi-fiq.S7
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/id.c16
-rw-r--r--arch/arm/mach-omap2/prm3xxx.c3
-rw-r--r--arch/arm/mach-omap2/voltage.c6
-rw-r--r--arch/arm/mm/abort-lv4t.S34
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/proc-v7m.S2
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/juno-r1.dts2
-rw-r--r--arch/arm64/boot/dts/arm/juno-r2.dts2
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi7
-rw-r--r--arch/arm64/include/asm/alternative.h2
-rw-r--r--arch/arm64/include/asm/cpucaps.h40
-rw-r--r--arch/arm64/include/asm/cpufeature.h20
-rw-r--r--arch/arm64/include/asm/kvm_asm.h1
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h2
-rw-r--r--arch/arm64/include/asm/lse.h1
-rw-r--r--arch/arm64/include/asm/perf_event.h10
-rw-r--r--arch/arm64/kernel/perf_event.c10
-rw-r--r--arch/arm64/kvm/hyp/tlb.c15
-rw-r--r--arch/arm64/kvm/sys_regs.c10
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/boot/dts/mti/malta.dts3
-rw-r--r--arch/mips/generic/init.c16
-rw-r--r--arch/mips/include/asm/fpu_emulator.h13
-rw-r--r--arch/mips/include/asm/kvm_host.h7
-rw-r--r--arch/mips/include/asm/mipsregs.h6
-rw-r--r--arch/mips/include/asm/switch_to.h18
-rw-r--r--arch/mips/include/asm/tlb.h13
-rw-r--r--arch/mips/kernel/mips-cpc.c11
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c10
-rw-r--r--arch/mips/kernel/ptrace.c8
-rw-r--r--arch/mips/kernel/r2300_fpu.S138
-rw-r--r--arch/mips/kernel/r6000_fpu.S89
-rw-r--r--arch/mips/kernel/relocate.c2
-rw-r--r--arch/mips/kernel/setup.c13
-rw-r--r--arch/mips/kernel/traps.c137
-rw-r--r--arch/mips/kvm/emulate.c32
-rw-r--r--arch/mips/kvm/mips.c5
-rw-r--r--arch/mips/kvm/mmu.c4
-rw-r--r--arch/mips/lib/dump_tlb.c44
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c18
-rw-r--r--arch/mips/mm/fault.c9
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/mm/tlb-r4k.c6
-rw-r--r--arch/nios2/kernel/time.c1
-rw-r--r--arch/openrisc/include/asm/cache.h2
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h4
-rw-r--r--arch/parisc/kernel/cache.c31
-rw-r--r--arch/parisc/kernel/drivers.c6
-rw-r--r--arch/parisc/kernel/inventory.c8
-rw-r--r--arch/parisc/kernel/pacache.S37
-rw-r--r--arch/parisc/kernel/pci-dma.c2
-rw-r--r--arch/parisc/kernel/setup.c4
-rw-r--r--arch/parisc/kernel/syscall.S66
-rw-r--r--arch/parisc/kernel/time.c57
-rw-r--r--arch/powerpc/boot/main.c8
-rw-r--r--arch/powerpc/boot/opal-calls.S13
-rw-r--r--arch/powerpc/boot/opal.c11
-rw-r--r--arch/powerpc/boot/ops.h1
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h12
-rw-r--r--arch/powerpc/include/asm/checksum.h12
-rw-r--r--arch/powerpc/include/asm/exception-64s.h15
-rw-r--r--arch/powerpc/include/asm/mmu.h14
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/include/asm/reg.h1
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S8
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S11
-rw-r--r--arch/powerpc/kernel/process.c42
-rw-r--r--arch/powerpc/kernel/setup_64.c20
-rw-r--r--arch/powerpc/mm/hash_utils_64.c12
-rw-r--r--arch/powerpc/mm/pgtable-radix.c4
-rw-r--r--arch/powerpc/mm/tlb-radix.c4
-rw-r--r--arch/s390/hypfs/hypfs_diag.c6
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kvm/sthyi.c4
-rw-r--r--arch/s390/pci/pci_dma.c2
-rw-r--r--arch/sparc/Kconfig23
-rw-r--r--arch/sparc/include/asm/cpudata_64.h5
-rw-r--r--arch/sparc/include/asm/hypervisor.h343
-rw-r--r--arch/sparc/include/asm/iommu_64.h28
-rw-r--r--arch/sparc/include/asm/spinlock_32.h2
-rw-r--r--arch/sparc/include/asm/spinlock_64.h12
-rw-r--r--arch/sparc/include/asm/topology_64.h8
-rw-r--r--arch/sparc/include/asm/uaccess_64.h28
-rw-r--r--arch/sparc/kernel/head_64.S37
-rw-r--r--arch/sparc/kernel/hvapi.c1
-rw-r--r--arch/sparc/kernel/iommu.c8
-rw-r--r--arch/sparc/kernel/iommu_common.h1
-rw-r--r--arch/sparc/kernel/jump_label.c23
-rw-r--r--arch/sparc/kernel/mdesc.c46
-rw-r--r--arch/sparc/kernel/pci_sun4v.c418
-rw-r--r--arch/sparc/kernel/pci_sun4v.h21
-rw-r--r--arch/sparc/kernel/pci_sun4v_asm.S68
-rw-r--r--arch/sparc/kernel/signal_32.c4
-rw-r--r--arch/sparc/kernel/smp_64.c8
-rw-r--r--arch/sparc/lib/GENcopy_from_user.S4
-rw-r--r--arch/sparc/lib/GENcopy_to_user.S4
-rw-r--r--arch/sparc/lib/GENmemcpy.S48
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/lib/NG2copy_from_user.S8
-rw-r--r--arch/sparc/lib/NG2copy_to_user.S8
-rw-r--r--arch/sparc/lib/NG2memcpy.S228
-rw-r--r--arch/sparc/lib/NG4copy_from_user.S8
-rw-r--r--arch/sparc/lib/NG4copy_to_user.S8
-rw-r--r--arch/sparc/lib/NG4memcpy.S294
-rw-r--r--arch/sparc/lib/NGcopy_from_user.S4
-rw-r--r--arch/sparc/lib/NGcopy_to_user.S4
-rw-r--r--arch/sparc/lib/NGmemcpy.S233
-rw-r--r--arch/sparc/lib/U1copy_from_user.S8
-rw-r--r--arch/sparc/lib/U1copy_to_user.S8
-rw-r--r--arch/sparc/lib/U1memcpy.S345
-rw-r--r--arch/sparc/lib/U3copy_from_user.S8
-rw-r--r--arch/sparc/lib/U3copy_to_user.S8
-rw-r--r--arch/sparc/lib/U3memcpy.S227
-rw-r--r--arch/sparc/lib/copy_in_user.S35
-rw-r--r--arch/sparc/lib/user_fixup.c71
-rw-r--r--arch/sparc/mm/init_64.c71
-rw-r--r--arch/sparc/mm/tsb.c17
-rw-r--r--arch/sparc/mm/ultra.S374
-rw-r--r--arch/tile/include/asm/cache.h3
-rw-r--r--arch/tile/kernel/time.c4
-rw-r--r--arch/x86/boot/compressed/Makefile5
-rw-r--r--arch/x86/boot/cpu.c6
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c4
-rw-r--r--arch/x86/events/amd/core.c8
-rw-r--r--arch/x86/events/core.c10
-rw-r--r--arch/x86/events/intel/ds.c35
-rw-r--r--arch/x86/events/intel/uncore.c8
-rw-r--r--arch/x86/events/intel/uncore_snb.c44
-rw-r--r--arch/x86/events/perf_event.h2
-rw-r--r--arch/x86/include/asm/intel-mid.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/kvm_page_track.h14
-rw-r--r--arch/x86/kernel/apm_32.c5
-rw-r--r--arch/x86/kernel/cpu/amd.c6
-rw-r--r--arch/x86/kernel/cpu/common.c32
-rw-r--r--arch/x86/kernel/dumpstack.c2
-rw-r--r--arch/x86/kernel/fpu/core.c16
-rw-r--r--arch/x86/kernel/head_32.S9
-rw-r--r--arch/x86/kernel/sysfb_simplefb.c39
-rw-r--r--arch/x86/kernel/unwind_guess.c8
-rw-r--r--arch/x86/kvm/emulate.c38
-rw-r--r--arch/x86/kvm/ioapic.c2
-rw-r--r--arch/x86/kvm/ioapic.h4
-rw-r--r--arch/x86/kvm/irq_comm.c71
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c11
-rw-r--r--arch/x86/kvm/page_track.c31
-rw-r--r--arch/x86/kvm/svm.c23
-rw-r--r--arch/x86/kvm/vmx.c65
-rw-r--r--arch/x86/kvm/x86.c65
-rw-r--r--arch/x86/mm/extable.c7
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c80
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_wdt.c)34
-rw-r--r--arch/x86/platform/intel-mid/pwr.c19
-rw-r--r--arch/x86/purgatory/Makefile1
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h9
-rw-r--r--arch/xtensa/kernel/time.c14
-rw-r--r--arch/xtensa/kernel/traps.c74
-rw-r--r--crypto/algif_hash.c17
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c1
-rw-r--r--crypto/scatterwalk.c4
-rw-r--r--drivers/acpi/acpi_apd.c10
-rw-r--r--drivers/acpi/acpi_lpss.c10
-rw-r--r--drivers/acpi/acpi_platform.c5
-rw-r--r--drivers/acpi/acpica/tbfadt.c10
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c4
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c29
-rw-r--r--drivers/ata/ahci.c7
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/base/Kconfig6
-rw-r--r--drivers/base/dd.c5
-rw-r--r--drivers/base/power/main.c8
-rw-r--r--drivers/block/aoe/aoecmd.c41
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/virtio_blk.c10
-rw-r--r--drivers/block/zram/zram_drv.c3
-rw-r--r--drivers/bluetooth/btwilink.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c8
-rw-r--r--drivers/char/ipmi/bt-bmc.c4
-rw-r--r--drivers/char/ppdev.c3
-rw-r--r--drivers/char/tpm/tpm-interface.c3
-rw-r--r--drivers/char/virtio_console.c22
-rw-r--r--drivers/clk/bcm/Kconfig2
-rw-r--r--drivers/clk/berlin/bg2.c2
-rw-r--r--drivers/clk/berlin/bg2q.c2
-rw-r--r--drivers/clk/clk-efm32gg.c2
-rw-r--r--drivers/clk/clk-qoriq.c13
-rw-r--r--drivers/clk/clk-xgene.c10
-rw-r--r--drivers/clk/imx/clk-pllv3.c8
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c2
-rw-r--r--drivers/clk/mmp/clk-of-pxa168.c2
-rw-r--r--drivers/clk/mmp/clk-of-pxa910.c4
-rw-r--r--drivers/clk/rockchip/clk-ddr.c5
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c22
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c2
-rw-r--r--drivers/crypto/caam/caamalg.c11
-rw-r--r--drivers/dax/dax.c4
-rw-r--r--drivers/dax/pmem.c4
-rw-r--r--drivers/dma-buf/Kconfig2
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf.c28
-rw-r--r--drivers/dma-buf/dma-fence-array.c (renamed from drivers/dma-buf/fence-array.c)91
-rw-r--r--drivers/dma-buf/dma-fence.c (renamed from drivers/dma-buf/fence.c)221
-rw-r--r--drivers/dma-buf/reservation.c197
-rw-r--r--drivers/dma-buf/seqno-fence.c18
-rw-r--r--drivers/dma-buf/sw_sync.c50
-rw-r--r--drivers/dma-buf/sync_debug.c13
-rw-r--r--drivers/dma-buf/sync_debug.h9
-rw-r--r--drivers/dma-buf/sync_file.c66
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/cppi41.c31
-rw-r--r--drivers/dma/edma.c1
-rw-r--r--drivers/dma/sun6i-dma.c2
-rw-r--r--drivers/firewire/net.c59
-rw-r--r--drivers/gpio/Kconfig4
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-mvebu.c92
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpiolib-of.c14
-rw-r--r--drivers/gpio/gpiolib.c64
-rw-r--r--drivers/gpu/drm/Kconfig17
-rw-r--r--drivers/gpu/drm/Makefile7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ObjectID.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h887
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c267
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c506
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h443
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h185
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c197
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c134
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c358
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h205
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c222
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c834
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c330
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c306
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c436
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c222
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c992
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h2
-rwxr-xr-xdrivers/gpu/drm/amd/include/cgs_common.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c156
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h16
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/power_state.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c6
-rwxr-xr-xdrivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c4
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h4
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c79
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h32
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c71
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c159
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c10
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c26
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c43
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h3
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c12
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h9
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c99
-rw-r--r--drivers/gpu/drm/armada/Makefile2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c121
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h10
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c2
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c238
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c10
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c65
-rw-r--r--drivers/gpu/drm/armada/armada_trace.c4
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h66
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c5
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c41
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c1
-rw-r--r--drivers/gpu/drm/bridge/Kconfig23
-rw-r--r--drivers/gpu/drm/bridge/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h16
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c213
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c1
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c2
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c33
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi-audio.h7
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c141
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.c301
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.h39
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c1564
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.h1517
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c317
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c1
-rw-r--r--drivers/gpu/drm/drm_atomic.c556
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c202
-rw-r--r--drivers/gpu/drm/drm_blend.c39
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c12
-rw-r--r--drivers/gpu/drm/drm_connector.c148
-rw-r--r--drivers/gpu/drm/drm_crtc.c760
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h36
-rw-r--r--drivers/gpu/drm/drm_debugfs.c52
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c352
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c121
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c68
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c128
-rw-r--r--drivers/gpu/drm/drm_edid.c207
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c67
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c114
-rw-r--r--drivers/gpu/drm/drm_fops.c21
-rw-r--r--drivers/gpu/drm/drm_fourcc.c293
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c123
-rw-r--r--drivers/gpu/drm/drm_internal.h28
-rw-r--r--drivers/gpu/drm/drm_ioctl.c10
-rw-r--r--drivers/gpu/drm/drm_irq.c164
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_mm.c98
-rw-r--r--drivers/gpu/drm/drm_mode_config.c494
-rw-r--r--drivers/gpu/drm/drm_modes.c16
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c25
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c13
-rw-r--r--drivers/gpu/drm/drm_of.c28
-rw-r--r--drivers/gpu/drm/drm_plane.c8
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c11
-rw-r--r--drivers/gpu/drm/drm_print.c59
-rw-r--r--drivers/gpu/drm/drm_property.c54
-rw-r--r--drivers/gpu/drm/drm_rect.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c28
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c46
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c13
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c6
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c5
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c35
-rw-r--r--drivers/gpu/drm/gma500/gtt.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h4
-rw-r--r--drivers/gpu/drm/hisilicon/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/Makefile1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c477
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c456
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h114
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c267
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h196
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c147
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c558
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c7
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c9
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c863
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig64
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/i915/Makefile11
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile11
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c352
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c284
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2831
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.h49
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h29
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c330
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h163
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c531
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.h150
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c858
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h188
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c312
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2244
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h306
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c205
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h380
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c2848
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h26
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c741
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.h233
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c597
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c304
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h106
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h259
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c320
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h80
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c310
-rw-r--r--drivers/gpu/drm/i915/gvt/render.h43
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c292
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.h58
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c581
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h139
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h286
-rw-r--r--drivers/gpu/drm/i915/gvt/trace_points.c (renamed from drivers/gpu/drm/i915/i915_gem_dmabuf.h)37
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c408
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c655
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c252
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1152
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3045
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c39
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c109
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c150
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c21
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c176
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c (renamed from drivers/gpu/drm/i915/i915_gem_fence.c)61
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.h51
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1089
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h250
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c170
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h338
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c186
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c758
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h212
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c104
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c107
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c64
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.c90
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.h73
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c124
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c727
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c675
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c838
-rw-r--r--drivers/gpu/drm/i915/i915_params.c18
-rw-r--r--drivers/gpu/drm/i915/i915_params.h3
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c20
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h285
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c32
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c88
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h33
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c25
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h10
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c638
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h341
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c26
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c409
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c205
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c76
-rw-r--r--drivers/gpu/drm/i915/intel_color.c52
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c141
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c11
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c534
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c23
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2052
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c611
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c3
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c559
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c99
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h199
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c38
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c56
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c26
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c22
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c203
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c152
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c17
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c33
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.h5
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h32
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h82
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c56
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c10
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.h2
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c450
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c195
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c2
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c420
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c185
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c45
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c139
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c10
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c12
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1422
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c28
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c371
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h147
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c173
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c45
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c209
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c63
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c695
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h3
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c36
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c12
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c9
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c187
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c64
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c17
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c42
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c1
-rw-r--r--drivers/gpu/drm/msm/Makefile4
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h27
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h38
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c112
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h111
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c119
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h3757
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c888
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h60
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c344
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c32
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c39
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h162
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h300
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c18
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h2
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c38
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c84
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c267
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h53
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c133
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h56
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c300
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c306
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h70
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c37
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c17
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c49
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h42
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c9
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c28
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c60
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h25
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c90
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c68
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h45
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c12
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h19
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h4
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c4
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h29
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c650
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c349
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c97
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c82
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.c139
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.h57
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4185
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c)6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c)12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c)22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c)6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c227
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c141
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c47
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c50
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c49
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c20
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c31
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c20
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c30
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c25
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c59
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c52
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c58
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c53
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c57
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c228
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c78
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c40
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c160
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c85
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c73
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h98
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/rfbi.c49
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c33
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c97
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c87
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c53
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c50
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c69
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h12
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c35
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c53
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c85
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c22
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c32
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c19
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/savage/savage_state.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c32
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c23
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c20
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c45
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c4
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/drm.c5
-rw-r--r--drivers/gpu/drm/tegra/fb.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c598
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c214
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h11
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c260
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.h5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c7
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h15
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c68
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c22
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/udl/udl_main.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c82
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c53
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/zte/Kconfig8
-rw-r--r--drivers/gpu/drm/zte/Makefile7
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c267
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.h36
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c624
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi_regs.h56
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c299
-rw-r--r--drivers/gpu/drm/zte/zx_plane.h26
-rw-r--r--drivers/gpu/drm/zte/zx_plane_regs.h91
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c661
-rw-r--r--drivers/gpu/drm/zte/zx_vou.h46
-rw-r--r--drivers/gpu/drm/zte/zx_vou_regs.h157
-rw-r--r--drivers/gpu/ipu-v3/Kconfig1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c43
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c16
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c2
-rw-r--r--drivers/gpu/vga/vgaarb.c80
-rw-r--r--drivers/hid/hid-cp2112.c115
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-lg.c14
-rw-r--r--drivers/hid/hid-magicmouse.c12
-rw-r--r--drivers/hid/hid-rmi.c10
-rw-r--r--drivers/hid/hid-sensor-custom.c6
-rw-r--r--drivers/hid/hid-sensor-hub.c16
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c102
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/hwmon.c6
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c64
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c4
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h27
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/i2c/muxes/Kconfig1
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c22
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c4
-rw-r--r--drivers/iio/accel/st_accel_core.c12
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c56
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c8
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c1
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c2
-rw-r--r--drivers/infiniband/core/addr.c11
-rw-r--r--drivers/infiniband/core/cm.c126
-rw-r--r--drivers/infiniband/core/cma.c75
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c17
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c20
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c72
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c27
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h3
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c37
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c19
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h89
-rw-r--r--drivers/infiniband/hw/hfi1/init.c104
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c3
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c13
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c19
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c25
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h60
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c2
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c5
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c13
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c6
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c13
-rw-r--r--drivers/infiniband/hw/qedr/Kconfig1
-rw-r--r--drivers/infiniband/sw/rdmavt/dma.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c54
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c6
-rw-r--r--drivers/input/mouse/focaltech.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/iommu/arm-smmu-v3.c25
-rw-r--r--drivers/iommu/arm-smmu.c16
-rw-r--r--drivers/iommu/dmar.c4
-rw-r--r--drivers/iommu/intel-iommu.c27
-rw-r--r--drivers/iommu/intel-svm.c28
-rw-r--r--drivers/mailbox/pcc.c13
-rw-r--r--drivers/md/md.c10
-rw-r--r--drivers/md/raid1.c26
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5-cache.c6
-rw-r--r--drivers/media/dvb-frontends/Kconfig5
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.c (renamed from drivers/media/usb/dvb-usb/gp8psk-fe.c)156
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.h82
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c2
-rw-r--r--drivers/media/tuners/tuner-xc2028.c37
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c105
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.h4
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c34
-rw-r--r--drivers/media/usb/dvb-usb/Makefile2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c304
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c77
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c100
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c27
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c36
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c25
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c113
-rw-r--r--drivers/media/usb/dvb-usb/dibusb.h3
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c26
-rw-r--r--drivers/media/usb/dvb-usb/digitv.h5
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c128
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c104
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c10
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h9
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c2
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c132
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.h63
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c25
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c136
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c16
-rw-r--r--drivers/media/usb/s2255/s2255drv.c15
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c16
-rw-r--r--drivers/mfd/intel-lpss-pci.c31
-rw-r--r--drivers/mfd/intel-lpss.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c6
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/syscon.c4
-rw-r--r--drivers/mfd/wm8994-core.c16
-rw-r--r--drivers/misc/mei/bus-fixup.c2
-rw-r--r--drivers/mmc/card/mmc_test.c8
-rw-r--r--drivers/mmc/core/mmc.c3
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c5
-rw-r--r--drivers/mmc/host/dw_mmc.c3
-rw-r--r--drivers/mmc/host/mxs-mmc.c4
-rw-r--r--drivers/mmc/host/sdhci-msm.c1
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c14
-rw-r--r--drivers/mmc/host/sdhci.c36
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/mtk_ecc.c19
-rw-r--r--drivers/mtd/nand/nand_base.c60
-rw-r--r--drivers/net/can/sja1000/plx_pci.c18
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_ucan.h37
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c104
-rw-r--r--drivers/net/dsa/b53/b53_common.c16
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c20
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c21
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c12
-rw-r--r--drivers/net/ethernet/arc/emac_main.c7
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c10
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c17
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c65
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h64
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c37
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c105
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c153
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c118
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h24
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c32
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c1
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c34
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c56
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c34
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c8
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c8
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c25
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c7
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c13
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c155
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig3
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c216
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c124
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c17
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c23
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c19
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c102
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h72
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c5
-rw-r--r--drivers/net/ethernet/sun/sunbmac.h2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c11
-rw-r--r--drivers/net/ethernet/sun/sunqe.h4
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c26
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw.c87
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c20
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/geneve.c61
-rw-r--r--drivers/net/hyperv/netvsc_drv.c25
-rw-r--r--drivers/net/ieee802154/adf7242.c1
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c17
-rw-r--r--drivers/net/irda/w83977af_ir.c4
-rw-r--r--drivers/net/macsec.c26
-rw-r--r--drivers/net/macvlan.c34
-rw-r--r--drivers/net/macvtap.c19
-rw-r--r--drivers/net/phy/at803x.c65
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/micrel.c8
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/realtek.c20
-rw-r--r--drivers/net/phy/vitesse.c34
-rw-r--r--drivers/net/tun.c10
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/asix_devices.c6
-rw-r--r--drivers/net/usb/ax88179_178a.c17
-rw-r--r--drivers/net/usb/cdc_ether.c38
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c21
-rw-r--r--drivers/net/virtio_net.c35
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c17
-rw-r--r--drivers/net/vrf.c2
-rw-r--r--drivers/net/vxlan.c94
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/slic_ds26522.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c75
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c8
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--drivers/nfc/mei_phy.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c15
-rw-r--r--drivers/ntb/ntb_transport.c2
-rw-r--r--drivers/ntb/test/ntb_perf.c8
-rw-r--r--drivers/ntb/test/ntb_pingpong.c2
-rw-r--r--drivers/nvme/host/lightnvm.c2
-rw-r--r--drivers/nvme/host/pci.c18
-rw-r--r--drivers/nvme/host/rdma.c42
-rw-r--r--drivers/nvme/target/core.c10
-rw-r--r--drivers/nvme/target/rdma.c18
-rw-r--r--drivers/of/base.c2
-rw-r--r--drivers/of/of_mdio.c21
-rw-r--r--drivers/pci/host/pcie-designware-plat.c2
-rw-r--r--drivers/pci/host/pcie-designware.c7
-rw-r--r--drivers/pci/host/pcie-qcom.c2
-rw-r--r--drivers/pci/host/pcie-rockchip.c62
-rw-r--r--drivers/pci/pci-mid.c6
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c14
-rw-r--r--drivers/pci/probe.c28
-rw-r--r--drivers/pci/setup-res.c8
-rw-r--r--drivers/pcmcia/soc_common.c2
-rw-r--r--drivers/phy/phy-da8xx-usb.c5
-rw-r--r--drivers/phy/phy-rockchip-pcie.c13
-rw-r--r--drivers/phy/phy-sun4i-usb.c2
-rw-r--r--drivers/phy/phy-twl4030-usb.c4
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c17
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c8
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel-vbtn.c2
-rw-r--r--drivers/platform/x86/toshiba-wmi.c26
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/pwm/sysfs.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/rtc/rtc-asm9260.c1
-rw-r--r--drivers/rtc/rtc-cmos.c15
-rw-r--r--drivers/rtc/rtc-omap.c38
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c9
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c5
-rw-r--r--drivers/scsi/hpsa.c16
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c13
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c24
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c21
-rw-r--r--drivers/scsi/qlogicpti.h4
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/vmw_pvscsi.c5
-rw-r--r--drivers/scsi/vmw_pvscsi.h2
-rw-r--r--drivers/spi/spi-fsl-dspi.c7
-rw-r--r--drivers/spi/spi-fsl-espi.c2
-rw-r--r--drivers/spi/spi.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c3
-rw-r--r--drivers/staging/greybus/arche-platform.c1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c17
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/nvec/nvec_ps2.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h8
-rw-r--r--drivers/thermal/intel_powerclamp.c9
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/udc.c2
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/dwc3/core.c5
-rw-r--r--drivers/usb/dwc3/dwc3-st.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c8
-rw-r--r--drivers/usb/gadget/function/u_ether.c8
-rw-r--r--drivers/usb/host/pci-quirks.c8
-rw-r--r--drivers/usb/musb/da8xx.c3
-rw-r--r--drivers/usb/musb/musb_core.c152
-rw-r--r--drivers/usb/musb/musb_core.h13
-rw-r--r--drivers/usb/musb/musb_dsps.c58
-rw-r--r--drivers/usb/musb/musb_gadget.c39
-rw-r--r--drivers/usb/musb/omap2430.c10
-rw-r--r--drivers/usb/musb/tusb6010.c6
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/storage/transport.c7
-rw-r--r--drivers/uwb/lc-rc.c16
-rw-r--r--drivers/uwb/pal.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c33
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c4
-rw-r--r--drivers/video/hdmi.c4
-rw-r--r--drivers/video/of_display_timing.c15
-rw-r--r--drivers/virtio/config.c12
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_legacy.c16
-rw-r--r--drivers/virtio/virtio_ring.c16
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--fs/afs/cmservice.c6
-rw-r--r--fs/afs/fsclient.c4
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/rxrpc.c3
-rw-r--r--fs/aio.c207
-rw-r--r--fs/btrfs/extent-tree.c3
-rw-r--r--fs/btrfs/extent_io.c8
-rw-r--r--fs/btrfs/inode.c13
-rw-r--r--fs/btrfs/ioctl.c5
-rw-r--r--fs/btrfs/relocation.c9
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/cifs/cifsencrypt.c11
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/connect.c25
-rw-r--r--fs/coredump.c3
-rw-r--r--fs/crypto/fname.c53
-rw-r--r--fs/crypto/keyinfo.c16
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/super.c17
-rw-r--r--fs/fuse/dir.c5
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/fuse_i.h1
-rw-r--r--fs/fuse/inode.c3
-rw-r--r--fs/isofs/rock.c4
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/client.c3
-rw-r--r--fs/nfs/namespace.c2
-rw-r--r--fs/nfs/nfs4_fs.h7
-rw-r--r--fs/nfs/nfs4proc.c38
-rw-r--r--fs/nfs/nfs4session.c12
-rw-r--r--fs/nfs/nfs4state.c1
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfsd/netns.h5
-rw-r--r--fs/nfsd/nfs4state.c38
-rw-r--r--fs/ntfs/dir.c2
-rw-r--r--fs/ocfs2/dir.c2
-rw-r--r--fs/orangefs/orangefs-debugfs.c149
-rw-r--r--fs/orangefs/orangefs-mod.c6
-rw-r--r--fs/overlayfs/copy_up.c2
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/super.c21
-rw-r--r--fs/splice.c8
-rw-r--r--fs/xattr.c22
-rw-r--r--fs/xfs/libxfs/xfs_defer.c17
-rw-r--r--include/acpi/actbl.h164
-rw-r--r--include/acpi/platform/aclinux.h3
-rw-r--r--include/asm-generic/export.h1
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/asm-generic/sections.h3
-rw-r--r--include/asm-generic/vmlinux.lds.h5
-rw-r--r--include/drm/bridge/mhl.h291
-rw-r--r--include/drm/drmP.h335
-rw-r--r--include/drm/drm_atomic.h55
-rw-r--r--include/drm/drm_blend.h10
-rw-r--r--include/drm/drm_connector.h37
-rw-r--r--include/drm/drm_crtc.h734
-rw-r--r--include/drm/drm_debugfs_crc.h73
-rw-r--r--include/drm/drm_dp_dual_mode_helper.h27
-rw-r--r--include/drm/drm_dp_helper.h6
-rw-r--r--include/drm/drm_drv.h435
-rw-r--r--include/drm/drm_edid.h1
-rw-r--r--include/drm/drm_encoder.h2
-rw-r--r--include/drm/drm_fb_cma_helper.h5
-rw-r--r--include/drm/drm_fb_helper.h4
-rw-r--r--include/drm/drm_fourcc.h33
-rw-r--r--include/drm/drm_framebuffer.h22
-rw-r--r--include/drm/drm_irq.h63
-rw-r--r--include/drm/drm_mm.h28
-rw-r--r--include/drm/drm_mode_config.h663
-rw-r--r--include/drm/drm_modeset_helper_vtables.h28
-rw-r--r--include/drm/drm_modeset_lock.h12
-rw-r--r--include/drm/drm_of.h13
-rw-r--r--include/drm/drm_plane.h104
-rw-r--r--include/drm/drm_print.h112
-rw-r--r--include/drm/i915_component.h6
-rw-r--r--include/drm/ttm/ttm_bo_api.h15
-rw-r--r--include/drm/ttm/ttm_bo_driver.h48
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h2
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/bpf_verifier.h5
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/compiler-gcc.h4
-rw-r--r--include/linux/console.h6
-rw-r--r--include/linux/dma-buf.h4
-rw-r--r--include/linux/dma-fence-array.h86
-rw-r--r--include/linux/dma-fence.h438
-rw-r--r--include/linux/fence-array.h83
-rw-r--r--include/linux/fence.h378
-rw-r--r--include/linux/frontswap.h5
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/hdmi.h2
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/ipv6.h17
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx5/driver.h16
-rw-r--r--include/linux/mtd/nand.h2
-rw-r--r--include/linux/netdevice.h58
-rw-r--r--include/linux/of_mdio.h4
-rw-r--r--include/linux/pagemap.h21
-rw-r--r--include/linux/pci.h14
-rw-r--r--include/linux/phy/phy.h7
-rw-r--r--include/linux/qed/qed_if.h1
-rw-r--r--include/linux/qed/qede_roce.h2
-rw-r--r--include/linux/regmap.h11
-rw-r--r--include/linux/reservation.h41
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/seqno-fence.h20
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sync_file.h14
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/cfg80211.h32
-rw-r--r--include/net/gro_cells.h3
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/ip.h13
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/ip6_tunnel.h1
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/mac80211.h21
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h6
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h3
-rw-r--r--include/net/netfilter/nf_tables.h10
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sock.h8
-rw-r--r--include/net/tcp.h14
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vxlan.h4
-rw-r--r--include/sound/hda_i915.h11
-rw-r--r--include/trace/events/dma_fence.h (renamed from include/trace/events/fence.h)44
-rw-r--r--include/uapi/drm/amdgpu_drm.h81
-rw-r--r--include/uapi/drm/drm_mode.h41
-rw-r--r--include/uapi/drm/i915_drm.h5
-rw-r--r--include/uapi/drm/msm_drm.h25
-rw-r--r--include/uapi/drm/vc4_drm.h2
-rw-r--r--include/uapi/linux/atm_zatm.h1
-rw-r--r--include/uapi/linux/bpqether.h2
-rw-r--r--include/uapi/linux/ethtool.h3
-rw-r--r--include/uapi/linux/input-event-codes.h2
-rw-r--r--include/uapi/linux/kvm.h7
-rw-r--r--include/uapi/linux/rtnetlink.h2
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/uapi/sound/asoc.h6
-rw-r--r--include/video/display_timing.h4
-rw-r--r--include/video/imx-ipu-v3.h3
-rw-r--r--include/video/of_display_timing.h15
-rw-r--r--init/do_mounts_rd.c2
-rw-r--r--kernel/bpf/hashtab.c3
-rw-r--r--kernel/bpf/syscall.c4
-rw-r--r--kernel/bpf/verifier.c80
-rw-r--r--kernel/events/core.c13
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/irq/manage.c4
-rw-r--r--kernel/locking/lockdep_internals.h20
-rw-r--r--kernel/module.c5
-rw-r--r--kernel/power/suspend_test.c4
-rw-r--r--kernel/printk/printk.c24
-rw-r--r--kernel/sched/auto_group.c36
-rw-r--r--kernel/sched/core.c12
-rw-r--r--kernel/taskstats.c6
-rw-r--r--kernel/trace/ftrace.c24
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/debugobjects.c8
-rw-r--r--lib/iov_iter.c4
-rw-r--r--lib/mpi/mpi-pow.c7
-rw-r--r--lib/stackdepot.c2
-rw-r--r--lib/test_bpf.c2
-rw-r--r--lib/test_kasan.c29
-rw-r--r--mm/cma.c3
-rw-r--r--mm/filemap.c3
-rw-r--r--mm/huge_memory.c9
-rw-r--r--mm/hugetlb.c66
-rw-r--r--mm/kasan/kasan.c19
-rw-r--r--mm/kasan/kasan.h4
-rw-r--r--mm/kasan/report.c3
-rw-r--r--mm/khugepaged.c2
-rw-r--r--mm/kmemleak.c1
-rw-r--r--mm/memory-failure.c12
-rw-r--r--mm/mlock.c7
-rw-r--r--mm/mremap.c34
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/slab_common.c4
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/truncate.c8
-rw-r--r--mm/vmscan.c2
-rw-r--r--mm/workingset.c2
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/batman-adv/log.h2
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/batman-adv/tp_meter.c1
-rw-r--r--net/bluetooth/6lowpan.c4
-rw-r--r--net/bluetooth/hci_conn.c26
-rw-r--r--net/bluetooth/hci_request.c49
-rw-r--r--net/bluetooth/hci_request.h2
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/bluetooth/mgmt.c26
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_multicast.c23
-rw-r--r--net/can/bcm.c50
-rw-r--r--net/ceph/ceph_fs.c3
-rw-r--r--net/ceph/osd_client.c1
-rw-r--r--net/core/dev.c31
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/filter.c68
-rw-r--r--net/core/flow.c6
-rw-r--r--net/core/flow_dissector.c25
-rw-r--r--net/core/net_namespace.c37
-rw-r--r--net/core/pktgen.c17
-rw-r--r--net/core/rtnetlink.c29
-rw-r--r--net/core/sock.c10
-rw-r--r--net/core/sock_reuseport.c1
-rw-r--r--net/dccp/ipv4.c28
-rw-r--r--net/dccp/ipv6.c19
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/dsa/dsa.c13
-rw-r--r--net/dsa/dsa2.c4
-rw-r--r--net/dsa/slave.c19
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/hsr/hsr_forward.c4
-rw-r--r--net/ipv4/Kconfig1
-rw-r--r--net/ipv4/af_inet.c13
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c20
-rw-r--r--net/ipv4/fib_trie.c90
-rw-r--r--net/ipv4/fou.c4
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/icmp.c4
-rw-r--r--net/ipv4/igmp.c50
-rw-r--r--net/ipv4/inet_hashtables.c8
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_output.c30
-rw-r--r--net/ipv4/ip_sockglue.c11
-rw-r--r--net/ipv4/ip_tunnel_core.c11
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter.c5
-rw-r--r--net/ipv4/netfilter/arp_tables.c4
-rw-r--r--net/ipv4/netfilter/nft_dup_ipv4.c6
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/sysctl_net_ipv4.c8
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_cong.c4
-rw-r--r--net/ipv4/tcp_dctcp.c13
-rw-r--r--net/ipv4/tcp_ipv4.c22
-rw-r--r--net/ipv4/udp.c23
-rw-r--r--net/ipv4/udp_impl.h2
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv4/udplite.c2
-rw-r--r--net/ipv6/addrconf.c119
-rw-r--r--net/ipv6/datagram.c4
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/icmp.c6
-rw-r--r--net/ipv6/inet6_hashtables.c13
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ip6_tunnel.c15
-rw-r--r--net/ipv6/ip6_udp_tunnel.c3
-rw-r--r--net/ipv6/ip6_vti.c31
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c17
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c4
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c2
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c1
-rw-r--r--net/ipv6/netfilter/nft_dup_ipv6.c6
-rw-r--r--net/ipv6/output_core.c2
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/reassembly.c3
-rw-r--r--net/ipv6/route.c78
-rw-r--r--net/ipv6/tcp_ipv6.c14
-rw-r--r--net/ipv6/udp.c11
-rw-r--r--net/ipv6/udp_impl.h2
-rw-r--r--net/ipv6/udplite.c2
-rw-r--r--net/l2tp/l2tp_ip.c68
-rw-r--r--net/l2tp/l2tp_ip6.c86
-rw-r--r--net/mac80211/aes_ccm.c46
-rw-r--r--net/mac80211/aes_ccm.h8
-rw-r--r--net/mac80211/aes_gcm.c43
-rw-r--r--net/mac80211/aes_gcm.h6
-rw-r--r--net/mac80211/aes_gmac.c26
-rw-r--r--net/mac80211/aes_gmac.h4
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rx.c51
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/mac80211/tx.c14
-rw-r--r--net/mac80211/vht.c16
-rw-r--r--net/mac80211/wpa.c22
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-aen.c18
-rw-r--r--net/ncsi/ncsi-manage.c126
-rw-r--r--net/netfilter/core.c13
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c7
-rw-r--r--net/netfilter/nf_conntrack_core.c49
-rw-r--r--net/netfilter/nf_conntrack_helper.c11
-rw-r--r--net/netfilter/nf_conntrack_sip.c5
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_nat_core.c49
-rw-r--r--net/netfilter/nf_queue.c48
-rw-r--r--net/netfilter/nf_tables_api.c34
-rw-r--r--net/netfilter/nft_dynset.c25
-rw-r--r--net/netfilter/nft_exthdr.c3
-rw-r--r--net/netfilter/nft_hash.c8
-rw-r--r--net/netfilter/nft_range.c32
-rw-r--r--net/netfilter/nft_set_hash.c19
-rw-r--r--net/netfilter/nft_set_rbtree.c2
-rw-r--r--net/netfilter/x_tables.c2
-rw-r--r--net/netfilter/xt_NFLOG.c1
-rw-r--r--net/netfilter/xt_connmark.c4
-rw-r--r--net/netfilter/xt_hashlimit.c4
-rw-r--r--net/netfilter/xt_ipcomp.c2
-rw-r--r--net/netlink/af_netlink.c27
-rw-r--r--net/netlink/af_netlink.h2
-rw-r--r--net/netlink/diag.c5
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/openvswitch/conntrack.c5
-rw-r--r--net/packet/af_packet.c27
-rw-r--r--net/rds/Makefile2
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rds/tcp.c2
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/peer_object.c4
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/act_mirred.c5
-rw-r--r--net/sched/act_pedit.c24
-rw-r--r--net/sched/cls_api.c6
-rw-r--r--net/sched/cls_basic.c4
-rw-r--r--net/sched/cls_bpf.c4
-rw-r--r--net/sched/cls_cgroup.c7
-rw-r--r--net/sched/cls_flow.c1
-rw-r--r--net/sched/cls_flower.c41
-rw-r--r--net/sched/cls_matchall.c1
-rw-r--r--net/sched/cls_rsvp.h3
-rw-r--r--net/sched/cls_tcindex.c1
-rw-r--r--net/sctp/input.c35
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/output.c8
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c32
-rw-r--r--net/socket.c17
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c13
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c82
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c21
-rw-r--r--net/sunrpc/clnt.c7
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/svcsock.c21
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c37
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c12
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c6
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h3
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--net/switchdev/switchdev.c9
-rw-r--r--net/tipc/bcast.c14
-rw-r--r--net/tipc/bcast.h3
-rw-r--r--net/tipc/bearer.c11
-rw-r--r--net/tipc/bearer.h13
-rw-r--r--net/tipc/link.c42
-rw-r--r--net/tipc/monitor.c10
-rw-r--r--net/tipc/msg.h17
-rw-r--r--net/tipc/name_distr.c1
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/socket.c50
-rw-r--r--net/tipc/udp_media.c5
-rw-r--r--net/unix/af_unix.c20
-rw-r--r--net/wireless/core.h1
-rw-r--r--net/wireless/scan.c69
-rw-r--r--net/wireless/sysfs.c5
-rw-r--r--net/wireless/util.c37
-rw-r--r--net/xfrm/xfrm_policy.c10
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--samples/bpf/Makefile4
-rw-r--r--samples/bpf/bpf_helpers.h2
-rw-r--r--samples/bpf/parse_ldabs.c1
-rw-r--r--samples/bpf/parse_simple.c1
-rw-r--r--samples/bpf/parse_varlen.c1
-rw-r--r--samples/bpf/sampleip_kern.c2
-rwxr-xr-xsamples/bpf/tc_l2_redirect.sh173
-rw-r--r--samples/bpf/tc_l2_redirect_kern.c236
-rw-r--r--samples/bpf/tc_l2_redirect_user.c73
-rw-r--r--samples/bpf/tcbpf1_kern.c1
-rw-r--r--samples/bpf/tcbpf2_kern.c1
-rw-r--r--samples/bpf/test_cgrp2_tc_kern.c1
-rw-r--r--samples/bpf/trace_event_kern.c2
-rw-r--r--scripts/Makefile.build81
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/Makefile.ubsan4
-rwxr-xr-xscripts/bloat-o-meter3
-rw-r--r--scripts/gcc-plugins/cyc_complexity_plugin.c4
-rw-r--r--scripts/gcc-plugins/gcc-common.h1
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c25
-rw-r--r--scripts/gcc-plugins/sancov_plugin.c4
-rwxr-xr-xscripts/gcc-x86_64-has-stack-protector.sh2
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--security/apparmor/domain.c6
-rw-r--r--sound/core/info.c9
-rw-r--r--sound/hda/hdac_i915.c18
-rw-r--r--sound/pci/hda/patch_hdmi.c7
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/pci/hda/thinkpad_helper.c3
-rw-r--r--sound/soc/codecs/cs4270.c8
-rw-r--r--sound/soc/codecs/da7219.c3
-rw-r--r--sound/soc/codecs/hdac_hdmi.c2
-rw-r--r--sound/soc/codecs/hdmi-codec.c7
-rw-r--r--sound/soc/codecs/rt298.c5
-rw-r--r--sound/soc/codecs/rt5663.c4
-rw-r--r--sound/soc/codecs/sti-sas.c2
-rw-r--r--sound/soc/codecs/tas571x.c37
-rw-r--r--sound/soc/intel/Kconfig3
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c1
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c4
-rw-r--r--sound/soc/intel/skylake/skl.c8
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/qcom/lpass-cpu.c3
-rw-r--r--sound/soc/qcom/lpass-platform.c166
-rw-r--r--sound/soc/qcom/lpass.h1
-rw-r--r--sound/soc/samsung/ac97.c10
-rw-r--r--sound/soc/samsung/i2s.c19
-rw-r--r--sound/soc/samsung/pcm.c19
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c16
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c14
-rw-r--r--sound/soc/samsung/spdif.c19
-rw-r--r--sound/soc/sti/uniperif_player.c6
-rw-r--r--sound/soc/sunxi/sun4i-codec.c19
-rw-r--r--sound/sparc/dbri.c26
-rw-r--r--sound/usb/card.c3
-rw-r--r--tools/perf/ui/browsers/hists.c48
-rw-r--r--tools/perf/util/hist.c12
-rw-r--r--tools/power/acpi/Makefile.config23
-rw-r--r--tools/power/acpi/Makefile.rules40
-rw-r--r--tools/power/acpi/tools/acpidbg/Makefile4
-rw-r--r--tools/power/acpi/tools/acpidbg/acpidbg.c8
-rw-r--r--tools/power/acpi/tools/acpidump/Makefile12
-rw-r--r--tools/power/cpupower/utils/cpufreq-set.c7
-rw-r--r--tools/virtio/ringtest/Makefile4
-rw-r--r--tools/virtio/ringtest/main.c20
-rw-r--r--tools/virtio/ringtest/main.h4
-rw-r--r--tools/virtio/ringtest/noring.c6
-rw-r--r--tools/virtio/ringtest/ptr_ring.c22
-rw-r--r--tools/virtio/ringtest/ring.c18
-rw-r--r--tools/virtio/ringtest/virtio_ring_0_9.c64
-rw-r--r--virt/kvm/arm/pmu.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c41
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h14
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c6
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c6
-rw-r--r--virt/kvm/arm/vgic/vgic.c12
-rw-r--r--virt/kvm/async_pf.c13
-rw-r--r--virt/kvm/eventfd.c22
-rw-r--r--virt/kvm/kvm_main.c8
2117 files changed, 81411 insertions, 33214 deletions
diff --git a/CREDITS b/CREDITS
index 837367624e45..d7ebdfbc4d4f 100644
--- a/CREDITS
+++ b/CREDITS
@@ -9,7 +9,7 @@
Linus
----------
-M: Matt Mackal
+N: Matt Mackal
E: mpm@selenic.com
D: SLOB slab allocator
@@ -1910,7 +1910,7 @@ S: Ra'annana, Israel
N: Andi Kleen
E: andi@firstfloor.org
-U: http://www.halobates.de
+W: http://www.halobates.de
D: network, x86, NUMA, various hacks
S: Schwalbenstr. 96
S: 85551 Ottobrunn
@@ -2089,8 +2089,8 @@ D: ST Microelectronics SPEAr13xx PCI host bridge driver
D: Synopsys Designware PCI host bridge driver
N: Gabor Kuti
-M: seasons@falcon.sch.bme.hu
-M: seasons@makosteszta.sote.hu
+E: seasons@falcon.sch.bme.hu
+E: seasons@makosteszta.sote.hu
D: Original author of software suspend
N: Jaroslav Kysela
diff --git a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
index b82deeaec314..470def06ab0a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
+++ b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
@@ -1,4 +1,4 @@
-What: state
+What: /sys/devices/system/ibm_rtl/state
Date: Sep 2010
KernelVersion: 2.6.37
Contact: Vernon Mauery <vernux@us.ibm.com>
@@ -10,7 +10,7 @@ Description: The state file allows a means by which to change in and
Users: The ibm-prtm userspace daemon uses this interface.
-What: version
+What: /sys/devices/system/ibm_rtl/version
Date: Sep 2010
KernelVersion: 2.6.37
Contact: Vernon Mauery <vernux@us.ibm.com>
diff --git a/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt b/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
index 003bc246a270..164cbb15f04c 100644
--- a/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
+++ b/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
@@ -16,6 +16,8 @@ graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Video port 0 for RGB input
- Video port 1 for VGA output
+Optional properties:
+- vdd-supply: Power supply for DAC
Example
-------
diff --git a/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt b/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
index dc1452f0d5d8..5e9a84d6e5f1 100644
--- a/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
@@ -19,7 +19,9 @@ Required properties:
Optional properties
- reg-io-width: the width of the reg:1,4, default set to 1 if not present
-- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing;
+ if the property is omitted, a functionally reduced I2C bus
+ controller on DW HDMI is probed
- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
Example:
diff --git a/Documentation/devicetree/bindings/display/ti/ti,tfp410.txt b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt
index 2cbe32a3d0bb..54d7e31525ec 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,tfp410.txt
+++ b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt
@@ -6,10 +6,15 @@ Required properties:
Optional properties:
- powerdown-gpios: power-down gpio
+- reg: I2C address. If and only if present, the device node
+ should be placed into the i2c controller node where the
+ tfp410 i2c is connected.
Required nodes:
-- Video port 0 for DPI input
-- Video port 1 for DVI output
+- Video port 0 for DPI input [1].
+- Video port 1 for DVI output [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example
-------
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index e1d4a0b59612..81a75893d1b8 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -32,6 +32,14 @@ optional properties:
- active low = drive pixel data on falling edge/
sample data on rising edge
- ignored = ignored
+ - syncclk-active: with
+ - active high = drive sync on rising edge/
+ sample sync on falling edge of pixel
+ clock
+ - active low = drive sync on falling edge/
+ sample sync on rising edge of pixel
+ clock
+ - omitted = same configuration as pixelclk-active
- interlaced (bool): boolean to enable interlaced mode
- doublescan (bool): boolean to enable doublescan mode
- doubleclk (bool): boolean to enable doubleclock mode
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 0d30e42e40be..1a02f099a0ff 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -6,9 +6,11 @@ Required Properties:
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
+ - "renesas,du-r8a7792" for R8A7792 (R-Car V2H) compatible DU
- "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU
- "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU
- "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU
+ - "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU
- reg: A list of base address and length of each memory resource, one for
each entry in the reg-names property.
@@ -25,10 +27,10 @@ Required Properties:
- clock-names: Name of the clocks. This property is model-dependent.
- R8A7779 uses a single functional clock. The clock doesn't need to be
named.
- - R8A779[01345] use one functional clock per channel and one clock per LVDS
- encoder (if available). The functional clocks must be named "du.x" with
- "x" being the channel numerical index. The LVDS clocks must be named
- "lvds.x" with "x" being the LVDS encoder numerical index.
+ - R8A779[0123456] use one functional clock per channel and one clock per
+ LVDS encoder (if available). The functional clocks must be named "du.x"
+ with "x" being the channel numerical index. The LVDS clocks must be
+ named "lvds.x" with "x" being the LVDS encoder numerical index.
- In addition to the functional and encoder clocks, all DU versions also
support externally supplied pixel clocks. Those clocks are optional.
When supplied they must be named "dclkin.x" with "x" being the input
@@ -47,9 +49,11 @@ corresponding to each DU output.
R8A7779 (H1) DPAD 0 DPAD 1 - -
R8A7790 (H2) DPAD LVDS 0 LVDS 1 -
R8A7791 (M2-W) DPAD LVDS 0 - -
+ R8A7792 (V2H) DPAD 0 DPAD 1 - -
R8A7793 (M2-N) DPAD LVDS 0 - -
R8A7794 (E2) DPAD 0 DPAD 1 - -
R8A7795 (H3) DPAD HDMI 0 HDMI 1 LVDS
+ R8A7796 (M3-W) DPAD HDMI LVDS -
Example: R8A7790 (R-Car H2) DU
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index b95696d748c7..b82c00449468 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -28,6 +28,8 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
Required properties:
- compatible: value must be either:
* allwinner,sun5i-a13-tcon
+ * allwinner,sun6i-a31-tcon
+ * allwinner,sun6i-a31s-tcon
* allwinner,sun8i-a33-tcon
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
@@ -50,7 +52,7 @@ Required properties:
second the block connected to the TCON channel 1 (usually the TV
encoder)
-On the A13, there is one more clock required:
+On SoCs other than the A33, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
DRC
@@ -64,6 +66,8 @@ adaptive backlight control.
Required properties:
- compatible: value must be one of:
+ * allwinner,sun6i-a31-drc
+ * allwinner,sun6i-a31s-drc
* allwinner,sun8i-a33-drc
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -87,6 +91,7 @@ system.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-backend
+ * allwinner,sun6i-a31-display-backend
* allwinner,sun8i-a33-display-backend
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the frontend and backend
@@ -117,6 +122,7 @@ deinterlacing and color space conversion.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-frontend
+ * allwinner,sun6i-a31-display-frontend
* allwinner,sun8i-a33-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -142,6 +148,8 @@ extra node.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-engine
+ * allwinner,sun6i-a31-display-engine
+ * allwinner,sun6i-a31s-display-engine
* allwinner,sun8i-a33-display-engine
- allwinner,pipelines: list of phandle to the display engine
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index a83abd79c55c..6fddb4f4f71a 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -1,7 +1,9 @@
Device-Tree bindings for tilcdc DRM driver
Required properties:
- - compatible: value should be "ti,am33xx-tilcdc".
+ - compatible: value should be one of the following:
+ - "ti,am33xx-tilcdc" for AM335x based boards
+ - "ti,da850-tilcdc" for DA850/AM18x/OMAP-L138 based boards
- interrupts: the interrupt number
- reg: base address and size of the LCDC device
@@ -51,7 +53,7 @@ Optional nodes:
Example:
fb: fb@4830e000 {
- compatible = "ti,am33xx-tilcdc";
+ compatible = "ti,am33xx-tilcdc", "ti,da850-tilcdc";
reg = <0x4830e000 0x1000>;
interrupt-parent = <&intc>;
interrupts = <36>;
diff --git a/Documentation/devicetree/bindings/display/zte,vou.txt b/Documentation/devicetree/bindings/display/zte,vou.txt
new file mode 100644
index 000000000000..740e5bd2e4f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/zte,vou.txt
@@ -0,0 +1,84 @@
+ZTE VOU Display Controller
+
+This is a display controller found on the ZTE ZX296718 SoC. It includes multiple
+Graphic Layers (GL) and Video Layers (VL), two Mixers/Channels, and a few blocks
+handling scaling, color space conversion etc. VOU also integrates the support
+for typical output devices, like HDMI, TV Encoder, VGA, and RGB LCD.
+
+* Master VOU node
+
+It must be the parent node of all the sub-device nodes.
+
+Required properties:
+ - compatible: should be "zte,zx296718-vou"
+ - #address-cells: should be <1>
+ - #size-cells: should be <1>
+ - ranges: list of address translations between VOU and sub-devices
+
+* VOU DPC device
+
+Required properties:
+ - compatible: should be "zte,zx296718-dpc"
+ - reg: Physical base address and length of DPC register regions, one for each
+ entry in 'reg-names'
+ - reg-names: The names of register regions. The following regions are required:
+ "osd"
+ "timing_ctrl"
+ "dtrc"
+ "vou_ctrl"
+ "otfppu"
+ - interrupts: VOU DPC interrupt number to CPU
+ - clocks: A list of phandle + clock-specifier pairs, one for each entry
+ in 'clock-names'
+ - clock-names: A list of clock names. The following clocks are required:
+ "aclk"
+ "ppu_wclk"
+ "main_wclk"
+ "aux_wclk"
+
+* HDMI output device
+
+Required properties:
+ - compatible: should be "zte,zx296718-hdmi"
+ - reg: Physical base address and length of the HDMI device IO region
+ - interrupts : HDMI interrupt number to CPU
+ - clocks: A list of phandle + clock-specifier pairs, one for each entry
+ in 'clock-names'
+ - clock-names: A list of clock names. The following clocks are required:
+ "osc_cec"
+ "osc_clk"
+ "xclk"
+
+Example:
+
+vou: vou@1440000 {
+ compatible = "zte,zx296718-vou";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x1440000 0x10000>;
+
+ dpc: dpc@0 {
+ compatible = "zte,zx296718-dpc";
+ reg = <0x0000 0x1000>, <0x1000 0x1000>,
+ <0x5000 0x1000>, <0x6000 0x1000>,
+ <0xa000 0x1000>;
+ reg-names = "osd", "timing_ctrl",
+ "dtrc", "vou_ctrl",
+ "otfppu";
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topcrm VOU_ACLK>, <&topcrm VOU_PPU_WCLK>,
+ <&topcrm VOU_MAIN_WCLK>, <&topcrm VOU_AUX_WCLK>;
+ clock-names = "aclk", "ppu_wclk",
+ "main_wclk", "aux_wclk";
+ };
+
+ hdmi: hdmi@c000 {
+ compatible = "zte,zx296718-hdmi";
+ reg = <0xc000 0x4000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&topcrm HDMI_OSC_CEC>,
+ <&topcrm HDMI_OSC_CLK>,
+ <&topcrm HDMI_XCLK>;
+ clock-names = "osc_cec", "osc_clk", "xclk";
+ };
+};
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
index fbbacd958240..6f28969af9dc 100644
--- a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
@@ -6,7 +6,7 @@ perform in-band IPMI communication with their host.
Required properties:
-- compatible : should be "aspeed,ast2400-bt-bmc"
+- compatible : should be "aspeed,ast2400-ibt-bmc"
- reg: physical address and size of the registers
Optional properties:
@@ -17,7 +17,7 @@ Optional properties:
Example:
ibt@1e789140 {
- compatible = "aspeed,ast2400-bt-bmc";
+ compatible = "aspeed,ast2400-ibt-bmc";
reg = <0x1e789140 0x18>;
interrupts = <8>;
};
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 4e00e859e885..bfa461aaac99 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -43,6 +43,9 @@ Optional properties:
reset signal present internally in some host controller IC designs.
See Documentation/devicetree/bindings/reset/reset.txt for details.
+* reset-names: name of the reset requested via the "resets" property. Must be
+ "reset". (It is used together with the "resets" property.)
+
* clocks: from common clock binding: handle to biu and ciu clocks for the
bus interface unit clock and the card interface unit clock.
@@ -103,6 +106,8 @@ board specific portions as listed below.
interrupts = <0 75 0>;
#address-cells = <1>;
#size-cells = <0>;
+ resets = <&rst 20>;
+ reset-names = "reset";
};
[board specific internal DMA resources]
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index e1d76812419c..05150957ecfd 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -9,10 +9,26 @@ The following properties are common to the Ethernet controllers:
- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
the maximum frame size (there's contradiction in ePAPR).
-- phy-mode: string, operation mode of the PHY interface; supported values are
- "mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
- "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii", "trgmii"; this is now a
- de-facto standard property;
+- phy-mode: string, operation mode of the PHY interface. This is now a de-facto
+ standard property; supported values are:
+ * "mii"
+ * "gmii"
+ * "sgmii"
+ * "qsgmii"
+ * "tbi"
+ * "rev-mii"
+ * "rmii"
+ * "rgmii" (RX and TX delays are added by the MAC when required)
+ * "rgmii-id" (RGMII with internal RX and TX delays provided by the PHY, the
+ MAC should not add the RX or TX delays in this case)
+ * "rgmii-rxid" (RGMII with internal RX delay provided by the PHY, the MAC
+ should not add an RX delay in this case)
+ * "rgmii-txid" (RGMII with internal TX delay provided by the PHY, the MAC
+ should not add a TX delay in this case)
+ * "rtbi"
+ * "smii"
+ * "xgmii"
+ * "trgmii"
- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
- phy-handle: phandle, specifies a reference to a node representing a PHY
device; this property is described in ePAPR and so preferred;
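For driver authors, this property is typically consumed via of_get_phy_mode(). A hedged
sketch follows (kernel 4.9-era signature of of_get_phy_mode(); the driver context and
function name are hypothetical) of honouring the rgmii-id/rxid/txid rules listed above:

  #include <linux/of_net.h>
  #include <linux/phy.h>

  static int hypothetical_mac_parse_phy_mode(struct device_node *np)
  {
          int mode = of_get_phy_mode(np);  /* parses "phy-mode"/"phy-connection-type" */

          if (mode < 0)
                  return mode;

          /* With rgmii-id/rgmii-rxid/rgmii-txid the PHY inserts the delays,
           * so the MAC must leave its own delay settings untouched. */
          if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
              mode == PHY_INTERFACE_MODE_RGMII_RXID ||
              mode == PHY_INTERFACE_MODE_RGMII_TXID)
                  ; /* do not program MAC-side RX/TX delays */

          return mode;
  }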
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
index bce52b2ec55e..6fd988c84c4f 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
@@ -49,6 +49,7 @@ Optional port properties:
and
- phy-handle: See ethernet.txt file in the same directory.
+ - phy-mode: See ethernet.txt file in the same directory.
or
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
index ba67b39939c1..71aeda1ca055 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
@@ -26,13 +26,16 @@ Required properties:
- "sys"
- "legacy"
- "client"
-- resets: Must contain five entries for each entry in reset-names.
+- resets: Must contain seven entries, one for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names: Must include the following names
- "core"
- "mgmt"
- "mgmt-sticky"
- "pipe"
+ - "pm"
+ - "aclk"
+ - "pclk"
- pinctrl-names : The pin control state names
- pinctrl-0: The "default" pinctrl state
- #interrupt-cells: specifies the number of cells needed to encode an
@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
phys = <&pcie_phy>;
phy-names = "pcie-phy";
pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index f9753c416974..b24583aa34c3 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -14,11 +14,6 @@ Required properies:
- #size-cells : The value of this property must be 1
- ranges : defines mapping between pin controller node (parent) to
gpio-bank node (children).
- - interrupt-parent: phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
- which includes IRQ mux selection register, and the offset of the IRQ mux
- selection register.
- pins-are-numbered: Specify the subnodes are using numbered pinmux to
specify pins.
@@ -37,6 +32,11 @@ Required properties:
Optional properties:
- reset: : Reference to the reset controller
+ - interrupt-parent: phandle of the interrupt parent to which the external
+ GPIO interrupts are forwarded.
+ - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
+ which includes IRQ mux selection register, and the offset of the IRQ mux
+ selection register.
Example:
#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
diff --git a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
index fd40c852d7c7..462b04e8209f 100644
--- a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
+++ b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
@@ -12,7 +12,7 @@ Required properties:
Optional properties:
- ti,dmic: phandle for the OMAP dmic node if the machine have it connected
-- ti,jack_detection: Need to be present if the board capable to detect jack
+- ti,jack-detection: Needs to be present if the board is capable of detecting jack
insertion, removal.
Available audio endpoints for the audio-routing table:
diff --git a/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
new file mode 100644
index 000000000000..9409d9c6a260
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
@@ -0,0 +1,33 @@
+Silicon Image SiI8620 HDMI/MHL bridge bindings
+
+Required properties:
+ - compatible: "sil,sii8620"
+ - reg: i2c address of the bridge
+ - cvcc10-supply: Digital Core Supply Voltage (1.0V)
+ - iovcc18-supply: I/O Supply Voltage (1.8V)
+ - interrupts, interrupt-parent: interrupt specifier of INT pin
+ - reset-gpios: gpio specifier of RESET pin
+ - clocks, clock-names: specification and name of "xtal" clock
+ - video interfaces: Device node can contain a video interface port
+ node for the HDMI encoder, as described in [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+ sii8620@39 {
+ reg = <0x39>;
+ compatible = "sil,sii8620";
+ cvcc10-supply = <&ldo36_reg>;
+ iovcc18-supply = <&ldo34_reg>;
+ interrupt-parent = <&gpf0>;
+ interrupts = <2 0>;
+ reset-gpios = <&gpv7 0 0>;
+ clocks = <&pmu_system_controller 0>;
+ clock-names = "xtal";
+
+ port {
+ mhl_to_hdmi: endpoint {
+ remote-endpoint = <&hdmi_to_mhl>;
+ };
+ };
+ };
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index 5d50d6733db3..a0d65eb49055 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -86,10 +86,10 @@ reservation
fence
~~~~~
-.. kernel-doc:: drivers/dma-buf/fence.c
+.. kernel-doc:: drivers/dma-buf/dma-fence.c
:export:
-.. kernel-doc:: include/linux/fence.h
+.. kernel-doc:: include/linux/dma-fence.h
:internal:
.. kernel-doc:: drivers/dma-buf/seqno-fence.c
@@ -98,10 +98,10 @@ fence
.. kernel-doc:: include/linux/seqno-fence.h
:internal:
-.. kernel-doc:: drivers/dma-buf/fence-array.c
+.. kernel-doc:: drivers/dma-buf/dma-fence-array.c
:export:
-.. kernel-doc:: include/linux/fence-array.h
+.. kernel-doc:: include/linux/dma-fence-array.h
:internal:
.. kernel-doc:: drivers/dma-buf/reservation.c
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 14cdc101d165..1b5f15653b1b 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -447,7 +447,6 @@ prototypes:
int (*flush) (struct file *);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index d619c8d71966..b5039a00caaf 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -828,7 +828,6 @@ struct file_operations {
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 37284bcc7764..e35920db1f4c 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -143,6 +143,9 @@ Device Instance and Driver Handling
.. kernel-doc:: drivers/gpu/drm/drm_drv.c
:export:
+.. kernel-doc:: include/drm/drm_drv.h
+ :internal:
+
Driver Load
-----------
@@ -350,6 +353,23 @@ how the ioctl is allowed to be called.
.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
:export:
+
+Misc Utilities
+==============
+
+Printer
+-------
+
+.. kernel-doc:: include/drm/drm_print.h
+ :doc: print
+
+.. kernel-doc:: include/drm/drm_print.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_print.c
+ :export:
+
+
Legacy Support Code
===================
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index bb4254d19cbb..03040aa14fe8 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -63,6 +63,9 @@ Atomic State Reset and Initialization
.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
:doc: atomic state reset and initialization
+Helper Functions Reference
+--------------------------
+
.. kernel-doc:: include/drm/drm_atomic_helper.h
:internal:
@@ -261,14 +264,6 @@ Plane Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
:export:
-Tile group
-==========
-
-# FIXME: This should probably be moved into a property documentation section
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
- :doc: Tile group
-
Auxiliary Modeset Helpers
=========================
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 53b872c105d2..0ef21076012b 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -15,25 +15,24 @@ be setup by initializing the following fields.
- struct drm_mode_config_funcs \*funcs;
Mode setting functions.
-Modeset Base Object Abstraction
-===============================
+Mode Configuration
-.. kernel-doc:: include/drm/drm_mode_object.h
- :internal:
+KMS Core Structures and Functions
+=================================
-.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
+.. kernel-doc:: drivers/gpu/drm/drm_mode_config.c
:export:
-KMS Data Structures
-===================
-
-.. kernel-doc:: include/drm/drm_crtc.h
+.. kernel-doc:: include/drm/drm_mode_config.h
:internal:
-KMS API Functions
-=================
+Modeset Base Object Abstraction
+===============================
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+.. kernel-doc:: include/drm/drm_mode_object.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
:export:
Atomic Mode Setting Function Reference
@@ -45,6 +44,15 @@ Atomic Mode Setting Function Reference
.. kernel-doc:: include/drm/drm_atomic.h
:internal:
+CRTC Abstraction
+================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_crtc.h
+ :internal:
+
Frame Buffer Abstraction
========================
@@ -63,52 +71,17 @@ Frame Buffer Functions Reference
DRM Format Handling
===================
+.. kernel-doc:: include/drm/drm_fourcc.h
+ :internal:
+
.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
:export:
Dumb Buffer Objects
===================
-The KMS API doesn't standardize backing storage object creation and
-leaves it to driver-specific ioctls. Furthermore actually creating a
-buffer object even for GEM-based drivers is done through a
-driver-specific ioctl - GEM only has a common userspace interface for
-sharing and destroying objects. While not an issue for full-fledged
-graphics stacks that include device-specific userspace components (in
-libdrm for instance), this limit makes DRM-based early boot graphics
-unnecessarily complex.
-
-Dumb objects partly alleviate the problem by providing a standard API to
-create dumb buffers suitable for scanout, which can then be used to
-create KMS frame buffers.
-
-To support dumb objects drivers must implement the dumb_create,
-dumb_destroy and dumb_map_offset operations.
-
-- int (\*dumb_create)(struct drm_file \*file_priv, struct
- drm_device \*dev, struct drm_mode_create_dumb \*args);
- The dumb_create operation creates a driver object (GEM or TTM
- handle) suitable for scanout based on the width, height and depth
- from the struct :c:type:`struct drm_mode_create_dumb
- <drm_mode_create_dumb>` argument. It fills the argument's
- handle, pitch and size fields with a handle for the newly created
- object and its line pitch and size in bytes.
-
-- int (\*dumb_destroy)(struct drm_file \*file_priv, struct
- drm_device \*dev, uint32_t handle);
- The dumb_destroy operation destroys a dumb object created by
- dumb_create.
-
-- int (\*dumb_map_offset)(struct drm_file \*file_priv, struct
- drm_device \*dev, uint32_t handle, uint64_t \*offset);
- The dumb_map_offset operation associates an mmap fake offset with
- the object given by the handle and returns it. Drivers must use the
- :c:func:`drm_gem_create_mmap_offset()` function to associate
- the fake offset as described in ?.
-
-Note that dumb objects may not be used for gpu acceleration, as has been
-attempted on some ARM embedded platforms. Such drivers really must have
-a hardware-specific ioctl to allocate suitable buffer objects.
+.. kernel-doc:: drivers/gpu/drm/drm_dumb_buffers.c
+ :doc: overview
Plane Abstraction
=================
@@ -287,6 +260,12 @@ Property Types and Blob Property Support
.. kernel-doc:: drivers/gpu/drm/drm_property.c
:export:
+Standard Connector Properties
+-----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: standard connector properties
+
Plane Composition Properties
----------------------------
@@ -308,6 +287,18 @@ Color Management Properties
.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
:export:
+Tile Group Property
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: Tile group
+
+Explicit Fencing Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+ :doc: explicit fencing properties
+
Existing KMS Properties
-----------------------
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 1ba301cebe16..de3ac9f90f8f 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -216,3 +216,9 @@ interfaces. Especially since all hardware-acceleration interfaces to
userspace are driver specific for efficiency and other reasons these
interfaces can be rather substantial. Hence every driver has its own
chapter.
+
+Testing and validation
+======================
+
+.. kernel-doc:: drivers/gpu/drm/drm_debugfs_crc.c
+ :doc: CRC ABI
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 87aaffc22920..117d2ab7a5f7 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -49,6 +49,15 @@ Intel GVT-g Guest Support(vGPU)
.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
:internal:
+Intel GVT-g Host Support(vGPU device model)
+-------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
+ :doc: Intel GVT-g host support
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
+ :internal:
+
Display Hardware Handling
=========================
@@ -180,7 +189,7 @@ Display Refresh Rate Switching (DRRS)
DPIO
----
-.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpio_phy.c
:doc: DPIO
CSR firmware support for DMC
@@ -249,19 +258,19 @@ Global GTT views
GTT Fences and Swizzling
------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:internal:
Global GTT Fence Handling
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:doc: fence register handling
Hardware Tiling and Swizzling Details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:doc: tiling swizzling details
Object Tiling IOCTLs
diff --git a/Documentation/i2c/i2c-topology b/Documentation/i2c/i2c-topology
index e0aefeece551..1a014fede0b7 100644
--- a/Documentation/i2c/i2c-topology
+++ b/Documentation/i2c/i2c-topology
@@ -326,7 +326,7 @@ Two parent-locked sibling muxes
This is a good topology.
- .--------.
+ .--------.
.----------. .--| dev D1 |
| parent- |--' '--------'
.--| locked | .--------.
@@ -350,7 +350,7 @@ Mux-locked and parent-locked sibling muxes
This is a good topology.
- .--------.
+ .--------.
.----------. .--| dev D1 |
| mux- |--' '--------'
.--| locked | .--------.
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 6d6c07cf1a9a..63912ef34606 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
Switch tagging protocols
------------------------
-DSA currently supports 4 different tagging protocols, and a tag-less mode as
+DSA currently supports 5 different tagging protocols, and a tag-less mode as
well. The different protocols are implemented in:
net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
net/dsa/tag_dsa.c: Marvell's original DSA tag
net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
The exact format of the tag protocol is vendor specific, but in general, they
all contain something which:
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 0fe1c6e0dbcd..a20b2fae942b 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -29,8 +29,8 @@ A: There are always two trees (git repositories) in play. Both are driven
Linus, and net-next is where the new code goes for the future release.
You can find the trees here:
- http://git.kernel.org/?p=linux/kernel/git/davem/net.git
- http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
Q: How often do changes from these trees make it to the mainline Linus tree?
@@ -76,7 +76,7 @@ Q: So where are we now in this cycle?
A: Load the mainline (Linus) page here:
- http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
and note the top of the "tags" section. If it is rc1, it is early
in the dev cycle. If it was tagged rc7 a week ago, then a release
@@ -123,7 +123,7 @@ A: Normally Greg Kroah-Hartman collects stable commits himself, but
It contains the patches which Dave has selected, but not yet handed
off to Greg. If Greg already has the patch, then it will be here:
- http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
A quick way to find whether the patch is in this stable-queue is
to simply clone the repo, and then git grep the mainline commit ID, e.g.
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 4fb51d32fccc..433b6724797a 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -33,24 +33,6 @@ nf_conntrack_events - BOOLEAN
If this option is enabled, the connection tracking code will
provide userspace with connection tracking events via ctnetlink.
-nf_conntrack_events_retry_timeout - INTEGER (seconds)
- default 15
-
- This option is only relevant when "reliable connection tracking
- events" are used. Normally, ctnetlink is "lossy", that is,
- events are normally dropped when userspace listeners can't keep up.
-
- Userspace can request "reliable event mode". When this mode is
- active, the conntrack will only be destroyed after the event was
- delivered. If event delivery fails, the kernel periodically
- re-tries to send the event to userspace.
-
- This is the maximum interval the kernel should use when re-trying
- to deliver the destroy event.
-
- A higher number means there will be fewer delivery retries and it
- will take longer for a backlog to be processed.
-
nf_conntrack_expect_max - INTEGER
Maximum size of expectation table. Default value is
nf_conntrack_buckets / 256. Minimum is 1.
@@ -80,10 +62,13 @@ nf_conntrack_generic_timeout - INTEGER (seconds)
protocols.
nf_conntrack_helper - BOOLEAN
- 0 - disabled
- not 0 - enabled (default)
+ 0 - disabled (default)
+ not 0 - enabled
Enable automatic conntrack helper assignment.
+ If disabled, it is required to set up iptables rules to assign
+ helpers to connections. See the CT target description in the
+ iptables-extensions(8) man page for further information.
nf_conntrack_icmp_timeout - INTEGER (seconds)
default 30
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
index b63a68531afd..269681a6faec 100644
--- a/Documentation/sync_file.txt
+++ b/Documentation/sync_file.txt
@@ -6,7 +6,7 @@
This document serves as a guide for device drivers writers on what the
sync_file API is, and how drivers can support it. Sync file is the carrier of
-the fences(struct fence) that are needed to synchronize between drivers or
+the fences (struct dma_fence) that are needed to synchronize between drivers or
across process boundaries.
The sync_file API is meant to be used to send and receive fence information
@@ -32,9 +32,9 @@ in-fences and out-fences
Sync files can go either to or from userspace. When a sync_file is sent from
the driver to userspace we call the fences it contains 'out-fences'. They are
related to a buffer that the driver is processing or is going to process, so
-the driver creates an out-fence to be able to notify, through fence_signal(),
-when it has finished using (or processing) that buffer. Out-fences are fences
-that the driver creates.
+the driver creates an out-fence to be able to notify, through
+dma_fence_signal(), when it has finished using (or processing) that buffer.
+Out-fences are fences that the driver creates.
On the other hand if the driver receives fence(s) through a sync_file from
userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
@@ -47,7 +47,7 @@ Creating Sync Files
When a driver needs to send an out-fence to userspace it creates a sync_file.
Interface:
- struct sync_file *sync_file_create(struct fence *fence);
+ struct sync_file *sync_file_create(struct dma_fence *fence);
The caller passes the out-fence and gets back the sync_file. That is just the
first step, next it needs to install an fd on sync_file->file. So it gets an
@@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences
from it.
Interface:
- struct fence *sync_file_get_fence(int fd);
+ struct dma_fence *sync_file_get_fence(int fd);
The returned reference is owned by the caller and must be disposed of
-afterwards using fence_put(). In case of error, a NULL is returned instead.
+afterwards using dma_fence_put(). In case of error, a NULL is returned instead.
References:
[1] struct sync_file in include/linux/sync_file.h
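As a rough companion to the interfaces above, a hedged kernel-side sketch (not taken
from any specific driver; it assumes the driver already holds a reference to an
initialised struct dma_fence, and error handling is pared down) of exporting an
out-fence and importing an in-fence:

  #include <linux/dma-fence.h>
  #include <linux/fcntl.h>
  #include <linux/file.h>
  #include <linux/sync_file.h>

  /* Export a driver-owned out-fence to userspace as a sync_file fd. */
  static int export_out_fence(struct dma_fence *fence)
  {
          struct sync_file *sync_file = sync_file_create(fence);
          int fd;

          if (!sync_file)
                  return -ENOMEM;

          fd = get_unused_fd_flags(O_CLOEXEC);
          if (fd < 0) {
                  fput(sync_file->file);
                  return fd;
          }
          fd_install(fd, sync_file->file);
          return fd;              /* typically handed back through an ioctl */
  }

  /* Import an in-fence received from userspace; caller must dma_fence_put() it. */
  static struct dma_fence *import_in_fence(int fd)
  {
          return sync_file_get_fence(fd);
  }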
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 739db9ab16b2..6bbceb9a3a19 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -777,6 +777,17 @@ Gets the current timestamp of kvmclock as seen by the current guest. In
conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
such as migration.
+When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the
+set of bits that KVM can return in struct kvm_clock_data's flag member.
+
+The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned
+value is the exact kvmclock value seen by all VCPUs at the instant
+when KVM_GET_CLOCK was called. If clear, the returned value is simply
+CLOCK_MONOTONIC plus a constant offset; the offset can be modified
+with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock,
+but the exact value read by each VCPU could differ, because the host
+TSC is not stable.
+
struct kvm_clock_data {
__u64 clock; /* kvmclock current value */
__u32 flags;
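For illustration, a hedged userspace sketch of probing these bits (assumes a uapi
linux/kvm.h new enough to define KVM_CLOCK_TSC_STABLE; error checking omitted):

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int main(void)
  {
          int kvm = open("/dev/kvm", O_RDWR);
          int vm = ioctl(kvm, KVM_CREATE_VM, 0);
          /* KVM_CHECK_EXTENSION reports which flags KVM_GET_CLOCK may set. */
          int caps = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK);
          struct kvm_clock_data data = { 0 };

          ioctl(vm, KVM_GET_CLOCK, &data);
          printf("kvmclock=%llu tsc_stable_supported=%d tsc_stable_now=%d\n",
                 (unsigned long long)data.clock,
                 !!(caps & KVM_CLOCK_TSC_STABLE),
                 !!(data.flags & KVM_CLOCK_TSC_STABLE));
          return 0;
  }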
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index f2491a8c68b4..e5dd9f4d6100 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -4,7 +4,17 @@ KVM Lock Overview
1. Acquisition Orders
---------------------
-(to be written)
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+ them together is quite rare.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
+else is a leaf: no other lock is taken inside the critical sections.
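+
+A minimal userspace model of the mutex rule above (assumption: pthread mutexes merely
+stand in for kvm->lock and vcpu->mutex; this is not KVM code):
+
+  #include <pthread.h>
+
+  static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;   /* models kvm->lock */
+  static pthread_mutex_t vcpu_mutex = PTHREAD_MUTEX_INITIALIZER; /* models vcpu->mutex */
+
+  static void locked_vcpu_op(void)
+  {
+          pthread_mutex_lock(&kvm_lock);   /* outer lock is always taken first */
+          pthread_mutex_lock(&vcpu_mutex); /* inner lock nests inside it */
+          /* ... touch per-vCPU state ... */
+          pthread_mutex_unlock(&vcpu_mutex);
+          pthread_mutex_unlock(&kvm_lock);
+  }
+
+  int main(void) { locked_vcpu_op(); return 0; }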
2: Exception
------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 51a5dea9b84c..f981ae71a0e9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -77,6 +77,7 @@ Descriptions of section entries:
Q: Patchwork web based patch tracking system site
T: SCM tree type and location.
Type is one of: git, hg, quilt, stgit, topgit
+ B: Bug tracking system location.
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
@@ -281,6 +282,7 @@ L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
Q: https://patchwork.kernel.org/project/linux-acpi/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/acpi/
F: drivers/pnp/pnpacpi/
@@ -304,6 +306,8 @@ W: https://acpica.org/
W: https://github.com/acpica/acpica/
Q: https://patchwork.kernel.org/project/linux-acpi/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+B: https://bugzilla.kernel.org
+B: https://bugs.acpica.org
S: Supported
F: drivers/acpi/acpica/
F: include/acpi/
@@ -313,6 +317,7 @@ ACPI FAN DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/acpi/fan.c
@@ -328,6 +333,7 @@ ACPI THERMAL DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/acpi/*thermal*
@@ -335,6 +341,7 @@ ACPI VIDEO DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/acpi/acpi_video.c
@@ -2552,15 +2559,18 @@ S: Supported
F: drivers/net/ethernet/broadcom/genet/
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Dept-HSGLinuxNICDev@qlogic.com
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Harish Patil <harish.patil@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2.*
F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <ariel.elior@qlogic.com>
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <ariel.elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
@@ -2767,7 +2777,9 @@ S: Supported
F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/brocade/bna/
@@ -3906,7 +3918,7 @@ F: include/linux/dma-buf*
F: include/linux/reservation.h
F: include/linux/*fence.h
F: Documentation/dma-buf-sharing.txt
-T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
SYNC FILE FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
@@ -3914,10 +3926,12 @@ R: Gustavo Padovan <gustavo@padovan.org>
S: Maintained
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
-F: drivers/dma-buf/sync_file.c
+F: drivers/dma-buf/sync_*
+F: drivers/dma-buf/sw_sync.c
F: include/linux/sync_file.h
+F: include/uapi/linux/sync_file.h
F: Documentation/sync_file.txt
-T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
@@ -4015,11 +4029,30 @@ F: Documentation/gpu/
F: include/drm/
F: include/uapi/drm/
+DRM DRIVERS AND MISC GPU PATCHES
+M: Daniel Vetter <daniel.vetter@intel.com>
+M: Jani Nikula <jani.nikula@linux.intel.com>
+M: Sean Paul <seanpaul@chromium.org>
+W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/gpu/
+F: drivers/gpu/vga/
+F: drivers/gpu/drm/*
+F: include/drm/drm*
+F: include/uapi/drm/drm*
+
DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/ast/
+DRM DRIVERS FOR BRIDGE CHIPS
+M: Archit Taneja <architt@codeaurora.org>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: drivers/gpu/drm/bridge/
+
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
@@ -4055,7 +4088,6 @@ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@intel.com>
M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
-L: dri-devel@lists.freedesktop.org
W: https://01.org/linuxgraphics/
Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel
@@ -4065,6 +4097,16 @@ F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
F: Documentation/gpu/i915.rst
+INTEL GVT-g DRIVERS (Intel GPU Virtualization)
+M: Zhenyu Wang <zhenyuw@linux.intel.com>
+M: Zhi Wang <zhi.a.wang@intel.com>
+L: igvt-g-dev@lists.01.org
+L: intel-gfx@lists.freedesktop.org
+W: https://01.org/igvt-g
+T: git https://github.com/01org/gvt-linux.git
+S: Supported
+F: drivers/gpu/drm/i915/gvt/
+
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
L: dri-devel@lists.freedesktop.org
@@ -4118,6 +4160,7 @@ F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
M: Xinliang Liu <z.liuxinliang@hisilicon.com>
+M: Rongrong Zou <zourongrong@gmail.com>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
@@ -4283,6 +4326,13 @@ S: Maintained
F: drivers/gpu/drm/tilcdc/
F: Documentation/devicetree/bindings/display/tilcdc/
+DRM DRIVERS FOR ZTE ZX
+M: Shawn Guo <shawnguo@kernel.org>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/zte/
+F: Documentation/devicetree/bindings/display/zte,vou.txt
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -5659,6 +5709,7 @@ HIBERNATION (aka Software Suspend, aka swsusp)
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Pavel Machek <pavel@ucw.cz>
L: linux-pm@vger.kernel.org
+B: https://bugzilla.kernel.org
S: Supported
F: arch/x86/power/
F: drivers/base/power/
@@ -7080,6 +7131,7 @@ F: drivers/scsi/53c700*
LED SUBSYSTEM
M: Richard Purdie <rpurdie@rpsys.net>
M: Jacek Anaszewski <j.anaszewski@samsung.com>
+M: Pavel Machek <pavel@ucw.cz>
L: linux-leds@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
S: Maintained
@@ -7921,6 +7973,10 @@ F: mm/
MEMORY TECHNOLOGY DEVICES (MTD)
M: David Woodhouse <dwmw2@infradead.org>
M: Brian Norris <computersforpeace@gmail.com>
+M: Boris Brezillon <boris.brezillon@free-electrons.com>
+M: Marek Vasut <marek.vasut@gmail.com>
+M: Richard Weinberger <richard@nod.at>
+M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -8049,6 +8105,7 @@ F: drivers/infiniband/hw/mlx4/
F: include/linux/mlx4/
MELLANOX MLX5 core VPI driver
+M: Saeed Mahameed <saeedm@mellanox.com>
M: Matan Barak <matanb@mellanox.com>
M: Leon Romanovsky <leonro@mellanox.com>
L: netdev@vger.kernel.org
@@ -8518,11 +8575,10 @@ F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
-M: Manish Chopra <manish.chopra@qlogic.com>
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Rajesh Borundia <rajesh.borundia@qlogic.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Rahul Verma <rahul.verma@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
-W: http://www.qlogic.com
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
@@ -9240,11 +9296,12 @@ S: Maintained
F: drivers/pci/host/*layerscape*
PCI DRIVER FOR IMX6
-M: Richard Zhu <Richard.Zhu@freescale.com>
+M: Richard Zhu <hongxing.zhu@nxp.com>
M: Lucas Stach <l.stach@pengutronix.de>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
F: drivers/pci/host/*imx6*
PCI DRIVER FOR TI KEYSTONE
@@ -9303,17 +9360,11 @@ F: drivers/pci/host/pci-exynos.c
PCI DRIVER FOR SYNOPSIS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
-M: Pratyush Anand <pratyush.anand@gmail.com>
-L: linux-pci@vger.kernel.org
-S: Maintained
-F: drivers/pci/host/*designware*
-
-PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
-M: Jose Abreu <Jose.Abreu@synopsys.com>
+M: Joao Pinto <Joao.Pinto@synopsys.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/designware-pcie.txt
-F: drivers/pci/host/pcie-designware-plat.c
+F: drivers/pci/host/*designware*
PCI DRIVER FOR GENERIC OF HOSTS
M: Will Deacon <will.deacon@arm.com>
@@ -9328,7 +9379,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
M: Keith Busch <keith.busch@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: arch/x86/pci/vmd.c
+F: drivers/pci/host/vmd.c
PCIE DRIVER FOR ST SPEAR13XX
M: Pratyush Anand <pratyush.anand@gmail.com>
@@ -9615,6 +9666,7 @@ POWER MANAGEMENT CORE
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/base/power/
F: include/linux/pm.h
@@ -9898,33 +9950,32 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER
-M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
-M: Ron Mercer <ron.mercer@qlogic.com>
-M: linux-driver@qlogic.com
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Dept-GELinuxNICDev@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@qlogic.com>
-M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
-M: Dept-GELinuxNICDev@qlogic.com
-M: linux-driver@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@qlogic.com>
-M: Ariel Elior <Ariel.Elior@qlogic.com>
-M: everest-linux-l2@qlogic.com
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
@@ -11402,6 +11453,17 @@ W: http://www.st.com/spear
S: Maintained
F: drivers/clk/spear/
+SPI NOR SUBSYSTEM
+M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+M: Marek Vasut <marek.vasut@gmail.com>
+L: linux-mtd@lists.infradead.org
+W: http://www.linux-mtd.infradead.org/
+Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+T: git git://github.com/spi-nor/linux.git
+S: Maintained
+F: drivers/mtd/spi-nor/
+F: include/linux/mtd/spi-nor.h
+
SPI SUBSYSTEM
M: Mark Brown <broonie@kernel.org>
L: linux-spi@vger.kernel.org
@@ -11594,6 +11656,7 @@ M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Len Brown <len.brown@intel.com>
M: Pavel Machek <pavel@ucw.cz>
L: linux-pm@vger.kernel.org
+B: https://bugzilla.kernel.org
S: Supported
F: Documentation/power/
F: arch/x86/kernel/acpi/
@@ -12781,6 +12844,7 @@ F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: Documentation/devicetree/bindings/virtio/
@@ -12811,6 +12875,7 @@ F: include/uapi/linux/virtio_gpu.h
VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
L: kvm@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index a2650f9c6a25..369099dc0fae 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc8
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
@@ -370,7 +370,7 @@ LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
@@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
- -std=gnu89
+ -std=gnu89 $(call cc-option,-fno-PIE)
+
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS := -D__ASSEMBLY__
+KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
@@ -606,6 +607,13 @@ else
include/config/auto.conf: ;
endif # $(dot-config)
+# For the kernel to actually contain only the needed exported symbols,
+# we have to build modules as well to determine what those symbols are.
+# (this can be evaluated only once include/config/auto.conf has been included)
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+ KBUILD_MODULES := 1
+endif
+
# The all: target is the default when no target is given on the
# command line.
# This allow a user to issue only 'make' to build a kernel including modules
@@ -620,7 +628,6 @@ ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -629,15 +636,18 @@ KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
endif
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os
+KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
else
ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS += -O2
+KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
else
KBUILD_CFLAGS += -O2
endif
endif
+KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
+ $(call cc-disable-warning,maybe-uninitialized,))
+
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
@@ -941,7 +951,7 @@ ifdef CONFIG_GDB_SCRIPTS
endif
ifdef CONFIG_TRIM_UNUSED_KSYMS
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
- "$(MAKE) KBUILD_MODULES=1 -f $(srctree)/Makefile vmlinux_prereq"
+ "$(MAKE) -f $(srctree)/Makefile vmlinux"
endif
# standalone target for easier testing
@@ -1016,8 +1026,6 @@ prepare2: prepare3 prepare-compiler-check outputmakefile asm-generic
prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
include/config/auto.conf
$(cmd_crmodverdir)
- $(Q)test -e include/generated/autoksyms.h || \
- touch include/generated/autoksyms.h
archprepare: archheaders archscripts prepare1 scripts_basic
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 864adad52280..19cce226d1a8 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,6 +50,9 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
cflags-$(atleast_gcc44) += -fsection-anchors
+cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+
ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64
@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3
# Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warnings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
endif
# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 6ae2c476ad82..53ce226f77a5 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -71,7 +71,7 @@
reg-io-width = <4>;
};
- arcpmu0: pmu {
+ arcpct0: pct {
compatible = "snps,arc700-pct";
};
};
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index ce0ccd20b5bf..5ee96b067c08 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -69,7 +69,7 @@
};
};
- arcpmu0: pmu {
+ arcpct0: pct {
compatible = "snps,arc700-pct";
};
};
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index bcf603142a33..3c391ba565ed 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -83,5 +83,9 @@
reg = <0xf0003000 0x44>;
interrupts = <7>;
};
+
+ arcpct0: pct {
+ compatible = "snps,arc700-pct";
+ };
};
};
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 7314f538847b..b0066a749d4c 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 65ab9fbf83f2..ebe9ebb92933 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 3b3990cddbe1..4bde43278be6 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 98cf20933bbb..f6fb3d26557e 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index ddf8b96d494e..b9f0fe00044b 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index ceb90745326e..6da71ba253a9 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_DRM=y
CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7f3f9f63708c..1bd24ec3e350 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -43,12 +43,14 @@
#define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_DE_BIT 6 /* PC is in delay slot */
#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_Z_BIT 11
#define STATUS_L_BIT 12 /* Loop inhibit */
/* These masks correspond to the status word(STATUS_32) bits */
#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_Z_MASK (1<<STATUS_Z_BIT)
#define STATUS_L_MASK (1<<STATUS_L_BIT)
/*
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 08e7e2a16ac1..a36e8601114d 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -22,10 +22,11 @@
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
- " lp 1f \n"
- " nop \n"
- "1: \n"
- : "+l"(loops));
+ " mov lp_count, %0 \n"
+ " lp 1f \n"
+ " nop \n"
+ "1: \n"
+ : : "r"(loops));
}
extern void __bad_udelay(void);
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 89eeb3720051..e94ca72b974e 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 89fdd1b0a76e..0861007d9ef3 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
* API expected BY platform smp code (FROM arch smp code)
*
* smp_ipi_irq_setup:
- * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ * Takes @cpu and @hwirq to which the arch-common ISR is hooked up
*/
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
/*
* struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index f1e07c2344f8..3b67f538f142 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */
else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */
+ else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+ arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */
else
arc_base_baud = 50000000; /* Fixed default 50MHz */
}
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index c424d5abc318..f39142acc89e 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
{
unsigned long flags;
cpumask_t online;
+ unsigned int destination_bits;
+ unsigned int distribution_mode;
/* errout if no online cpu per @cpumask */
if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
raw_spin_lock_irqsave(&mcip_lock, flags);
- idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
- idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+ destination_bits = cpumask_bits(&online)[0];
+ idu_set_dest(data->hwirq, destination_bits);
+
+ if (ffs(destination_bits) == fls(destination_bits))
+ distribution_mode = IDU_M_DISTRI_DEST;
+ else
+ distribution_mode = IDU_M_DISTRI_RR;
+
+ idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
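
The ffs()/fls() comparison above is simply a test for "exactly one destination CPU":
the lowest and highest set bits coincide only for a one-bit mask, in which case the IDU
can use direct destination mode instead of round-robin. A standalone illustration
(hypothetical helper name, plain userspace C):

  #include <stdio.h>

  /* True when the affinity mask selects exactly one CPU, i.e. it is a
   * non-zero power of two -- equivalent to ffs(mask) == fls(mask). */
  static int single_cpu_in_mask(unsigned int mask)
  {
          return mask && !(mask & (mask - 1));
  }

  int main(void)
  {
          printf("%d %d\n", single_cpu_in_mask(0x4), single_cpu_in_mask(0x6)); /* 1 0 */
          return 0;
  }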
@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
};
-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;
static void idu_cascade_isr(struct irq_desc *desc)
{
- struct irq_domain *domain = irq_desc_get_handler_data(desc);
- unsigned int core_irq = irq_desc_get_irq(desc);
- unsigned int idu_irq;
+ struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+ irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+ irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
- idu_irq = core_irq - idu_first_irq;
- generic_handle_irq(irq_find_mapping(domain, idu_irq));
+ generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
struct irq_domain *domain;
/* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc);
- int i, irq;
+ int i, virq;
struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
* however we need it to get the parent virq and set IDU handler
* as first level isr
*/
- irq = irq_of_parse_and_map(intc, i);
+ virq = irq_of_parse_and_map(intc, i);
if (!i)
- idu_first_irq = irq;
+ idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
- irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+ irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
}
__mcip_cmd(CMD_IDU_ENABLE, 0);
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 59aa43cb146e..a41a79a4f4fe 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
- int uval;
- int ret;
+ struct pt_regs *regs = current_pt_regs();
+ int uval = -EFAULT;
/*
* This is only for old cores lacking LLOCK/SCOND, which by definition
@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+ /* Z indicates to userspace if the operation succeeded */
+ regs->status32 &= ~STATUS_Z_MASK;
+
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
preempt_disable();
- ret = __get_user(uval, uaddr);
- if (ret)
+ if (__get_user(uval, uaddr))
goto done;
- if (uval != expected)
- ret = -EAGAIN;
- else
- ret = __put_user(new, uaddr);
+ if (uval == expected) {
+ if (!__put_user(new, uaddr))
+ regs->status32 |= STATUS_Z_MASK;
+ }
done:
preempt_enable();
- return ret;
+ return uval;
}
void arch_cpu_idle(void)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f183cc648851..88674d972c9d 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -22,6 +22,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
+#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int i;
/*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
+ * if platform didn't set the present map already, do it now
+ * boot cpu is set to present already by init/main.c
*/
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
+ if (num_present_cpus() <= 1) {
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+ }
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
*/
static DEFINE_PER_CPU(int, ipi_dev);
-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
int *dev = per_cpu_ptr(&ipi_dev, cpu);
+ unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+ if (!virq)
+ panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
/* Boot cpu calls request, all call enable */
if (!cpu) {
int rc;
- rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+ rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
if (rc)
- panic("Percpu IRQ request failed for %d\n", irq);
+ panic("Percpu IRQ request failed for %u\n", virq);
}
- enable_percpu_irq(irq, 0);
+ enable_percpu_irq(virq, 0);
return 0;
}
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index f927b8dc6edd..c10390d1ddb6 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
cycle_t full;
} stamp;
-
- __asm__ __volatile(
- "1: \n"
- " lr %0, [AUX_RTC_LOW] \n"
- " lr %1, [AUX_RTC_HIGH] \n"
- " lr %2, [AUX_RTC_CTRL] \n"
- " bbit0.nt %2, 31, 1b \n"
- : "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+ /*
+ * hardware has an internal state machine which tracks readout of
+ * low/high and updates the CTRL.status if
+ * - interrupt/exception taken between the two reads
+ * - high increments after low has been read
+ */
+ do {
+ stamp.low = read_aux_reg(AUX_RTC_LOW);
+ stamp.high = read_aux_reg(AUX_RTC_HIGH);
+ status = read_aux_reg(AUX_RTC_CTRL);
+ } while (!(status & _BITUL(31)));
return stamp.full;
}
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 2b96cfc3be75..50d71695cd4e 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
static int l2_line_sz;
static int ioc_exists;
-int slc_enable = 1, ioc_enable = 1;
+int slc_enable = 1, ioc_enable = 0;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 60aab5a7522b..cd8aad8226dd 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
__free_pages(page, get_order(size));
}
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long user_count = vma_pages(vma);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+ unsigned long off = vma->vm_pgoff;
+ int ret = -ENXIO;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
/*
* streaming DMA Mapping API...
* CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
struct dma_map_ops arc_dma_ops = {
.alloc = arc_dma_alloc,
.free = arc_dma_free,
+ .mmap = arc_dma_mmap,
.map_page = arc_dma_map_page,
.map_sg = arc_dma_map_sg,
.sync_single_for_device = arc_dma_sync_single_for_device,
diff --git a/arch/arc/plat-eznps/smp.c b/arch/arc/plat-eznps/smp.c
index 5e901f86e4bd..56a4c8522f11 100644
--- a/arch/arc/plat-eznps/smp.c
+++ b/arch/arc/plat-eznps/smp.c
@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
mtm_enable_core(cpu);
}
-static void eznps_ipi_clear(int irq)
-{
- write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
struct plat_smp_ops plat_smp_ops = {
.info = smp_cpuinfo_buf,
.init_early_smp = eznps_init_cpumasks,
.cpu_kick = eznps_smp_wakeup_cpu,
.ipi_send = eznps_ipi_send,
.init_per_cpu = eznps_init_per_cpu,
- .ipi_clear = eznps_ipi_clear,
};
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index befcd2619902..c558ba75cbcc 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -745,7 +745,6 @@ dtb-$(CONFIG_MACH_SUN4I) += \
sun4i-a10-pcduino2.dtb \
sun4i-a10-pov-protab2-ips9.dtb
dtb-$(CONFIG_MACH_SUN5I) += \
- ntc-gr8-evb.dtb \
sun5i-a10s-auxtek-t003.dtb \
sun5i-a10s-auxtek-t004.dtb \
sun5i-a10s-mk802.dtb \
@@ -761,6 +760,7 @@ dtb-$(CONFIG_MACH_SUN5I) += \
sun5i-a13-olinuxino-micro.dtb \
sun5i-a13-q8-tablet.dtb \
sun5i-a13-utoo-p66.dtb \
+ sun5i-gr8-evb.dtb \
sun5i-r8-chip.dtb
dtb-$(CONFIG_MACH_SUN6I) += \
sun6i-a31-app4-evb1.dtb \
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index dec4b073ceb1..379939699164 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -64,8 +64,8 @@
};
ldo3_reg: ldo3 {
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
@@ -76,8 +76,8 @@
};
ldo5_reg: ldo5 {
- regulator-min-microvolt = <1725000>;
- regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
regulator-always-on;
};
@@ -100,14 +100,14 @@
};
ldo9_reg: ldo9 {
- regulator-min-microvolt = <1200000>;
+ regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3600000>;
regulator-always-on;
};
ldo10_reg: ldo10 {
- regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <3650000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
regulator-always-on;
};
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 0ff1c2de95bf..26cce4d18405 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -13,6 +13,11 @@
};
};
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0>;
+ };
+
wl12xx_vmmc: wl12xx_vmmc {
compatible = "regulator-fixed";
regulator-name = "vwl1271";
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 731ec37aed5b..8f9a69ca818c 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -13,9 +13,9 @@
};
};
- memory@0 {
+ memory@80000000 {
device_type = "memory";
- reg = <0 0>;
+ reg = <0x80000000 0>;
};
leds {
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 6365635fea5c..4caadb253249 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -124,6 +124,7 @@
compatible = "ti,abe-twl6040";
ti,model = "omap5-uevm";
+ ti,jack-detection;
ti,mclk-freq = <19200000>;
ti,mcpdm = <&mcpdm>;
@@ -415,7 +416,7 @@
ti,backup-battery-charge-high-current;
};
- gpadc {
+ gpadc: gpadc {
compatible = "ti,palmas-gpadc";
interrupts = <18 0
16 0
@@ -475,8 +476,8 @@
smps6_reg: smps6 {
/* VDD_DDR3 - over VDD_SMPS6 */
regulator-name = "smps6";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
regulator-always-on;
regulator-boot-on;
};
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 91096a49efa9..8f79b4147bba 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -283,6 +283,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -296,6 +298,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -309,6 +313,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -322,6 +328,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -335,6 +343,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -348,6 +358,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c5_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -363,6 +375,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c10_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -376,6 +390,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c11_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts
index ef2ff2f518f6..7fb507fcba7e 100644
--- a/arch/arm/boot/dts/stih410-b2260.dts
+++ b/arch/arm/boot/dts/stih410-b2260.dts
@@ -74,7 +74,7 @@
/* Low speed expansion connector */
spi0: spi@9844000 {
label = "LS-SPI0";
- cs-gpio = <&pio30 3 0>;
+ cs-gpios = <&pio30 3 0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ntc-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts
index 4b622f3b5220..714381fd64d7 100644
--- a/arch/arm/boot/dts/ntc-gr8-evb.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts
@@ -44,7 +44,7 @@
*/
/dts-v1/;
-#include "ntc-gr8.dtsi"
+#include "sun5i-gr8.dtsi"
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm/boot/dts/ntc-gr8.dtsi b/arch/arm/boot/dts/sun5i-gr8.dtsi
index ca54e03ef366..ca54e03ef366 100644
--- a/arch/arm/boot/dts/ntc-gr8.dtsi
+++ b/arch/arm/boot/dts/sun5i-gr8.dtsi
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 48fc24f36fcb..300a1bd5a6ec 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -282,11 +282,15 @@
uart1_pins_a: uart1@0 {
allwinner,pins = "PG6", "PG7";
allwinner,function = "uart1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
uart1_pins_cts_rts_a: uart1-cts-rts@0 {
allwinner,pins = "PG8", "PG9";
allwinner,function = "uart1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
mmc0_pins_a: mmc0@0 {
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 0745538b26d3..55e0e3ea9cb6 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -8,7 +8,6 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
-generic-y += export.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index d7ea6bcb29bf..8ef05381984b 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d19e02d03fd..d5423ab15ed5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -57,6 +57,9 @@ struct kvm_arch {
/* VTTBR value associated with below pgd and vmid */
u64 vttbr;
+ /* The last vcpu id that ran on each physical CPU */
+ int __percpu *last_vcpu_ran;
+
/* Timer */
struct arch_timer_kvm timer;
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 343135ede5fa..58508900c4bb 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -71,6 +71,7 @@
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 194b69923389..ada0d29a660f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
-#define __NR_syscalls (396)
+#define __NR_syscalls (400)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 2cb9dc770e1d..314100a06ccb 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -420,6 +420,9 @@
#define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
#define __NR_preadv2 (__NR_SYSCALL_BASE+392)
#define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
+#define __NR_pkey_mprotect (__NR_SYSCALL_BASE+394)
+#define __NR_pkey_alloc (__NR_SYSCALL_BASE+395)
+#define __NR_pkey_free (__NR_SYSCALL_BASE+396)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 68c2c097cffe..ad325a8c7e1e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -33,7 +33,7 @@ endif
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
-obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
new file mode 100644
index 000000000000..7e45f69a0ddc
--- /dev/null
+++ b/arch/arm/kernel/armksyms.c
@@ -0,0 +1,183 @@
+/*
+ * linux/arch/arm/kernel/armksyms.c
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/cryptohash.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/arm-smccc.h>
+
+#include <asm/checksum.h>
+#include <asm/ftrace.h>
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (prototypes are not correct though, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __do_div64(void);
+extern void __bswapsi2(void);
+extern void __bswapdi2(void);
+
+extern void __aeabi_idiv(void);
+extern void __aeabi_idivmod(void);
+extern void __aeabi_lasr(void);
+extern void __aeabi_llsl(void);
+extern void __aeabi_llsr(void);
+extern void __aeabi_lmul(void);
+extern void __aeabi_uidiv(void);
+extern void __aeabi_uidivmod(void);
+extern void __aeabi_ulcmp(void);
+
+extern void fpundefinstr(void);
+
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
+ /* platform dependent support */
+EXPORT_SYMBOL(arm_delay_ops);
+
+ /* networking */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_ipv6_magic);
+
+ /* io */
+#ifndef __raw_readsb
+EXPORT_SYMBOL(__raw_readsb);
+#endif
+#ifndef __raw_readsw
+EXPORT_SYMBOL(__raw_readsw);
+#endif
+#ifndef __raw_readsl
+EXPORT_SYMBOL(__raw_readsl);
+#endif
+#ifndef __raw_writesb
+EXPORT_SYMBOL(__raw_writesb);
+#endif
+#ifndef __raw_writesw
+EXPORT_SYMBOL(__raw_writesw);
+#endif
+#ifndef __raw_writesl
+EXPORT_SYMBOL(__raw_writesl);
+#endif
+
+ /* string / mem functions */
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(__memzero);
+
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(__get_user_64t_1);
+EXPORT_SYMBOL(__get_user_64t_2);
+EXPORT_SYMBOL(__get_user_64t_4);
+EXPORT_SYMBOL(__get_user_32t_8);
+#endif
+
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+#endif
+
+ /* gcc lib functions */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__do_div64);
+EXPORT_SYMBOL(__bswapsi2);
+EXPORT_SYMBOL(__bswapdi2);
+
+#ifdef CONFIG_AEABI
+EXPORT_SYMBOL(__aeabi_idiv);
+EXPORT_SYMBOL(__aeabi_idivmod);
+EXPORT_SYMBOL(__aeabi_lasr);
+EXPORT_SYMBOL(__aeabi_llsl);
+EXPORT_SYMBOL(__aeabi_llsr);
+EXPORT_SYMBOL(__aeabi_lmul);
+EXPORT_SYMBOL(__aeabi_uidiv);
+EXPORT_SYMBOL(__aeabi_uidivmod);
+EXPORT_SYMBOL(__aeabi_ulcmp);
+#endif
+
+ /* bitops */
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
+EXPORT_SYMBOL(_find_first_zero_bit_le);
+EXPORT_SYMBOL(_find_next_zero_bit_le);
+EXPORT_SYMBOL(_find_first_bit_le);
+EXPORT_SYMBOL(_find_next_bit_le);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(_find_first_zero_bit_be);
+EXPORT_SYMBOL(_find_next_zero_bit_be);
+EXPORT_SYMBOL(_find_first_bit_be);
+EXPORT_SYMBOL(_find_next_bit_be);
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_OLD_MCOUNT
+EXPORT_SYMBOL(mcount);
+#endif
+EXPORT_SYMBOL(__gnu_mcount_nc);
+#endif
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+EXPORT_SYMBOL(__pv_phys_pfn_offset);
+EXPORT_SYMBOL(__pv_offset);
+#endif
+
+#ifdef CONFIG_HAVE_ARM_SMCCC
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);
+#endif
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 703fa0f3cd8f..08030b18f10a 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -403,6 +403,9 @@
CALL(sys_copy_file_range)
CALL(sys_preadv2)
CALL(sys_pwritev2)
+ CALL(sys_pkey_mprotect)
+/* 395 */ CALL(sys_pkey_alloc)
+ CALL(sys_pkey_free)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index b629d3f11c3d..c73c4030ca5d 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -7,7 +7,6 @@
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
-#include <asm/export.h>
#include "entry-header.S"
@@ -154,7 +153,6 @@ ENTRY(mcount)
__mcount _old
#endif
ENDPROC(mcount)
-EXPORT_SYMBOL(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
@@ -207,7 +205,6 @@ UNWIND(.fnstart)
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
-EXPORT_SYMBOL(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f41cee4c5746..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -22,7 +22,6 @@
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>
-#include <asm/export.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
@@ -728,8 +727,6 @@ __pv_phys_pfn_offset:
__pv_offset:
.quad 0
.size __pv_offset, . -__pv_offset
-EXPORT_SYMBOL(__pv_phys_pfn_offset)
-EXPORT_SYMBOL(__pv_offset)
#endif
#include "head-common.S"
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index 37669e7e13af..2e48b674aab1 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -16,7 +16,6 @@
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
-#include <asm/export.h>
/*
* Wrap c macros in asm macros to delay expansion until after the
@@ -52,7 +51,6 @@ UNWIND( .fnend)
ENTRY(arm_smccc_smc)
SMCCC SMCCC_SMC
ENDPROC(arm_smccc_smc)
-EXPORT_SYMBOL(arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -62,4 +60,3 @@ EXPORT_SYMBOL(arm_smccc_smc)
ENTRY(arm_smccc_hvc)
SMCCC SMCCC_HVC
ENDPROC(arm_smccc_hvc)
-EXPORT_SYMBOL(arm_smccc_hvc)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc698383e822..9688ec0c6ef4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
+void dump_backtrace_stm(u32 *stack, u32 instruction)
+{
+ char str[80], *p;
+ unsigned int x;
+ int reg;
+
+ for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
+ if (instruction & BIT(reg)) {
+ p += sprintf(p, " r%d:%08x", reg, *stack--);
+ if (++x == 6) {
+ x = 0;
+ p = str;
+ printk("%s\n", str);
+ }
+ }
+ }
+ if (p != str)
+ printk("%s\n", str);
+}
+
#ifndef CONFIG_ARM_UNWIND
/*
* Stack pointers should always be within the kernels view of
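dump_backtrace_stm(), added above, walks the register bitmask from r10 down to r0 and flushes the line buffer after every six entries; it replaces the hand-rolled .Ldumpstm printk loop removed further down in lib/backtrace.S. A hedged user-space sketch of the same wrap-after-six formatting:

#include <stdio.h>

/* Sketch only: prints " rN:VALUE" entries, six per line, mirroring dump_backtrace_stm().
 * 'stack' points at the highest saved word, as in the kernel helper. */
static void dump_regs(const unsigned int *stack, unsigned int mask)
{
        char str[80], *p = str;
        int printed = 0;

        for (int reg = 10; reg >= 0; reg--) {
                if (mask & (1u << reg)) {
                        p += sprintf(p, " r%d:%08x", reg, *stack--);
                        if (++printed == 6) {
                                printed = 0;
                                p = str;
                                printf("%s\n", str);
                        }
                }
        }
        if (p != str)
                printf("%s\n", str);
}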
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 7fa487ef7e2f..37b2a11af345 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -3,6 +3,9 @@
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
+/* No __ro_after_init data in the .rodata section - which will always be ro */
+#define RO_AFTER_INIT_DATA
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
@@ -223,6 +226,8 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
+ *(.data..ro_after_init)
+
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
READ_MOSTLY_DATA(L1_CACHE_BYTES)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08bb84f2ad58..19b5f5c1c0ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
*/
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- int ret = 0;
+ int ret, cpu;
if (type)
return -EINVAL;
+ kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+ if (!kvm->arch.last_vcpu_ran)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
ret = kvm_alloc_stage2_pgd(kvm);
if (ret)
goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
out_fail_alloc:
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
return ret;
}
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
+
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ int *last_ran;
+
+ last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+ /*
+ * We might get preempted before the vCPU actually runs, but
+ * over-invalidation doesn't affect correctness.
+ */
+ if (*last_ran != vcpu->vcpu_id) {
+ kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+ *last_ran = vcpu->vcpu_id;
+ }
+
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
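The arm.c changes above keep a per-physical-CPU record of the last vCPU that ran there, initialised to -1, and request a local TLB flush whenever a different vCPU is loaded on that CPU; as the added comment notes, over-invalidation after preemption is harmless. A simplified single-threaded sketch of the bookkeeping (flush_local_tlb() is a hypothetical stand-in for the __kvm_tlb_flush_local_vmid hyp call):

#define NR_CPUS 8

/* Sketch: last vCPU id seen on each physical CPU, -1 meaning "none yet". */
static int last_vcpu_ran[NR_CPUS];

static void init_last_ran(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                last_vcpu_ran[cpu] = -1;
}

extern void flush_local_tlb(void);   /* hypothetical stand-in for the hyp TLB flush */

static void vcpu_load(int vcpu_id, int cpu)
{
        /* flushing too often (e.g. after preemption) is harmless, missing a flush is not */
        if (last_vcpu_ran[cpu] != vcpu_id) {
                flush_local_tlb();
                last_vcpu_ran[cpu] = vcpu_id;
        }
}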
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index 729652854f90..6d810af2d9fd 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
__kvm_tlb_flush_vmid(kvm);
}
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+ write_sysreg(kvm->arch.vttbr, VTTBR);
+ isb();
+
+ write_sysreg(0, TLBIALL);
+ dsb(nsh);
+ isb();
+
+ write_sysreg(0, VTTBR);
+}
+
void __hyp_text __kvm_flush_vm_context(void)
{
write_sysreg(0, TLBIALLNSNHIS);
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index a7e7de89bd75..b05e95840651 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsl)
ENDPROC(__ashldi3)
ENDPROC(__aeabi_llsl)
-EXPORT_SYMBOL(__ashldi3)
-EXPORT_SYMBOL(__aeabi_llsl)
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 490336e42518..275d7d2341a4 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_lasr)
ENDPROC(__ashrdi3)
ENDPROC(__aeabi_lasr)
-EXPORT_SYMBOL(__ashrdi3)
-EXPORT_SYMBOL(__aeabi_lasr)
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index fab5a50503ae..7d7952e5a3b1 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -10,6 +10,7 @@
* 27/03/03 Ian Molton Clean up CONFIG_CPU
*
*/
+#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
@@ -83,13 +84,13 @@ for_each_frame: tst frame, mask @ Check for address exceptions
teq r3, r1, lsr #11
ldreq r0, [frame, #-8] @ get sp
subeq r0, r0, #4 @ point at the last arg
- bleq .Ldumpstm @ dump saved registers
+ bleq dump_backtrace_stm @ dump saved registers
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
ldr r3, .Ldsi @ instruction exists,
teq r3, r1, lsr #11
subeq r0, frame, #16
- bleq .Ldumpstm @ dump saved registers
+ bleq dump_backtrace_stm @ dump saved registers
teq sv_fp, #0 @ zero saved fp means
beq no_frame @ no further frames
@@ -112,38 +113,6 @@ ENDPROC(c_backtrace)
.long 1004b, 1006b
.popsection
-#define instr r4
-#define reg r5
-#define stack r6
-
-.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr}
- mov stack, r0
- mov instr, r1
- mov reg, #10
- mov r7, #0
-1: mov r3, #1
- ARM( tst instr, r3, lsl reg )
- THUMB( lsl r3, reg )
- THUMB( tst instr, r3 )
- beq 2f
- add r7, r7, #1
- teq r7, #6
- moveq r7, #0
- adr r3, .Lcr
- addne r3, r3, #1 @ skip newline
- ldr r2, [stack], #-4
- mov r1, reg
- adr r0, .Lfp
- bl printk
-2: subs reg, reg, #1
- bpl 1b
- teq r7, #0
- adrne r0, .Lcr
- blne printk
- ldmfd sp!, {instr, reg, stack, r7, pc}
-
-.Lfp: .asciz " r%d:%08x%s"
-.Lcr: .asciz "\n"
.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index df06638b327c..7d807cfd8ef5 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,6 +1,5 @@
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
.macro bitop, name, instr
@@ -26,7 +25,6 @@ UNWIND( .fnstart )
bx lr
UNWIND( .fnend )
ENDPROC(\name )
-EXPORT_SYMBOL(\name )
.endm
.macro testop, name, instr, store
@@ -57,7 +55,6 @@ UNWIND( .fnstart )
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
-EXPORT_SYMBOL(\name )
.endm
#else
.macro bitop, name, instr
@@ -77,7 +74,6 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
-EXPORT_SYMBOL(\name )
.endm
/**
@@ -106,6 +102,5 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
-EXPORT_SYMBOL(\name )
.endm
#endif
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S
index f05f78247304..07cda737bb11 100644
--- a/arch/arm/lib/bswapsdi2.S
+++ b/arch/arm/lib/bswapsdi2.S
@@ -1,6 +1,5 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
ENTRY(__bswapsi2)
@@ -36,5 +35,3 @@ ENTRY(__bswapdi2)
ret lr
ENDPROC(__bswapdi2)
#endif
-EXPORT_SYMBOL(__bswapsi2)
-EXPORT_SYMBOL(__bswapdi2)
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index b566154f5cf4..e936352ccb00 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -10,7 +10,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.text
@@ -51,9 +50,6 @@ USER( strnebt r2, [r0])
UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
-#ifndef CONFIG_UACCESS_WITH_MEMCPY
-EXPORT_SYMBOL(arm_clear_user)
-#endif
.pushsection .text.fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 63e4c1ed0225..7a4b06049001 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
/*
* Prototype:
@@ -95,7 +94,6 @@ ENTRY(arm_copy_from_user)
#include "copy_template.S"
ENDPROC(arm_copy_from_user)
-EXPORT_SYMBOL(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index d97851d4af7a..6ee2f6706f86 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -13,7 +13,6 @@
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
-#include <asm/export.h>
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
@@ -46,4 +45,3 @@ ENTRY(copy_page)
PLD( beq 2b )
ldmfd sp!, {r4, pc} @ 3
ENDPROC(copy_page)
-EXPORT_SYMBOL(copy_page)
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 592c179112d1..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
/*
* Prototype:
@@ -100,9 +99,6 @@ WEAK(arm_copy_to_user)
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
-#ifndef CONFIG_UACCESS_WITH_MEMCPY
-EXPORT_SYMBOL(arm_copy_to_user)
-#endif
.pushsection .text.fixup,"ax"
.align 0
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 68603b5ee537..3ac6ef01bc43 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
@@ -31,4 +30,4 @@ ENTRY(__csum_ipv6_magic)
adcs r0, r0, #0
ldmfd sp!, {pc}
ENDPROC(__csum_ipv6_magic)
-EXPORT_SYMBOL(__csum_ipv6_magic)
+
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 830b20e81c37..984e0f29d548 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
@@ -141,4 +140,3 @@ ENTRY(csum_partial)
bne 4b
b .Lless4
ENDPROC(csum_partial)
-EXPORT_SYMBOL(csum_partial)
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 9c3383fed129..d03fc71fc88c 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -49,6 +49,5 @@
#define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
-#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck)
#include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 8b94d20e51d1..10b45909610c 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -8,7 +8,6 @@
* published by the Free Software Foundation.
*/
#include <asm/assembler.h>
-#include <asm/export.h>
/*
* unsigned int
@@ -332,4 +331,3 @@ FN_ENTRY
mov r5, r4, get_byte_1
b .Lexit
FN_EXIT
-FN_EXPORT
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 5d495edf3d83..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -73,7 +73,6 @@
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
-#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user)
#include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 69aad80a3af4..2cef11884857 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -24,7 +24,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/export.h>
#include <linux/timex.h>
/*
@@ -35,7 +34,6 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = {
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
};
-EXPORT_SYMBOL(arm_delay_ops);
static const struct delay_timer *delay_timer;
static bool delay_calibrated;
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index 0c9e1c18fc9e..a9eafe4981eb 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -15,7 +15,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -211,4 +210,3 @@ Ldiv0_64:
UNWIND(.fnend)
ENDPROC(__do_div64)
-EXPORT_SYMBOL(__do_div64)
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 26302b8cd38f..7848780e8834 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -15,7 +15,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
/*
@@ -38,7 +37,6 @@ ENTRY(_find_first_zero_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_le)
-EXPORT_SYMBOL(_find_first_zero_bit_le)
/*
* Purpose : Find next 'zero' bit
@@ -59,7 +57,6 @@ ENTRY(_find_next_zero_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_le)
-EXPORT_SYMBOL(_find_next_zero_bit_le)
/*
* Purpose : Find a 'one' bit
@@ -81,7 +78,6 @@ ENTRY(_find_first_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_le)
-EXPORT_SYMBOL(_find_first_bit_le)
/*
* Purpose : Find next 'one' bit
@@ -101,7 +97,6 @@ ENTRY(_find_next_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_le)
-EXPORT_SYMBOL(_find_next_bit_le)
#ifdef __ARMEB__
@@ -121,7 +116,6 @@ ENTRY(_find_first_zero_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_be)
-EXPORT_SYMBOL(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
@@ -139,7 +133,6 @@ ENTRY(_find_next_zero_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_be)
-EXPORT_SYMBOL(_find_next_zero_bit_be)
ENTRY(_find_first_bit_be)
teq r1, #0
@@ -157,7 +150,6 @@ ENTRY(_find_first_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_be)
-EXPORT_SYMBOL(_find_first_bit_be)
ENTRY(_find_next_bit_be)
teq r1, #0
@@ -174,7 +166,6 @@ ENTRY(_find_next_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_be)
-EXPORT_SYMBOL(_find_next_bit_be)
#endif
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 9d09a38e73af..8ecfd15c3a02 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -31,7 +31,6 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
-#include <asm/export.h>
ENTRY(__get_user_1)
check_uaccess r0, 1, r1, r2, __get_user_bad
@@ -39,7 +38,6 @@ ENTRY(__get_user_1)
mov r0, #0
ret lr
ENDPROC(__get_user_1)
-EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -60,7 +58,6 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_2)
-EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -68,7 +65,6 @@ ENTRY(__get_user_4)
mov r0, #0
ret lr
ENDPROC(__get_user_4)
-EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
check_uaccess r0, 8, r1, r2, __get_user_bad
@@ -82,7 +78,6 @@ ENTRY(__get_user_8)
mov r0, #0
ret lr
ENDPROC(__get_user_8)
-EXPORT_SYMBOL(__get_user_8)
#ifdef __ARMEB__
ENTRY(__get_user_32t_8)
@@ -96,7 +91,6 @@ ENTRY(__get_user_32t_8)
mov r0, #0
ret lr
ENDPROC(__get_user_32t_8)
-EXPORT_SYMBOL(__get_user_32t_8)
ENTRY(__get_user_64t_1)
check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -104,7 +98,6 @@ ENTRY(__get_user_64t_1)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_1)
-EXPORT_SYMBOL(__get_user_64t_1)
ENTRY(__get_user_64t_2)
check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -121,7 +114,6 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_64t_2)
-EXPORT_SYMBOL(__get_user_64t_2)
ENTRY(__get_user_64t_4)
check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -129,7 +121,6 @@ ENTRY(__get_user_64t_4)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_4)
-EXPORT_SYMBOL(__get_user_64t_4)
#endif
__get_user_bad8:
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index 3dff7a3a2aef..c31b2f3153f1 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.Linsb_align: rsb ip, ip, #4
cmp ip, r2
@@ -122,4 +121,3 @@ ENTRY(__raw_readsb)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)
-EXPORT_SYMBOL(__raw_readsb)
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index bfd39682325b..2ed86fa5465f 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
ENTRY(__raw_readsl)
teq r2, #0 @ do we have to check for the zero len?
@@ -78,4 +77,3 @@ ENTRY(__raw_readsl)
strb r3, [r1, #0]
ret lr
ENDPROC(__raw_readsl)
-EXPORT_SYMBOL(__raw_readsl)
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index b3af3db6caac..413da9914529 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
@@ -104,4 +103,4 @@ ENTRY(__raw_readsw)
ldmfd sp!, {r4, r5, r6, pc}
-EXPORT_SYMBOL(__raw_readsw)
+
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index 3c7a7a40b33e..d9a45e9692ae 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.macro pack, rd, hw1, hw2
#ifndef __ARMEB__
@@ -130,4 +129,3 @@ ENTRY(__raw_readsw)
strneb ip, [r1]
ldmfd sp!, {r4, pc}
ENDPROC(__raw_readsw)
-EXPORT_SYMBOL(__raw_readsw)
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index fa3633594415..a46bbc9b168b 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
@@ -93,4 +92,3 @@ ENTRY(__raw_writesb)
ldmfd sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)
-EXPORT_SYMBOL(__raw_writesb)
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index 98ed6aec0b47..4ea2435988c1 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
ENTRY(__raw_writesl)
teq r2, #0 @ do we have to check for the zero len?
@@ -66,4 +65,3 @@ ENTRY(__raw_writesl)
bne 6b
ret lr
ENDPROC(__raw_writesl)
-EXPORT_SYMBOL(__raw_writesl)
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 577184c082bb..121789eb6802 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
@@ -125,4 +124,3 @@ ENTRY(__raw_writesw)
strne ip, [r0]
ldmfd sp!, {r4, r5, r6, pc}
-EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index e335f489d1fc..269f90c51ad2 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
@@ -99,4 +98,3 @@ ENTRY(__raw_writesw)
strneh ip, [r0]
ret lr
ENDPROC(__raw_writesw)
-EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index f541bc013bff..9397b2e532af 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -36,7 +36,6 @@ Boston, MA 02111-1307, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.macro ARM_DIV_BODY dividend, divisor, result, curbit
@@ -239,8 +238,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
-EXPORT_SYMBOL(__udivsi3)
-EXPORT_SYMBOL(__aeabi_uidiv)
ENTRY(__umodsi3)
UNWIND(.fnstart)
@@ -259,7 +256,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__umodsi3)
-EXPORT_SYMBOL(__umodsi3)
#ifdef CONFIG_ARM_PATCH_IDIV
.align 3
@@ -307,8 +303,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
-EXPORT_SYMBOL(__divsi3)
-EXPORT_SYMBOL(__aeabi_idiv)
ENTRY(__modsi3)
UNWIND(.fnstart)
@@ -333,7 +327,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__modsi3)
-EXPORT_SYMBOL(__modsi3)
#ifdef CONFIG_AEABI
@@ -350,7 +343,6 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)
-EXPORT_SYMBOL(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
UNWIND(.fnstart)
@@ -364,7 +356,6 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)
-EXPORT_SYMBOL(__aeabi_idivmod)
#endif
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index e40833981417..922dcd88b02b 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsr)
ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
-EXPORT_SYMBOL(__lshrdi3)
-EXPORT_SYMBOL(__aeabi_llsr)
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index 44182bf686a5..74a5bed6d999 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
.align 5
@@ -25,4 +24,3 @@ ENTRY(memchr)
2: movne r0, #0
ret lr
ENDPROC(memchr)
-EXPORT_SYMBOL(memchr)
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 1be5b6ddf37c..64111bd4440b 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
@@ -69,5 +68,3 @@ ENTRY(memcpy)
ENDPROC(memcpy)
ENDPROC(mmiocpy)
-EXPORT_SYMBOL(memcpy)
-EXPORT_SYMBOL(mmiocpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 71dcc5400d02..69a9d47fc5ab 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.text
@@ -226,4 +225,3 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
ENDPROC(memmove)
-EXPORT_SYMBOL(memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 7b72044cba62..3c65e3bd790f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.text
.align 5
@@ -136,5 +135,3 @@ UNWIND( .fnstart )
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
-EXPORT_SYMBOL(memset)
-EXPORT_SYMBOL(mmioset)
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 6dec26ed5bcc..0eded952e089 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -10,7 +10,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.text
.align 5
@@ -136,4 +135,3 @@ UNWIND( .fnstart )
ret lr @ 1
UNWIND( .fnend )
ENDPROC(__memzero)
-EXPORT_SYMBOL(__memzero)
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
index b8f12388ccac..204305956925 100644
--- a/arch/arm/lib/muldi3.S
+++ b/arch/arm/lib/muldi3.S
@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -47,5 +46,3 @@ ENTRY(__aeabi_lmul)
ENDPROC(__muldi3)
ENDPROC(__aeabi_lmul)
-EXPORT_SYMBOL(__muldi3)
-EXPORT_SYMBOL(__aeabi_lmul)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 11de126e2ed6..38d660d3705f 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -31,7 +31,6 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
-#include <asm/export.h>
ENTRY(__put_user_1)
check_uaccess r0, 1, r1, ip, __put_user_bad
@@ -39,7 +38,6 @@ ENTRY(__put_user_1)
mov r0, #0
ret lr
ENDPROC(__put_user_1)
-EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
check_uaccess r0, 2, r1, ip, __put_user_bad
@@ -64,7 +62,6 @@ ENTRY(__put_user_2)
mov r0, #0
ret lr
ENDPROC(__put_user_2)
-EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
check_uaccess r0, 4, r1, ip, __put_user_bad
@@ -72,7 +69,6 @@ ENTRY(__put_user_4)
mov r0, #0
ret lr
ENDPROC(__put_user_4)
-EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
check_uaccess r0, 8, r1, ip, __put_user_bad
@@ -86,7 +82,6 @@ ENTRY(__put_user_8)
mov r0, #0
ret lr
ENDPROC(__put_user_8)
-EXPORT_SYMBOL(__put_user_8)
__put_user_bad:
mov r0, #-EFAULT
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 7301f6e6046c..013d64c71e8d 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
.align 5
@@ -26,4 +25,3 @@ ENTRY(strchr)
subeq r0, r0, #1
ret lr
ENDPROC(strchr)
-EXPORT_SYMBOL(strchr)
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index aaf9fd98b754..3cec1c7482c4 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
.align 5
@@ -25,4 +24,3 @@ ENTRY(strrchr)
mov r0, r3
ret lr
ENDPROC(strrchr)
-EXPORT_SYMBOL(strrchr)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 1626e3a551a1..6bd1089b07e0 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -19,7 +19,6 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
-#include <linux/export.h>
#include <asm/current.h>
#include <asm/page.h>
@@ -157,7 +156,6 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
}
return n;
}
-EXPORT_SYMBOL(arm_copy_to_user);
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
@@ -215,7 +213,6 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n)
}
return n;
}
-EXPORT_SYMBOL(arm_clear_user);
#if 0
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index 127a91af46f3..ad4a6309141a 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -36,7 +35,6 @@ ENTRY(__ucmpdi2)
ret lr
ENDPROC(__ucmpdi2)
-EXPORT_SYMBOL(__ucmpdi2)
#ifdef CONFIG_AEABI
@@ -50,7 +48,6 @@ ENTRY(__aeabi_ulcmp)
ret lr
ENDPROC(__aeabi_ulcmp)
-EXPORT_SYMBOL(__aeabi_ulcmp)
#endif
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 737450fe790c..cab128913e72 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -32,6 +32,7 @@ endif
ifdef CONFIG_SND_IMX_SOC
obj-y += ssi-fiq.o
+obj-y += ssi-fiq-ksym.o
endif
# i.MX21 based machines
diff --git a/arch/arm/mach-imx/ssi-fiq-ksym.c b/arch/arm/mach-imx/ssi-fiq-ksym.c
new file mode 100644
index 000000000000..792090f9a032
--- /dev/null
+++ b/arch/arm/mach-imx/ssi-fiq-ksym.c
@@ -0,0 +1,20 @@
+/*
+ * Exported ksyms for the SSI FIQ handler
+ *
+ * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/platform_data/asoc-imx-ssi.h>
+
+EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer);
+EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer);
+EXPORT_SYMBOL(imx_ssi_fiq_start);
+EXPORT_SYMBOL(imx_ssi_fiq_end);
+EXPORT_SYMBOL(imx_ssi_fiq_base);
+
diff --git a/arch/arm/mach-imx/ssi-fiq.S b/arch/arm/mach-imx/ssi-fiq.S
index fd7917f1c204..a8b93c5f29b5 100644
--- a/arch/arm/mach-imx/ssi-fiq.S
+++ b/arch/arm/mach-imx/ssi-fiq.S
@@ -8,7 +8,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
/*
* r8 = bit 0-15: tx offset, bit 16-31: tx buffer size
@@ -145,8 +144,4 @@ imx_ssi_fiq_tx_buffer:
.word 0x0
.L_imx_ssi_fiq_end:
imx_ssi_fiq_end:
-EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer)
-EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer)
-EXPORT_SYMBOL(imx_ssi_fiq_start)
-EXPORT_SYMBOL(imx_ssi_fiq_end)
-EXPORT_SYMBOL(imx_ssi_fiq_base)
+
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index a9afeebd59f2..0465338183c7 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -71,6 +71,7 @@ config SOC_AM43XX
select HAVE_ARM_TWD
select ARM_ERRATA_754322
select ARM_ERRATA_775420
+ select OMAP_INTERCONNECT
config SOC_DRA7XX
bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2abd53ae3e7a..cc6d9fa60924 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void)
#define OMAP3_SHOW_FEATURE(feat) \
if (omap3_has_ ##feat()) \
- printk(#feat" ");
+ n += scnprintf(buf + n, sizeof(buf) - n, #feat " ");
static void __init omap3_cpuinfo(void)
{
const char *cpu_name;
+ char buf[64];
+ int n = 0;
+
+ memset(buf, 0, sizeof(buf));
/*
* OMAP3430 and OMAP3530 are assumed to be same.
@@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void)
cpu_name = "OMAP3503";
}
- sprintf(soc_name, "%s", cpu_name);
+ scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name);
/* Print verbose information */
- pr_info("%s %s (", soc_name, soc_rev);
+ n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev);
OMAP3_SHOW_FEATURE(l2cache);
OMAP3_SHOW_FEATURE(iva);
@@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void)
OMAP3_SHOW_FEATURE(neon);
OMAP3_SHOW_FEATURE(isp);
OMAP3_SHOW_FEATURE(192mhz_clk);
-
- printk(")\n");
+ if (*(buf + n - 1) == ' ')
+ n--;
+ n += scnprintf(buf + n, sizeof(buf) - n, ")\n");
+ pr_info("%s", buf);
}
#define OMAP3_CHECK_FEATURE(status,feat) \
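The omap3_cpuinfo() rework above accumulates the whole feature list in a local buffer with scnprintf() and emits it with a single pr_info(), instead of a series of partial printk() calls whose output could be interleaved. A user-space sketch of the same accumulation, with snprintf() standing in for scnprintf() and made-up SoC/feature strings:

#include <stdio.h>

/* Sketch: append feature names into one buffer, then print a single line. */
int main(void)
{
        const char *features[] = { "l2cache", "neon", "isp" };   /* illustrative values */
        char buf[64];
        int n = 0;

        n += snprintf(buf + n, sizeof(buf) - n, "OMAP3630 ES1.0 (");
        for (unsigned i = 0; i < sizeof(features) / sizeof(features[0]); i++)
                n += snprintf(buf + n, sizeof(buf) - n, "%s ", features[i]);
        if (n && buf[n - 1] == ' ')
                buf[--n] = '\0';               /* drop the trailing space, as the patch does */
        snprintf(buf + n, sizeof(buf) - n, ")");
        printf("%s\n", buf);
        return 0;
}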
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 62680aad2126..718981bb80cd 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
if (has_uart4) {
en_uart4_mask = OMAP3630_EN_UART4_MASK;
grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
+ } else {
+ en_uart4_mask = 0;
+ grpsel_uart4_mask = 0;
}
/* Enable wakeups in PER */
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index cba8cada8c81..cd15dbd62671 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm,
return -ENODATA;
}
+ if (!voltdm->volt_data) {
+ pr_err("%s: No voltage data defined for vdd_%s\n",
+ __func__, voltdm->name);
+ return -ENODATA;
+ }
+
/* Adjust voltage to the exact voltage from the OPP table */
for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
if (voltdm->volt_data[i].volt_nominal >= target_volt) {
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 6d8e8e3365d1..4cdfab31a0b6 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -7,7 +7,7 @@
* : r4 = aborted context pc
* : r5 = aborted context psr
*
- * Returns : r4-r5, r10-r11, r13 preserved
+ * Returns : r4-r5, r9-r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
-/* f */
+/* f */ b .data_unknown
+
+.data_unknown_r9:
+ ldr r9, [sp], #4
.data_unknown: @ Part of jumptable
mov r0, r4
mov r1, r8
@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
beq do_DataAbort @ no writeback -> no fixup
+ str r9, [sp, #-4]!
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrhpre:
tst r8, #1 << 21 @ Check writeback bit
beq do_DataAbort @ No writeback -> no fixup
.data_arm_lateldrhpost:
+ str r9, [sp, #-4]!
and r9, r8, #0x00f @ get Rm / low nibble of immediate value
tst r8, #1 << 22 @ if (immediate offset)
andne r6, r8, #0xf00 @ { immediate high nibble
@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6 @ Undo increment
addeq r7, r7, r6 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrpreconst:
@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostconst:
movs r6, r8, lsl #20 @ Get offset
beq do_DataAbort @ zero -> no fixup
+ str r9, [sp, #-4]!
and r9, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsr #20 @ Undo increment
addeq r7, r7, r6, lsr #20 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrprereg:
@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
+ str r9, [sp, #-4]!
mov r9, r8, lsr #7 @ get shift count
ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
- b .data_unknown @ 2: MUL?
+ b .data_unknown_r9 @ 2: MUL?
nop
- b .data_unknown @ 3: MUL?
+ b .data_unknown_r9 @ 3: MUL?
nop
mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ 6: MUL?
+ b .data_unknown_r9 @ 6: MUL?
nop
- b .data_unknown @ 7: MUL?
+ b .data_unknown_r9 @ 7: MUL?
nop
mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ A: MUL?
+ b .data_unknown_r9 @ A: MUL?
nop
- b .data_unknown @ B: MUL?
+ b .data_unknown_r9 @ B: MUL?
nop
mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
- b .data_unknown @ E: MUL?
+ b .data_unknown_r9 @ E: MUL?
nop
- b .data_unknown @ F: MUL?
+ b .data_unknown_r9 @ F: MUL?
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8) + R bit
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
str r7, [r2, #13 << 2]
+ ldr r9, [sp], #4
b do_DataAbort
.data_thumb_ldmstm:
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8)
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
str r7, [r2, r9, lsr #6]
+ ldr r9, [sp], #4
b do_DataAbort
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ab4f74536057..ab7710002ba6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void)
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
-fs_initcall(dma_debug_do_init);
+core_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index f6d333f09bfe..8dea61640cc1 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin)
ret lr
ENDPROC(cpu_cm7_proc_fin)
- .section ".text.init", #alloc, #execinstr
+ .section ".init.text", #alloc, #execinstr
__v7m_cm7_setup:
mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 334271a25f70..7d3a2acc6a55 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -393,7 +393,7 @@
#address-cells = <3>;
#size-cells = <2>;
dma-coherent;
- ranges = <0x01000000 0x00 0x5f800000 0x00 0x5f800000 0x0 0x00800000>,
+ ranges = <0x01000000 0x00 0x00000000 0x00 0x5f800000 0x0 0x00800000>,
<0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>,
<0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
#interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index 123a58b29cbd..f0b857d6d73c 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -76,7 +76,7 @@
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x1010000>;
local-timer-stop;
- entry-latency-us = <300>;
+ entry-latency-us = <400>;
exit-latency-us = <1200>;
min-residency-us = <2500>;
};
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 007be826efce..26aaa6a7670f 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -76,7 +76,7 @@
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x1010000>;
local-timer-stop;
- entry-latency-us = <300>;
+ entry-latency-us = <400>;
exit-latency-us = <1200>;
min-residency-us = <2500>;
};
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index a7270eff6939..6e154d948a80 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -76,7 +76,7 @@
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x1010000>;
local-timer-stop;
- entry-latency-us = <300>;
+ entry-latency-us = <400>;
exit-latency-us = <1200>;
min-residency-us = <2500>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index c4762538ec01..e9bd58793464 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -105,7 +105,7 @@
status = "disabled";
};
- nb_perih_clk: nb-periph-clk@13000{
+ nb_periph_clk: nb-periph-clk@13000 {
compatible = "marvell,armada-3700-periph-clock-nb";
reg = <0x13000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -113,7 +113,7 @@
#clock-cells = <1>;
};
- sb_perih_clk: sb-periph-clk@18000{
+ sb_periph_clk: sb-periph-clk@18000 {
compatible = "marvell,armada-3700-periph-clock-sb";
reg = <0x18000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 842fb333285c..6bf9e241179b 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -130,8 +130,8 @@
reg = <0x700600 0x50>;
#address-cells = <0x1>;
#size-cells = <0x0>;
- cell-index = <1>;
- clocks = <&cps_syscon0 0 3>;
+ cell-index = <3>;
+ clocks = <&cps_syscon0 1 21>;
status = "disabled";
};
@@ -140,7 +140,7 @@
reg = <0x700680 0x50>;
#address-cells = <1>;
#size-cells = <0>;
- cell-index = <2>;
+ cell-index = <4>;
clocks = <&cps_syscon0 1 21>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index b65c193dc64e..7afbfb0f96a3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -300,8 +300,11 @@
ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+ <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
status = "disabled";
pcie0_intc: interrupt-controller {
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 39feb85a6931..6e1cb8c5af4d 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,7 +1,7 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#include <asm/cpufeature.h>
+#include <asm/cpucaps.h>
#include <asm/insn.h>
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
new file mode 100644
index 000000000000..87b446535185
--- /dev/null
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -0,0 +1,40 @@
+/*
+ * arch/arm64/include/asm/cpucaps.h
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#define ARM64_WORKAROUND_CLEAN_CACHE 0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+#define ARM64_WORKAROUND_845719 2
+#define ARM64_HAS_SYSREG_GIC_CPUIF 3
+#define ARM64_HAS_PAN 4
+#define ARM64_HAS_LSE_ATOMICS 5
+#define ARM64_WORKAROUND_CAVIUM_23154 6
+#define ARM64_WORKAROUND_834220 7
+#define ARM64_HAS_NO_HW_PREFETCH 8
+#define ARM64_HAS_UAO 9
+#define ARM64_ALT_PAN_NOT_UAO 10
+#define ARM64_HAS_VIRT_HOST_EXTN 11
+#define ARM64_WORKAROUND_CAVIUM_27456 12
+#define ARM64_HAS_32BIT_EL0 13
+#define ARM64_HYP_OFFSET_LOW 14
+#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+
+#define ARM64_NCAPS 16
+
+#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a27c3245ba21..0bc0b1de90c4 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
+#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@@ -24,25 +25,6 @@
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
-#define ARM64_WORKAROUND_CLEAN_CACHE 0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM64_WORKAROUND_845719 2
-#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_HAS_PAN 4
-#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_WORKAROUND_CAVIUM_23154 6
-#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_HYP_OFFSET_LOW 14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
-
-#define ARM64_NCAPS 16
-
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 18f746551bf6..ec3553eb9349 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bd94e6766759..e5050388e062 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -62,6 +62,9 @@ struct kvm_arch {
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* The last vcpu id that ran on each physical CPU */
+ int __percpu *last_vcpu_ran;
+
/* The maximum number of vCPUs depends on the used GIC model */
int max_vcpus;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a79b969c26fc..6f72fe8b0e3e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
return v;
}
-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
/*
* We currently only support a 40bit IPA.
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..fc756e22c84c 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -5,7 +5,6 @@
#include <linux/stringify.h>
#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#ifdef __ASSEMBLER__
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 2065f46fa740..38b6a2b49d68 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -46,7 +46,15 @@
#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
+/*
+ * PMUv3 event types: required events
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
+#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
/*
* Event filters for PMUv3
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index a9310a69fffd..57ae9d9ed9bb 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -31,17 +31,9 @@
/*
* ARMv8 PMUv3 Performance Events handling code.
- * Common event types.
+ * Common event types (some are defined in asm/perf_event.h).
*/
-/* Required events. */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
-#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
-
/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 9cc0ea784ae6..88e2f2b938f0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
write_sysreg(0, vttbr_el2);
}
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+
+ asm volatile("tlbi vmalle1" : : );
+ dsb(nsh);
+ isb();
+
+ write_sysreg(0, vttbr_el2);
+}
+
void __hyp_text __kvm_flush_vm_context(void)
{
dsb(ishst);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f302fdb3a030..87e7e6608cd8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
idx = ARMV8_PMU_CYCLE_IDX;
} else {
- BUG();
+ return false;
}
+ } else if (r->CRn == 0 && r->CRm == 9) {
+ /* PMCCNTR */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+ return false;
+
+ idx = ARMV8_PMU_CYCLE_IDX;
} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
/* PMEVCNTRn_EL0 */
if (pmu_access_event_counter_el0_disabled(vcpu))
@@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
} else {
- BUG();
+ return false;
}
if (!pmu_counter_idx_valid(vcpu, idx))
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index fbf40d3c8123..1a6bac7b076f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
VMLINUX_ENTRY_ADDRESS=$(entry-y) \
- PLATFORM=$(platform-y)
+ PLATFORM="$(platform-y)"
ifdef CONFIG_32BIT
bootvars-y += ADDR_BITS=32
endif
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index f604a272d91d..ffe3a1508e72 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -84,12 +84,13 @@
fpga_regs: system-controller@1f000000 {
compatible = "mti,malta-fpga", "syscon", "simple-mfd";
reg = <0x1f000000 0x1000>;
+ native-endian;
reboot {
compatible = "syscon-reboot";
regmap = <&fpga_regs>;
offset = <0x500>;
- mask = <0x4d>;
+ mask = <0x42>;
};
};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 0ea73e845440..d493ccbf274a 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -30,9 +30,19 @@ static __initdata const void *mach_match_data;
void __init prom_init(void)
{
+ plat_get_fdt();
+ BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
const struct mips_machine *check_mach;
const struct of_device_id *match;
+ if (fdt)
+ /* Already set up */
+ return (void *)fdt;
+
if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
/*
* We booted using the UHI boot protocol, so we have been
@@ -75,12 +85,6 @@ void __init prom_init(void)
/* Retrieve the machine's FDT */
fdt = mach->fdt;
}
-
- BUG_ON(!fdt);
-}
-
-void __init *plat_get_fdt(void)
-{
return (void *)fdt;
}
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 355dc25172e7..c05369e0b8d6 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -63,6 +63,8 @@ do { \
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+ return fcr31 & (FPU_CSR_UNI_X |
+ ((fcr31 & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
#endif /* _ASM_FPU_EMULATOR_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 07f58cfc1ab9..bebec370324f 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
/* Host KSEG0 address of the EI/DI offset */
void *kseg0_commpage;
- u32 io_gpr; /* GPR used as IO source/target */
+ /* Resume PC after MMIO completion */
+ unsigned long io_pc;
+ /* GPR used as IO source/target */
+ u32 io_gpr;
struct hrtimer comparecount_timer;
/* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
- u32 pending_load_cause;
-
/* Save/Restore the entryhi register when we are preempted/scheduled back in */
unsigned long preempt_entryhi;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 7dd2dd47909a..df78b2ca70eb 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -215,6 +215,12 @@
#endif
/*
+ * Wired register bits
+ */
+#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16)
+#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0)
+
+/*
* Values used for computation of new tlb entries
*/
#define PL_4K 12
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90d..c0ae27971e31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
} while (0)
/*
+ * Check FCSR for any unmasked exceptions pending set with `ptrace',
+ * clear them and send a signal.
+ */
+#define __sanitize_fcr31(next) \
+do { \
+ unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
+ void __user *pc; \
+ \
+ if (unlikely(fcr31)) { \
+ pc = (void __user *)task_pt_regs(next)->cp0_epc; \
+ next->thread.fpu.fcr31 &= ~fcr31; \
+ force_fcr31_sig(fcr31, pc, next); \
+ } \
+} while (0)
+
+/*
* For newly created kernel threads switch_to() will return to
* ret_from_kernel_thread, newly created user threads to ret_from_fork.
* That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
do { \
__mips_mt_fpaff_switch_to(prev); \
lose_fpu_inatomic(1, prev); \
+ if (tsk_used_math(next)) \
+ __sanitize_fcr31(next); \
if (cpu_has_dsp) { \
__save_dsp(prev); \
__restore_dsp(next); \
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index 4a2349302b55..dd179fd8acda 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -1,6 +1,9 @@
#ifndef __ASM_TLB_H
#define __ASM_TLB_H
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+
/*
* MIPS doesn't need any special per-pte or per-vma handling, except
* we need to flush cache for area to be unmapped.
@@ -22,6 +25,16 @@
((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
(cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+static inline unsigned int num_wired_entries(void)
+{
+ unsigned int wired = read_c0_wired();
+
+ if (cpu_has_mips_r6)
+ wired &= MIPSR6_WIRED_WIRED;
+
+ return wired;
+}
+
#include <asm-generic/tlb.h>
#endif /* __ASM_TLB_H */
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 2a45867d3b4f..a4964c334cab 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+ return 0;
+}
+
/**
* mips_cpc_phys_base - retrieve the physical base address of the CPC
*
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
- /* Otherwise, give it the default address & enable it */
+ /* Otherwise, use the default address */
cpc_base = mips_cpc_default_phys_base();
+ if (!cpc_base)
+ return cpc_base;
+
+ /* Enable the CPC, mapped at the default address */
write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
return cpc_base;
}
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 22dedd62818a..bd09853aecdf 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
- * @fcr31: Floating Point Control and Status Register returned
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
*/
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
@@ -1172,13 +1172,13 @@ fpu_emul:
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- *fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~res;
/*
* this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6103b24d1bfc..a92994d60e91 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Poke at FCSR according to its mask. Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
*/
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
- value &= ~FPU_CSR_ALL_X;
fcr31 = child->thread.fpu.fcr31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
#endif
case FPC_CSR:
+ init_fp_ctx(child);
ptrace_setfcr31(child, data);
break;
case DSP_BASE ... DSP_BASE + 5: {
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index b4ac6374a38f..918f2f6d3861 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -21,106 +21,84 @@
#define EX(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
+ PTR 9b,fault; \
+ .previous
+
+#define EX2(a,b) \
+9: a,##b; \
+ .section __ex_table,"a"; \
PTR 9b,bad_stack; \
+ PTR 9b+4,bad_stack; \
.previous
.set noreorder
.set mips1
- /* Save floating point context */
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
- cfc1 t1,fcr31
- EX(swc1 $f0,(SC_FPREGS+0)(a0))
- EX(swc1 $f1,(SC_FPREGS+8)(a0))
- EX(swc1 $f2,(SC_FPREGS+16)(a0))
- EX(swc1 $f3,(SC_FPREGS+24)(a0))
- EX(swc1 $f4,(SC_FPREGS+32)(a0))
- EX(swc1 $f5,(SC_FPREGS+40)(a0))
- EX(swc1 $f6,(SC_FPREGS+48)(a0))
- EX(swc1 $f7,(SC_FPREGS+56)(a0))
- EX(swc1 $f8,(SC_FPREGS+64)(a0))
- EX(swc1 $f9,(SC_FPREGS+72)(a0))
- EX(swc1 $f10,(SC_FPREGS+80)(a0))
- EX(swc1 $f11,(SC_FPREGS+88)(a0))
- EX(swc1 $f12,(SC_FPREGS+96)(a0))
- EX(swc1 $f13,(SC_FPREGS+104)(a0))
- EX(swc1 $f14,(SC_FPREGS+112)(a0))
- EX(swc1 $f15,(SC_FPREGS+120)(a0))
- EX(swc1 $f16,(SC_FPREGS+128)(a0))
- EX(swc1 $f17,(SC_FPREGS+136)(a0))
- EX(swc1 $f18,(SC_FPREGS+144)(a0))
- EX(swc1 $f19,(SC_FPREGS+152)(a0))
- EX(swc1 $f20,(SC_FPREGS+160)(a0))
- EX(swc1 $f21,(SC_FPREGS+168)(a0))
- EX(swc1 $f22,(SC_FPREGS+176)(a0))
- EX(swc1 $f23,(SC_FPREGS+184)(a0))
- EX(swc1 $f24,(SC_FPREGS+192)(a0))
- EX(swc1 $f25,(SC_FPREGS+200)(a0))
- EX(swc1 $f26,(SC_FPREGS+208)(a0))
- EX(swc1 $f27,(SC_FPREGS+216)(a0))
- EX(swc1 $f28,(SC_FPREGS+224)(a0))
- EX(swc1 $f29,(SC_FPREGS+232)(a0))
- EX(swc1 $f30,(SC_FPREGS+240)(a0))
- EX(swc1 $f31,(SC_FPREGS+248)(a0))
- EX(sw t1,(SC_FPC_CSR)(a0))
- cfc1 t0,$0 # implementation/version
+ cfc1 t1, fcr31
+ EX2(s.d $f0, 0(a0))
+ EX2(s.d $f2, 16(a0))
+ EX2(s.d $f4, 32(a0))
+ EX2(s.d $f6, 48(a0))
+ EX2(s.d $f8, 64(a0))
+ EX2(s.d $f10, 80(a0))
+ EX2(s.d $f12, 96(a0))
+ EX2(s.d $f14, 112(a0))
+ EX2(s.d $f16, 128(a0))
+ EX2(s.d $f18, 144(a0))
+ EX2(s.d $f20, 160(a0))
+ EX2(s.d $f22, 176(a0))
+ EX2(s.d $f24, 192(a0))
+ EX2(s.d $f26, 208(a0))
+ EX2(s.d $f28, 224(a0))
+ EX2(s.d $f30, 240(a0))
jr ra
+ EX(sw t1, (a1))
.set pop
- .set nomacro
- EX(sw t0,(SC_FPC_EIR)(a0))
- .set macro
END(_save_fp_context)
-/*
- * Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
*
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
- EX(lw t0,(SC_FPC_CSR)(a0))
- EX(lwc1 $f0,(SC_FPREGS+0)(a0))
- EX(lwc1 $f1,(SC_FPREGS+8)(a0))
- EX(lwc1 $f2,(SC_FPREGS+16)(a0))
- EX(lwc1 $f3,(SC_FPREGS+24)(a0))
- EX(lwc1 $f4,(SC_FPREGS+32)(a0))
- EX(lwc1 $f5,(SC_FPREGS+40)(a0))
- EX(lwc1 $f6,(SC_FPREGS+48)(a0))
- EX(lwc1 $f7,(SC_FPREGS+56)(a0))
- EX(lwc1 $f8,(SC_FPREGS+64)(a0))
- EX(lwc1 $f9,(SC_FPREGS+72)(a0))
- EX(lwc1 $f10,(SC_FPREGS+80)(a0))
- EX(lwc1 $f11,(SC_FPREGS+88)(a0))
- EX(lwc1 $f12,(SC_FPREGS+96)(a0))
- EX(lwc1 $f13,(SC_FPREGS+104)(a0))
- EX(lwc1 $f14,(SC_FPREGS+112)(a0))
- EX(lwc1 $f15,(SC_FPREGS+120)(a0))
- EX(lwc1 $f16,(SC_FPREGS+128)(a0))
- EX(lwc1 $f17,(SC_FPREGS+136)(a0))
- EX(lwc1 $f18,(SC_FPREGS+144)(a0))
- EX(lwc1 $f19,(SC_FPREGS+152)(a0))
- EX(lwc1 $f20,(SC_FPREGS+160)(a0))
- EX(lwc1 $f21,(SC_FPREGS+168)(a0))
- EX(lwc1 $f22,(SC_FPREGS+176)(a0))
- EX(lwc1 $f23,(SC_FPREGS+184)(a0))
- EX(lwc1 $f24,(SC_FPREGS+192)(a0))
- EX(lwc1 $f25,(SC_FPREGS+200)(a0))
- EX(lwc1 $f26,(SC_FPREGS+208)(a0))
- EX(lwc1 $f27,(SC_FPREGS+216)(a0))
- EX(lwc1 $f28,(SC_FPREGS+224)(a0))
- EX(lwc1 $f29,(SC_FPREGS+232)(a0))
- EX(lwc1 $f30,(SC_FPREGS+240)(a0))
- EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+ EX(lw t0, (a1))
+ EX2(l.d $f0, 0(a0))
+ EX2(l.d $f2, 16(a0))
+ EX2(l.d $f4, 32(a0))
+ EX2(l.d $f6, 48(a0))
+ EX2(l.d $f8, 64(a0))
+ EX2(l.d $f10, 80(a0))
+ EX2(l.d $f12, 96(a0))
+ EX2(l.d $f14, 112(a0))
+ EX2(l.d $f16, 128(a0))
+ EX2(l.d $f18, 144(a0))
+ EX2(l.d $f20, 160(a0))
+ EX2(l.d $f22, 176(a0))
+ EX2(l.d $f24, 192(a0))
+ EX2(l.d $f26, 208(a0))
+ EX2(l.d $f28, 224(a0))
+ EX2(l.d $f30, 240(a0))
jr ra
- ctc1 t0,fcr31
+ ctc1 t0, fcr31
.set pop
END(_restore_fp_context)
.set reorder
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 47077380c15c..9cc7bfab3419 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -21,7 +21,14 @@
.set push
SET_HARDFLOAT
- /* Save floating point context */
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
@@ -30,59 +37,59 @@
cfc1 t1,fcr31
/* Store the 16 double precision registers */
- sdc1 $f0,(SC_FPREGS+0)(a0)
- sdc1 $f2,(SC_FPREGS+16)(a0)
- sdc1 $f4,(SC_FPREGS+32)(a0)
- sdc1 $f6,(SC_FPREGS+48)(a0)
- sdc1 $f8,(SC_FPREGS+64)(a0)
- sdc1 $f10,(SC_FPREGS+80)(a0)
- sdc1 $f12,(SC_FPREGS+96)(a0)
- sdc1 $f14,(SC_FPREGS+112)(a0)
- sdc1 $f16,(SC_FPREGS+128)(a0)
- sdc1 $f18,(SC_FPREGS+144)(a0)
- sdc1 $f20,(SC_FPREGS+160)(a0)
- sdc1 $f22,(SC_FPREGS+176)(a0)
- sdc1 $f24,(SC_FPREGS+192)(a0)
- sdc1 $f26,(SC_FPREGS+208)(a0)
- sdc1 $f28,(SC_FPREGS+224)(a0)
- sdc1 $f30,(SC_FPREGS+240)(a0)
+ sdc1 $f0,0(a0)
+ sdc1 $f2,16(a0)
+ sdc1 $f4,32(a0)
+ sdc1 $f6,48(a0)
+ sdc1 $f8,64(a0)
+ sdc1 $f10,80(a0)
+ sdc1 $f12,96(a0)
+ sdc1 $f14,112(a0)
+ sdc1 $f16,128(a0)
+ sdc1 $f18,144(a0)
+ sdc1 $f20,160(a0)
+ sdc1 $f22,176(a0)
+ sdc1 $f24,192(a0)
+ sdc1 $f26,208(a0)
+ sdc1 $f28,224(a0)
+ sdc1 $f30,240(a0)
jr ra
- sw t0,SC_FPC_CSR(a0)
+ sw t0,(a1)
1: jr ra
nop
END(_save_fp_context)
-/* Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
*
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
bgez t0,1f
- lw t0,SC_FPC_CSR(a0)
+ lw t0,(a1)
/* Restore the 16 double precision registers */
- ldc1 $f0,(SC_FPREGS+0)(a0)
- ldc1 $f2,(SC_FPREGS+16)(a0)
- ldc1 $f4,(SC_FPREGS+32)(a0)
- ldc1 $f6,(SC_FPREGS+48)(a0)
- ldc1 $f8,(SC_FPREGS+64)(a0)
- ldc1 $f10,(SC_FPREGS+80)(a0)
- ldc1 $f12,(SC_FPREGS+96)(a0)
- ldc1 $f14,(SC_FPREGS+112)(a0)
- ldc1 $f16,(SC_FPREGS+128)(a0)
- ldc1 $f18,(SC_FPREGS+144)(a0)
- ldc1 $f20,(SC_FPREGS+160)(a0)
- ldc1 $f22,(SC_FPREGS+176)(a0)
- ldc1 $f24,(SC_FPREGS+192)(a0)
- ldc1 $f26,(SC_FPREGS+208)(a0)
- ldc1 $f28,(SC_FPREGS+224)(a0)
- ldc1 $f30,(SC_FPREGS+240)(a0)
+ ldc1 $f0,0(a0)
+ ldc1 $f2,16(a0)
+ ldc1 $f4,32(a0)
+ ldc1 $f6,48(a0)
+ ldc1 $f8,64(a0)
+ ldc1 $f10,80(a0)
+ ldc1 $f12,96(a0)
+ ldc1 $f14,112(a0)
+ ldc1 $f16,128(a0)
+ ldc1 $f18,144(a0)
+ ldc1 $f20,160(a0)
+ ldc1 $f22,176(a0)
+ ldc1 $f24,192(a0)
+ ldc1 $f26,208(a0)
+ ldc1 $f28,224(a0)
+ ldc1 $f30,240(a0)
jr ra
ctc1 t0,fcr31
1: jr ra
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0891..1958910b75c0 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
#if defined(CONFIG_USE_OF)
/* Get any additional entropy passed in device tree */
- {
+ if (initial_boot_params) {
int node, len;
u64 *prop;
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 0d57909d9026..f66e5ce505b2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
+#ifndef CONFIG_HIGHMEM
+ /*
+ * Skip highmem here so we get an accurate max_low_pfn if low
+ * memory stops short of high memory.
+ * If the region overlaps HIGHMEM_START, end is clipped so
+ * max_pfn excludes the highmem portion.
+ */
+ if (start >= PFN_DOWN(HIGHMEM_START))
+ continue;
+ if (end > PFN_DOWN(HIGHMEM_START))
+ end = PFN_DOWN(HIGHMEM_START);
+#endif
+
if (end > max_low_pfn)
max_low_pfn = end;
if (start < min_low_pfn)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 1f5fdee1dfc3..3905003dfe2b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
print_ip_sym(pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
- printk("\n");
+ pr_cont("\n");
}
/*
@@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
printk("Stack :");
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
- if (i && ((i % (64 / field)) == 0))
- printk("\n ");
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk(" ");
+ }
if (i > 39) {
- printk(" ...");
+ pr_cont(" ...");
break;
}
if (__get_user(stackdata, sp++)) {
- printk(" (Bad stack address)");
+ pr_cont(" (Bad stack address)");
break;
}
- printk(" %0*lx", field, stackdata);
+ pr_cont(" %0*lx", field, stackdata);
i++;
}
- printk("\n");
+ pr_cont("\n");
show_backtrace(task, regs);
}
@@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
long i;
unsigned short __user *pc16 = NULL;
- printk("\nCode:");
+ printk("Code:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
- printk(" (Bad address in epc)\n");
+ pr_cont(" (Bad address in epc)\n");
break;
}
- printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
+ pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
+ pr_cont("\n");
}
static void __show_regs(const struct pt_regs *regs)
@@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
- printk(" %0*lx", field, 0UL);
+ pr_cont(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
- printk(" %*s", field, "");
+ pr_cont(" %*s", field, "");
else
- printk(" %0*lx", field, regs->regs[i]);
+ pr_cont(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
- printk("\n");
+ pr_cont("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
@@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
if (cpu_has_3kex) {
if (regs->cp0_status & ST0_KUO)
- printk("KUo ");
+ pr_cont("KUo ");
if (regs->cp0_status & ST0_IEO)
- printk("IEo ");
+ pr_cont("IEo ");
if (regs->cp0_status & ST0_KUP)
- printk("KUp ");
+ pr_cont("KUp ");
if (regs->cp0_status & ST0_IEP)
- printk("IEp ");
+ pr_cont("IEp ");
if (regs->cp0_status & ST0_KUC)
- printk("KUc ");
+ pr_cont("KUc ");
if (regs->cp0_status & ST0_IEC)
- printk("IEc ");
+ pr_cont("IEc ");
} else if (cpu_has_4kex) {
if (regs->cp0_status & ST0_KX)
- printk("KX ");
+ pr_cont("KX ");
if (regs->cp0_status & ST0_SX)
- printk("SX ");
+ pr_cont("SX ");
if (regs->cp0_status & ST0_UX)
- printk("UX ");
+ pr_cont("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
- printk("USER ");
+ pr_cont("USER ");
break;
case KSU_SUPERVISOR:
- printk("SUPERVISOR ");
+ pr_cont("SUPERVISOR ");
break;
case KSU_KERNEL:
- printk("KERNEL ");
+ pr_cont("KERNEL ");
break;
default:
- printk("BAD_MODE ");
+ pr_cont("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
- printk("ERL ");
+ pr_cont("ERL ");
if (regs->cp0_status & ST0_EXL)
- printk("EXL ");
+ pr_cont("EXL ");
if (regs->cp0_status & ST0_IE)
- printk("IE ");
+ pr_cont("IE ");
}
- printk("\n");
+ pr_cont("\n");
exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is impotant as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+ if (fcr31 & FPU_CSR_INV_X)
+ si.si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si.si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si.si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si.si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si.si_code = FPE_FLTRES;
+ else
+ si.si_code = __SI_FAULT;
+ force_sig_info(SIGFPE, &si, tsk);
+}
+
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
@@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 0;
case SIGFPE:
- si.si_addr = fault_addr;
- si.si_signo = sig;
- /*
- * Inexact can happen together with Overflow or Underflow.
- * Respect the mask to deliver the correct exception.
- */
- fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
- (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
- if (fcr31 & FPU_CSR_INV_X)
- si.si_code = FPE_FLTINV;
- else if (fcr31 & FPU_CSR_DIV_X)
- si.si_code = FPE_FLTDIV;
- else if (fcr31 & FPU_CSR_OVF_X)
- si.si_code = FPE_FLTOVF;
- else if (fcr31 & FPU_CSR_UDF_X)
- si.si_code = FPE_FLTUND;
- else if (fcr31 & FPU_CSR_INE_X)
- si.si_code = FPE_FLTRES;
- else
- si.si_code = __SI_FAULT;
- force_sig_info(sig, &si, current);
+ force_fcr31_sig(fcr31, fault_addr, current);
return 1;
case SIGBUS:
@@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
@@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
- write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
@@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
@@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave
- * any of the cause bits set in $fcr31.
+ * any enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 8770f32c9e0b..aa0937423e28 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+ kvm_clear_c0_guest_status(cop0, ST0_ERL);
+ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
kvm_read_c0_guest_epc(cop0));
kvm_clear_c0_guest_status(cop0, ST0_EXL);
vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
- } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
- kvm_clear_c0_guest_status(cop0, ST0_ERL);
- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
} else {
kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
vcpu->arch.pc);
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
+ unsigned long curr_pc;
u32 op, rt;
u32 bytes;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
- vcpu->arch.pending_load_cause = cause;
+ /*
+ * Find the resume PC now while we have safe and easy access to the
+ * prior branch instruction, and save it for
+ * kvm_mips_complete_mmio_load() to restore later.
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+ vcpu->arch.io_pc = vcpu->arch.pc;
+ vcpu->arch.pc = curr_pc;
+
vcpu->arch.io_gpr = rt;
switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
goto done;
}
- er = update_pc(vcpu, vcpu->arch.pending_load_cause);
- if (er == EMULATE_FAIL)
- return er;
+ /* Restore saved resume PC */
+ vcpu->arch.pc = vcpu->arch.io_pc;
switch (run->mmio.len) {
case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
break;
}
- if (vcpu->arch.pending_load_cause & CAUSEF_BD)
- kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
- vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
- vcpu->mmio_needed);
-
done:
return er;
}
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 622037d851a3..06a60b19acfb 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- int cpu = smp_processor_id();
+ int i, cpu = smp_processor_id();
unsigned int gasid;
/*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
+ for_each_possible_cpu(i)
+ if (i != cpu)
+ vcpu->arch.guest_user_asid[cpu] = 0;
vcpu->arch.last_user_gasid = gasid;
}
}
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 03883ba806e2..3b677c851be0 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
asid_version_mask(cpu)) {
- u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
- KVM_ENTRYHI_ASID;
-
kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
- vcpu->arch.last_user_gasid = gasid;
newasid++;
kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 0f80b936e75e..6eb50a7137db 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
- printk("va=%0*lx asid=%0*lx",
- vwidth, (entryhi & ~0x1fffUL),
- asidwidth, entryhi & asidmask);
+ pr_cont("va=%0*lx asid=%0*lx",
+ vwidth, (entryhi & ~0x1fffUL),
+ asidwidth, entryhi & asidmask);
if (cpu_has_guestid)
- printk(" gid=%02lx",
- (guestctl1 & MIPS_GCTL1_RID)
+ pr_cont(" gid=%02lx",
+ (guestctl1 & MIPS_GCTL1_RID)
>> MIPS_GCTL1_RID_SHIFT);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo0() << 30;
pa = (pa << 6) & PAGE_MASK;
- printk("\n\t[");
+ pr_cont("\n\t[");
if (cpu_has_rixi)
- printk("ri=%d xi=%d ",
- (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
- (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
- printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
- pwidth, pa, c0,
- (entrylo0 & ENTRYLO_D) ? 1 : 0,
- (entrylo0 & ENTRYLO_V) ? 1 : 0,
- (entrylo0 & ENTRYLO_G) ? 1 : 0);
+ pr_cont("ri=%d xi=%d ",
+ (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+ pwidth, pa, c0,
+ (entrylo0 & ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & ENTRYLO_G) ? 1 : 0);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo1() << 30;
pa = (pa << 6) & PAGE_MASK;
if (cpu_has_rixi)
- printk("ri=%d xi=%d ",
- (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
- (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
- printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
- pwidth, pa, c1,
- (entrylo1 & ENTRYLO_D) ? 1 : 0,
- (entrylo1 & ENTRYLO_V) ? 1 : 0,
- (entrylo1 & ENTRYLO_G) ? 1 : 0);
+ pr_cont("ri=%d xi=%d ",
+ (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+ pwidth, pa, c1,
+ (entrylo1 & ENTRYLO_D) ? 1 : 0,
+ (entrylo1 & ENTRYLO_V) ? 1 : 0,
+ (entrylo1 & ENTRYLO_G) ? 1 : 0);
}
printk("\n");
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 744f4a7bc49d..85b4086e553e 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
*/
printk("Index: %2d ", i);
- printk("va=%08lx asid=%08lx"
- " [pa=%06lx n=%d d=%d v=%d g=%d]",
- entryhi & PAGE_MASK,
- entryhi & asid_mask,
- entrylo0 & PAGE_MASK,
- (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
+ pr_cont("va=%08lx asid=%08lx"
+ " [pa=%06lx n=%d d=%d v=%d g=%d]",
+ entryhi & PAGE_MASK,
+ entryhi & asid_mask,
+ entrylo0 & PAGE_MASK,
+ (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
}
}
printk("\n");
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index d56a855828c2..3bef306cdfdb 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -209,17 +209,18 @@ bad_area_nosemaphore:
if (show_unhandled_signals &&
unhandled_signal(tsk, SIGSEGV) &&
__ratelimit(&ratelimit_state)) {
- pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx",
+ pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address);
pr_info("epc = %0*lx in", field,
(unsigned long) regs->cp0_epc);
- print_vma_addr(" ", regs->cp0_epc);
+ print_vma_addr(KERN_CONT " ", regs->cp0_epc);
+ pr_cont("\n");
pr_info("ra = %0*lx in", field,
(unsigned long) regs->regs[31]);
- print_vma_addr(" ", regs->regs[31]);
- pr_info("\n");
+ print_vma_addr(KERN_CONT " ", regs->regs[31]);
+ pr_cont("\n");
}
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
info.si_signo = SIGSEGV;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3a6edecc3f38..e86ebcf5c071 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -118,7 +118,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
writex_c0_entrylo1(entrylo);
}
#endif
- tlbidx = read_c0_wired();
+ tlbidx = num_wired_entries();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
mtc0_tlbw_hazard();
@@ -147,7 +147,7 @@ void kunmap_coherent(void)
local_irq_save(flags);
old_ctx = read_c0_entryhi();
- wired = read_c0_wired() - 1;
+ wired = num_wired_entries() - 1;
write_c0_wired(wired);
write_c0_index(wired);
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index bba9c1484b41..0596505770db 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -65,7 +65,7 @@ void local_flush_tlb_all(void)
write_c0_entrylo0(0);
write_c0_entrylo1(0);
- entry = read_c0_wired();
+ entry = num_wired_entries();
/*
* Blast 'em all away.
@@ -385,7 +385,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
old_ctx = read_c0_entryhi();
htw_stop();
old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
+ wired = num_wired_entries();
write_c0_wired(wired + 1);
write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
@@ -449,7 +449,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
htw_stop();
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
+ wired = num_wired_entries();
if (--temp_tlb_entry < wired) {
printk(KERN_WARNING
"No TLB space left for add_temporary_entry\n");
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index d9563ddb337e..746bf5caaffc 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
ret = nios2_clocksource_init(timer);
break;
default:
+ ret = 0;
break;
}
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
index 4ce7a01a252d..5f55da9cbfd5 100644
--- a/arch/openrisc/include/asm/cache.h
+++ b/arch/openrisc/include/asm/cache.h
@@ -23,6 +23,8 @@
* they shouldn't be hard-coded!
*/
+#define __ro_after_init __read_mostly
+
#define L1_CACHE_BYTES 16
#define L1_CACHE_SHIFT 4
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 71c4a3aa3752..a14b86587013 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -34,7 +34,9 @@ config PARISC
select HAVE_ARCH_HASH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
+ select GENERIC_SCHED_CLOCK
+ select HAVE_UNSTABLE_SCHED_CLOCK if SMP
+ select GENERIC_CLOCKEVENTS
select ARCH_NO_COHERENT_DMA_MMAP
select CPU_NO_EFFICIENT_FFS
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index a9b9407f38f7..6b0741e7a7ed 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -368,7 +368,9 @@
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
-
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
#define LINUX_GATEWAY_ADDR 0x100
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 629eb464d5ba..c263301648f3 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size, start;
+ unsigned long threshold;
alltime = mfctl(16);
flush_data_cache();
@@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);
- /* Racy, but if we see an intermediate value, it's ok too... */
- parisc_cache_flush_threshold = size * alltime / rangetime;
-
- parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
- if (!parisc_cache_flush_threshold)
- parisc_cache_flush_threshold = FLUSH_THRESHOLD;
-
- if (parisc_cache_flush_threshold > cache_info.dc_size)
- parisc_cache_flush_threshold = cache_info.dc_size;
-
- printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+ threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
+ if (threshold > cache_info.dc_size)
+ threshold = cache_info.dc_size;
+ if (threshold)
+ parisc_cache_flush_threshold = threshold;
+ printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
parisc_cache_flush_threshold/1024);
/* calculate TLB flush threshold */
@@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void)
flush_tlb_all();
alltime = mfctl(16) - alltime;
- size = PAGE_SIZE;
+ size = 0;
start = (unsigned long) _text;
rangetime = mfctl(16);
while (start < (unsigned long) _end) {
@@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);
- parisc_tlb_flush_threshold = size * alltime / rangetime;
- parisc_tlb_flush_threshold *= num_online_cpus();
- parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
- if (!parisc_tlb_flush_threshold)
- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
-
- printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+ threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+ if (threshold)
+ parisc_tlb_flush_threshold = threshold;
+ printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
}
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index f8150669b8c6..700e2d2da096 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
if (dev->num_addrs) {
int k;
- printk(", additional addresses: ");
+ pr_cont(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
- printk("0x%lx ", dev->addr[k]);
+ pr_cont("0x%lx ", dev->addr[k]);
}
- printk("\n");
+ pr_cont("\n");
}
/**
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 545f9d2fe711..c05d1876d27c 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -58,7 +58,7 @@ void __init setup_pdc(void)
status = pdc_system_map_find_mods(&module_result, &module_path, 0);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_SYSTEM_MAP;
- printk("System Map.\n");
+ pr_cont("System Map.\n");
return;
}
@@ -77,7 +77,7 @@ void __init setup_pdc(void)
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_PAT;
- printk("64 bit PAT.\n");
+ pr_cont("64 bit PAT.\n");
return;
}
#endif
@@ -97,12 +97,12 @@ void __init setup_pdc(void)
case 0xC: /* 715/64, at least */
pdc_type = PDC_TYPE_SNAKE;
- printk("Snake.\n");
+ pr_cont("Snake.\n");
return;
default: /* Everything else */
- printk("Unsupported.\n");
+ pr_cont("Unsupported.\n");
panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
}
}
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 985e06da37f5..1b39a2acaadf 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */
fitmanymiddle: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
- pitlbe 0(%sr1, %r28)
+ pitlbe %r0(%sr1, %r28)
pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
fdtmanymiddle: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
- pdtlbe 0(%sr1, %r28)
+ pdtlbe %r0(%sr1, %r28)
pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
@@ -626,12 +626,12 @@ ENTRY_CFI(copy_user_page_asm)
/* Purge any old translations */
#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
- pdtlb,l 0(%r29)
+ pdtlb,l %r0(%r28)
+ pdtlb,l %r0(%r29)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
- pdtlb 0(%r29)
+ pdtlb %r0(%r28)
+ pdtlb %r0(%r29)
tlb_unlock %r20,%r21,%r22
#endif
@@ -774,10 +774,10 @@ ENTRY_CFI(clear_user_page_asm)
/* Purge any old translation */
#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
+ pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
+ pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
#endif
@@ -858,10 +858,10 @@ ENTRY_CFI(flush_dcache_page_asm)
/* Purge any old translation */
#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
+ pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
+ pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
#endif
@@ -898,10 +898,10 @@ ENTRY_CFI(flush_dcache_page_asm)
sync
#ifdef CONFIG_PA20
- pdtlb,l 0(%r25)
+ pdtlb,l %r0(%r25)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r25)
+ pdtlb %r0(%r25)
tlb_unlock %r20,%r21,%r22
#endif
@@ -931,13 +931,18 @@ ENTRY_CFI(flush_icache_page_asm)
depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
- /* Purge any old translation */
+ /* Purge any old translation. Note that the FIC instruction
+ * may use either the instruction or data TLB. Given that we
+ * have a flat address space, it's not clear which TLB will be
+ * used. So, we purge both entries. */
#ifdef CONFIG_PA20
+ pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r28)
#else
tlb_lock %r20,%r21,%r22
- pitlb (%sr4,%r28)
+ pdtlb %r0(%r28)
+ pitlb %r0(%sr4,%r28)
tlb_unlock %r20,%r21,%r22
#endif
@@ -976,10 +981,12 @@ ENTRY_CFI(flush_icache_page_asm)
sync
#ifdef CONFIG_PA20
+ pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r25)
#else
tlb_lock %r20,%r21,%r22
- pitlb (%sr4,%r25)
+ pdtlb %r0(%r28)
+ pitlb %r0(%sr4,%r25)
tlb_unlock %r20,%r21,%r22
#endif
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 02d9ed0f3949..494ff6e8c88a 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,
if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n");
- set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
purge_tlb_start(flags);
+ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
pdtlb_kernel(orig_vaddr);
purge_tlb_end(flags);
vaddr += PAGE_SIZE;
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 81d6f6391944..2e66a887788e 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -334,6 +334,10 @@ static int __init parisc_init(void)
/* tell PDC we're Linux. Nevermind failure. */
pdc_stable_write(0x40, &osid, sizeof(osid));
+ /* start with known state */
+ flush_cache_all_local();
+ flush_tlb_all_local(NULL);
+
processor_init();
#ifdef CONFIG_SMP
pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d03422e5f188..23de307c3052 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -100,14 +100,12 @@ set_thread_pointer:
.endr
/* This address must remain fixed at 0x100 for glibc's syscalls to work */
- .align 256
+ .align LINUX_GATEWAY_ADDR
linux_gateway_entry:
gate .+8, %r0 /* become privileged */
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
- mfsp %sr7,%r1 /* save user sr7 */
- mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
depdi 0, 31, 32, %r21
1:
#endif
+
+ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
+ * by external interrupts.
+ */
+ mfsp %sr7,%r1 /* save user sr7 */
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
+ ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
@@ -474,11 +481,6 @@ lws_start:
comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
- /* WARNING: Trashing sr2 and sr3 */
- mfsp %sr7,%r1 /* get userspace into sr3 */
- mtsp %r1,%sr3
- mtsp %r0,%sr2 /* get kernel space into sr2 */
-
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
@@ -627,9 +629,9 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%sr3,%r26), %r28
+1: ldw,ma 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%sr3,%r26)
+2: stw,ma %r24, 0(%r26)
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
@@ -706,9 +708,9 @@ lws_compare_and_swap_2:
nop
/* 8bit load */
-4: ldb 0(%sr3,%r25), %r25
+4: ldb 0(%r25), %r25
b cas2_lock_start
-5: ldb 0(%sr3,%r24), %r24
+5: ldb 0(%r24), %r24
nop
nop
nop
@@ -716,9 +718,9 @@ lws_compare_and_swap_2:
nop
/* 16bit load */
-6: ldh 0(%sr3,%r25), %r25
+6: ldh 0(%r25), %r25
b cas2_lock_start
-7: ldh 0(%sr3,%r24), %r24
+7: ldh 0(%r24), %r24
nop
nop
nop
@@ -726,9 +728,9 @@ lws_compare_and_swap_2:
nop
/* 32bit load */
-8: ldw 0(%sr3,%r25), %r25
+8: ldw 0(%r25), %r25
b cas2_lock_start
-9: ldw 0(%sr3,%r24), %r24
+9: ldw 0(%r24), %r24
nop
nop
nop
@@ -737,14 +739,14 @@ lws_compare_and_swap_2:
/* 64bit load */
#ifdef CONFIG_64BIT
-10: ldd 0(%sr3,%r25), %r25
-11: ldd 0(%sr3,%r24), %r24
+10: ldd 0(%r25), %r25
+11: ldd 0(%r24), %r24
#else
/* Load new value into r22/r23 - high/low */
-10: ldw 0(%sr3,%r25), %r22
-11: ldw 4(%sr3,%r25), %r23
+10: ldw 0(%r25), %r22
+11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
-12: flddx 0(%sr3,%r24), %fr4
+12: flddx 0(%r24), %fr4
#endif
cas2_lock_start:
@@ -794,30 +796,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%sr3,%r26), %r29
+13: ldb,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%sr3,%r26)
+14: stb,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%sr3,%r26), %r29
+15: ldh,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%sr3,%r26)
+16: sth,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%sr3,%r26), %r29
+17: ldw,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%sr3,%r26)
+18: stw,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -825,22 +827,22 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%sr3,%r26), %r29
+19: ldd,ma 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%sr3,%r26)
+20: std,ma %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
-19: ldw,ma 0(%sr3,%r26), %r29
+19: ldw,ma 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
-20: ldw,ma 4(%sr3,%r26), %r29
+20: ldw,ma 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
-21: fstdx %fr4, 0(%sr3,%r26)
+21: fstdx %fr4, 0(%r26)
copy %r0, %r28
#endif
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9b63b876a13a..325f30d82b64 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
+#include <linux/sched_clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
@@ -39,18 +40,6 @@
static unsigned long clocktick __read_mostly; /* timer cycles per tick */
-#ifndef CONFIG_64BIT
-/*
- * The processor-internal cycle counter (Control Register 16) is used as time
- * source for the sched_clock() function. This register is 64bit wide on a
- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
- * with a per-cpu variable which we increase every time the counter
- * wraps-around (which happens every ~4 secounds).
- */
-static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
-#endif
-
/*
* We keep time on PA-RISC Linux by using the Interval Timer which is
* a pair of registers; one is read-only and one is write-only; both
@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
*/
mtctl(next_tick, 16);
-#if !defined(CONFIG_64BIT)
- /* check for overflow on a 32bit kernel (every ~4 seconds). */
- if (unlikely(next_tick < now))
- this_cpu_inc(cr16_high_32_bits);
-#endif
-
/* Skip one clocktick on purpose if we missed next_tick.
* The new CR16 must be "later" than current CR16 otherwise
* itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
/* clock source code */
-static cycle_t read_cr16(struct clocksource *cs)
+static cycle_t notrace read_cr16(struct clocksource *cs)
{
return get_cycles();
}
@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
}
-/*
- * sched_clock() framework
- */
-
-static u32 cyc2ns_mul __read_mostly;
-static u32 cyc2ns_shift __read_mostly;
-
-u64 sched_clock(void)
+static u64 notrace read_cr16_sched_clock(void)
{
- u64 now;
-
- /* Get current cycle counter (Control Register 16). */
-#ifdef CONFIG_64BIT
- now = mfctl(16);
-#else
- now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
-#endif
-
- /* return the value in ns (cycles_2_ns) */
- return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+ return get_cycles();
}
@@ -316,17 +282,16 @@ u64 sched_clock(void)
void __init time_init(void)
{
- unsigned long current_cr16_khz;
+ unsigned long cr16_hz;
- current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
clocktick = (100 * PAGE0->mem_10msec) / HZ;
-
- /* calculate mult/shift values for cr16 */
- clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
- NSEC_PER_MSEC, 0);
-
start_cpu_itimer(); /* get CPU 0 started */
+ cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
+
/* register at clocksource framework */
- clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
+ clocksource_register_hz(&clocksource_cr16, cr16_hz);
+
+ /* register as sched_clock source */
+ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
}
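
For context on the conversion handled by the generic framework (this note is editorial, not part of the patch): sched_clock_register() takes a read callback, a counter width, and a rate, and precomputes a mult/shift pair so each sched_clock() call is just a counter read followed by a multiply and a shift, essentially the cyc2ns arithmetic the hunk above removes from the arch code. A minimal user-space sketch of that arithmetic, assuming an illustrative 250 MHz CR16:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cr16_hz = 250000000ULL;  /* assumed rate, illustration only */
	uint32_t shift   = 26;            /* picked so mult fits in 32 bits */
	uint32_t mult    = (uint32_t)((1000000000ULL << shift) / cr16_hz);

	uint64_t cycles  = 500000000ULL;  /* pretend CR16 read: 2 s of ticks */
	uint64_t ns      = (cycles * (uint64_t)mult) >> shift;

	printf("%llu ns\n", (unsigned long long)ns);  /* prints 2000000000 */
	return 0;
}
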
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 57d42d129033..78aaf4ffd7ab 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -232,8 +232,12 @@ void start(void)
console_ops.close();
kentry = (kernel_entry_t) vmlinux.addr;
- if (ft_addr)
- kentry(ft_addr, 0, NULL);
+ if (ft_addr) {
+ if (platform_ops.kentry)
+ platform_ops.kentry(ft_addr, vmlinux.addr);
+ else
+ kentry(ft_addr, 0, NULL);
+ }
else
kentry((unsigned long)initrd.addr, initrd.size,
loader_info.promptr);
diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
index ff2f1b97bc53..2a99fc9a3ccf 100644
--- a/arch/powerpc/boot/opal-calls.S
+++ b/arch/powerpc/boot/opal-calls.S
@@ -12,6 +12,19 @@
.text
+ .globl opal_kentry
+opal_kentry:
+ /* r3 is the fdt ptr */
+ mtctr r4
+ li r4, 0
+ li r5, 0
+ li r6, 0
+ li r7, 0
+ ld r11,opal@got(r2)
+ ld r8,0(r11)
+ ld r9,8(r11)
+ bctr
+
#define OPAL_CALL(name, token) \
.globl name; \
name: \
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
index 1f37e1c1d6d8..d7b4fd47eb44 100644
--- a/arch/powerpc/boot/opal.c
+++ b/arch/powerpc/boot/opal.c
@@ -23,14 +23,25 @@ struct opal {
static u32 opal_con_id;
+/* see opal-wrappers.S */
int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
int64_t opal_console_flush(uint64_t term_number);
int64_t opal_poll_events(uint64_t *outstanding_event_mask);
+void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr);
+
static int opal_con_open(void)
{
+ /*
+ * When OPAL loads the boot kernel it stashes the OPAL base and entry
+ * address in r8 and r9 so the kernel can use the OPAL console
+ * before unflattening the devicetree. While executing, the wrapper will
+ * probably trash r8 and r9, so this kentry hook restores them before
+ * entering the decompressed kernel.
+ */
+ platform_ops.kentry = opal_kentry;
return 0;
}
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 309d1b127e96..fad1862f4b2d 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -30,6 +30,7 @@ struct platform_ops {
void * (*realloc)(void *ptr, unsigned long size);
void (*exit)(void);
void * (*vmlinux_alloc)(unsigned long size);
+ void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr);
};
extern struct platform_ops platform_ops;
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index d1492736d852..e0baba1535e6 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -14,6 +14,10 @@
#include <linux/threads.h>
#include <linux/kprobes.h>
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/uaccess.h>
+#include <asm/epapr_hcalls.h>
#include <uapi/asm/ucontext.h>
@@ -109,4 +113,12 @@ void early_setup_secondary(void);
/* time */
void accumulate_stolen_time(void);
+/* misc runtime */
+extern u64 __bswapdi2(u64);
+extern s64 __lshrdi3(s64, int);
+extern s64 __ashldi3(s64, int);
+extern s64 __ashrdi3(s64, int);
+extern int __cmpdi2(s64, s64);
+extern int __ucmpdi2(u64, u64);
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ee655ed1ff1b..1e8fceb308a5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
-static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
#ifdef __powerpc64__
unsigned long s = (__force u32)sum;
@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
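
A hedged, illustrative fragment (not from this patch) of how these helpers are normally used on the IPv4 transmit path: checksum the TCP segment first, then fold in the pseudo-header. The wider __u32 len / __u8 proto arguments match what callers already pass. Needs <linux/in.h>, <linux/ip.h>, <linux/tcp.h> and <net/checksum.h>; the function name is made up for the example.

static void example_tcp_v4_csum(struct iphdr *iph, struct tcphdr *th, int len)
{
	__wsum seg = csum_partial(th, len, 0);

	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
				      len, IPPROTO_TCP, seg);
}
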
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 84d49b197c32..9a3eee661297 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -91,7 +91,7 @@
*/
#define LOAD_HANDLER(reg, label) \
ld reg,PACAKBASE(r13); /* get high part of &label */ \
- ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+ ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
#define __LOAD_HANDLER(reg, label) \
ld reg,PACAKBASE(r13); \
@@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943) \
std ra,offset(r13); \
END_FTR_SECTION_NESTED(ftr,ftr,943)
-#define EXCEPTION_PROLOG_0(area) \
- GET_PACA(r13); \
+#define EXCEPTION_PROLOG_0_PACA(area) \
std r9,area+EX_R9(r13); /* save r9 */ \
OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
HMT_MEDIUM; \
std r10,area+EX_R10(r13); /* save r10 - r12 */ \
OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+#define EXCEPTION_PROLOG_0(area) \
+ GET_PACA(r13); \
+ EXCEPTION_PROLOG_0_PACA(area)
+
#define __EXCEPTION_PROLOG_1(area, extra, vec) \
OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
@@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
+/* Have the PACA in r13 already */
+#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0_PACA(area); \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label, h);
+
#define __KVMTEST(h, n) \
lbz r10,HSTATE_IN_GUEST(r13); \
cmpwi r10,0; \
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e88368354e49..e311c25751a4 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -29,6 +29,12 @@
*/
/*
+ * Kernel read only support.
+ * We added the ppp value 0b110 in ISA 2.04.
+ */
+#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000)
+
+/*
* We need to clear top 16bits of va (from the remaining 64 bits )in
* tlbie* instructions
*/
@@ -103,10 +109,10 @@
#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 0132831b3081..c56ea8c84abb 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -460,5 +460,6 @@
#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \
((IH & 0x7) << 21))
+#define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9cd4e8cbc78c..9e1499f98def 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -355,6 +355,7 @@
#define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
#define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
#define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
+#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */
#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
#define LPCR_MER_SH 11
#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 52ff3f025437..37c027ca83b2 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- ori r3, r3, LPCR_PECEDH
- ori r3, r3, LPCR_HVICE
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ or r3, r3, r4
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
@@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- ori r3, r3, LPCR_PECEDH
- ori r3, r3, LPCR_HVICE
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ or r3, r3, r4
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 08ba447a4b3d..1ba82ea90230 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -116,7 +116,9 @@ EXC_VIRT_NONE(0x4000, 0x4100)
EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ GET_PACA(r13)
+ clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
+ EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
IDLETEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x200)
@@ -124,6 +126,9 @@ EXC_VIRT_NONE(0x4100, 0x4200)
#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
+BEGIN_FTR_SECTION
+ GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
bl pnv_restore_hyp_resource
li r0,PNV_THREAD_RUNNING
@@ -169,7 +174,7 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
SET_SCRATCH0(r13) /* save r13 */
/*
* Running native on arch 2.06 or later, we may wakeup from winkle
- * inside machine check. If yes, then last bit of HSPGR0 would be set
+ * inside machine check. If yes, then last bit of HSPRG0 would be set
* to 1. Hence clear it unconditionally.
*/
GET_PACA(r13)
@@ -388,7 +393,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
/*
* Go back to winkle. Please note that this thread was woken up in
* machine check from winkle and have not restored the per-subcore
- * state. Hence before going back to winkle, set last bit of HSPGR0
+ * state. Hence before going back to winkle, set last bit of HSPRG0
* to 1. This will make sure that if this thread gets woken up
* again at reset vector 0x100 then it will get chance to restore
* the subcore state.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ce6dc61b15b2..49a680d5ae37 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs)
int instr;
if (!(i % 8))
- printk("\n");
+ pr_cont("\n");
#if !defined(CONFIG_BOOKE)
/* If executing with the IMMU off, adjust pc rather
@@ -1227,18 +1227,18 @@ static void show_instructions(struct pt_regs *regs)
if (!__kernel_text_address(pc) ||
probe_kernel_address((unsigned int __user *)pc, instr)) {
- printk(KERN_CONT "XXXXXXXX ");
+ pr_cont("XXXXXXXX ");
} else {
if (regs->nip == pc)
- printk(KERN_CONT "<%08x> ", instr);
+ pr_cont("<%08x> ", instr);
else
- printk(KERN_CONT "%08x ", instr);
+ pr_cont("%08x ", instr);
}
pc += sizeof(int);
}
- printk("\n");
+ pr_cont("\n");
}
struct regbit {
@@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
for (; bits->bit; ++bits)
if (val & bits->bit) {
- printk("%s%s", s, bits->name);
+ pr_cont("%s%s", s, bits->name);
s = sep;
}
}
@@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val)
* T: Transactional (bit 34)
*/
if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
- printk(",TM[");
+ pr_cont(",TM[");
print_bits(val, msr_tm_bits, "");
- printk("]");
+ pr_cont("]");
}
}
#else
@@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {}
static void print_msr_bits(unsigned long val)
{
- printk("<");
+ pr_cont("<");
print_bits(val, msr_bits, ",");
print_tm_bits(val);
- printk(">");
+ pr_cont(">");
}
#ifdef CONFIG_PPC64
@@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs)
printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
- printk("CFAR: "REG" ", regs->orig_gpr3);
+ pr_cont("CFAR: "REG" ", regs->orig_gpr3);
if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
+ pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
- printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+ pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
- printk("SOFTE: %ld ", regs->softe);
+ pr_cont("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr))
- printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
+ pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
- printk("\nGPR%02d: ", i);
- printk(REG " ", regs->gpr[i]);
+ pr_cont("\nGPR%02d: ", i);
+ pr_cont(REG " ", regs->gpr[i]);
if (i == LAST_VOLATILE && !FULL_REGS(regs))
break;
}
- printk("\n");
+ pr_cont("\n");
#ifdef CONFIG_KALLSYMS
/*
* Lookup NIP late so we have the best chance of getting the
@@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((ip == rth) && curr_frame >= 0) {
- printk(" (%pS)",
+ pr_cont(" (%pS)",
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
}
#endif
if (firstframe)
- printk(" (unreliable)");
- printk("\n");
+ pr_cont(" (unreliable)");
+ pr_cont("\n");
}
firstframe = 0;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7ac8e6eaab5b..8d586cff8a41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
if (firmware_has_feature(FW_FEATURE_OPAL))
opal_configure_cores();
- /* Enable AIL if supported, and we are in hypervisor mode */
- if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
- early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
- unsigned long lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
- }
+ /* AIL on native is done in cpu_ready_for_interrupts() */
}
}
static void cpu_ready_for_interrupts(void)
{
+ /*
+ * Enable AIL if supported, and we are in hypervisor mode. This
+ * is called once for every processor.
+ *
+ * If we are not in hypervisor mode the job is done once for
+ * the whole partition in configure_exceptions().
+ */
+ if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+ early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ unsigned long lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ }
+
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 44d3c3a38e3e..78dabf065ba9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -193,8 +193,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
/*
* Kernel read only mapped with ppp bits 0b110
*/
- if (!(pteflags & _PAGE_WRITE))
- rflags |= (HPTE_R_PP0 | 0x2);
+ if (!(pteflags & _PAGE_WRITE)) {
+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ rflags |= (HPTE_R_PP0 | 0x2);
+ else
+ rflags |= 0x3;
+ }
} else {
if (pteflags & _PAGE_RWX)
rflags |= 0x2;
@@ -1029,6 +1033,10 @@ void hash__early_init_mmu_secondary(void)
{
/* Initialize hash table for that CPU */
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ update_hid_for_hash();
+
if (!cpu_has_feature(CPU_FTR_ARCH_300))
mtspr(SPRN_SDR1, _SDR1);
else
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index ed7bddc456b7..688b54517655 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -388,6 +388,10 @@ void radix__early_init_mmu_secondary(void)
* update partition table control register and UPRT
*/
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ update_hid_for_radix();
+
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index bda8c43be78a..3493cf4e0452 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
__tlbiel_pid(pid, set, ric);
}
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
return;
}
@@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 28f03ca60100..794bebb43d23 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -363,11 +363,11 @@ out:
static int diag224_get_name_table(void)
{
/* memory must be below 2GB */
- diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_cpu_names)
return -ENOMEM;
if (diag224(diag224_cpu_names)) {
- kfree(diag224_cpu_names);
+ free_page((unsigned long) diag224_cpu_names);
return -EOPNOTSUPP;
}
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
static void diag224_delete_name_table(void)
{
- kfree(diag224_cpu_names);
+ free_page((unsigned long) diag224_cpu_names);
}
static int diag224_idx2name(int index, char *name)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 000e6e91f6a0..3667d20e997f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -62,9 +62,11 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__start_ro_after_init = .;
+ __start_data_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
}
+ __end_data_ro_after_init = .;
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index bd98b7d25200..05c98bb853cf 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
if (r < 0)
goto out;
- diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_buf || diag224(diag224_buf))
goto out;
@@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
sctns->par.infpval1 |= PAR_WGHT_VLD;
out:
- kfree(diag224_buf);
+ free_page((unsigned long)diag224_buf);
vfree(diag204_buf);
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7350c8bc13a2..6b2f72f523b9 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -423,7 +423,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
struct scatterlist *s;
- unsigned long pa;
+ unsigned long pa = 0;
int ret;
size = PAGE_ALIGN(size);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index b23c76b42d6e..165ecdd24d22 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC
select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
select HAVE_ARCH_HARDENED_USERCOPY
+ select PROVE_LOCKING_SMALL if PROVE_LOCKING
config SPARC32
def_bool !64BIT
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG
config ARCH_PROC_KCORE_TEXT
def_bool y
+config ARCH_ATU
+ bool
+ default y if SPARC64
+
+config ARCH_DMA_ADDR_T_64BIT
+ bool
+ default y if ARCH_ATU
+
config IOMMU_HELPER
bool
default y if SPARC64
@@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y if SPARC64
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "13"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 13 means that the largest free memory block is 2^12 pages.
+
source "mm/Kconfig"
if SPARC64
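
A quick worked check of the FORCE_MAX_ZONEORDER help text above (editorial arithmetic, assuming sparc64's default 8 KiB base pages):

	order 13  ->  largest buddy block = 2^(13-1) pages
	          =   4096 pages * 8 KiB  = 32 MiB
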
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a6cfdabb6054..5b0ed48e5b0c 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -24,9 +24,10 @@ typedef struct {
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
- unsigned short sock_id;
+ unsigned short sock_id; /* physical package */
unsigned short core_id;
- int proc_id;
+ unsigned short max_cache_id; /* groupings of highest shared cache */
+ unsigned short proc_id; /* strand (aka HW thread) id */
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 666d5ba230d2..73cb8978df58 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
*/
#define HV_FAST_PCI_MSG_SETVALID 0xd3
+/* PCI IOMMU v2 definitions and services
+ *
+ * While the PCI IO definitions above remain valid, IOMMU v2 adds new PCI IO
+ * definitions and services.
+ *
+ * CTE Clump Table Entry. First level table entry in the ATU.
+ *
+ * pci_device_list
+ * A 32-bit aligned list of pci_devices.
+ *
+ * pci_device_listp
+ * real address of a pci_device_list. 32-bit aligned.
+ *
+ * iotte IOMMU translation table entry.
+ *
+ * iotte_attributes
+ * IO Attributes for IOMMU v2 mappings. In addition to
+ * read and write, IOMMU v2 supports relaxed ordering
+ *
+ * io_page_list A 64-bit aligned list of real addresses. Each real
+ * address in an io_page_list must be properly aligned
+ * to the pagesize of the given IOTSB.
+ *
+ * io_page_list_p Real address of an io_page_list, 64-bit aligned.
+ *
+ * IOTSB IO Translation Storage Buffer. An aligned table of
+ * IOTTEs. Each IOTSB has a pagesize, table size, and
+ * virtual address associated with it that must match
+ * a pagesize and table size supported by the underlying
+ * hardware implementation. The alignment requirements
+ * for an IOTSB depend on the pagesize used for that IOTSB.
+ * Each IOTTE in an IOTSB maps one pagesize-sized page.
+ * The size of the IOTSB dictates how large of a virtual
+ * address space the IOTSB is capable of mapping.
+ *
+ * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus
+ * iotsb_handle represents a binding of an IOTSB to a
+ * PCI root complex.
+ *
+ * iotsb_index Zero-based IOTTE number within an IOTSB.
+ */
+
+/* The index_count argument consists of two fields:
+ * bits 63:48 #iottes and bits 47:0 iotsb_index
+ */
+#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
+ (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
+
+/* pci_iotsb_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_CONF
+ * ARG0: devhandle
+ * ARG1: r_addr
+ * ARG2: size
+ * ARG3: pagesize
+ * ARG4: iova
+ * RET0: status
+ * RET1: iotsb_handle
+ * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize
+ * EBADALIGN r_addr is not properly aligned
+ * ENORADDR r_addr is not a valid real address
+ * ETOOMANY No further IOTSBs may be configured
+ * EBUSY Duplicate devhandle, r_addr, iova combination
+ *
+ * Create an IOTSB suitable for the PCI root complex identified by devhandle,
+ * for the DMA virtual address defined by the argument iova.
+ *
+ * r_addr is the properly aligned base address of the IOTSB and size is the
+ * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to
+ * being configured. If it contains any values other than zeros then the
+ * behavior is undefined.
+ *
+ * pagesize is the size of each page in the IOTSB. Note that the combination of
+ * size (table size) and pagesize must be valid.
+ *
+ * iova is the DMA virtual address this IOTSB will map.
+ *
+ * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
+ * Once configured, privileged access to the IOTSB memory is prohibited and
+ * creates undefined behavior. The only permitted access is indirect via these
+ * services.
+ */
+#define HV_FAST_PCI_IOTSB_CONF 0x190
+
+/* pci_iotsb_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_INFO
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * RET0: status
+ * RET1: r_addr
+ * RET2: size
+ * RET3: pagesize
+ * RET4: iova
+ * RET5: #bound
+ * ERRORS: EINVAL Invalid devhandle or iotsb_handle
+ *
+ * This service returns configuration information about an IOTSB previously
+ * created with pci_iotsb_conf.
+ *
+ * iotsb_handle value 0 may be used with this service to inquire about the
+ * legacy IOTSB that may or may not exist. If the service succeeds, the return
+ * values describe the legacy IOTSB and I/O virtual addresses mapped by that
+ * table. However, the table base address r_addr may contain the value -1 which
+ * indicates a memory range that cannot be accessed or be reclaimed.
+ *
+ * The return value #bound contains the number of PCI devices that iotsb_handle
+ * is currently bound to.
+ */
+#define HV_FAST_PCI_IOTSB_INFO 0x191
+
+/* pci_iotsb_unconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or iotsb_handle
+ * EBUSY The IOTSB is bound and may not be unconfigured
+ *
+ * This service unconfigures the IOTSB identified by the devhandle and
+ * iotsb_handle arguments, previously created with pci_iotsb_conf.
+ * The IOTSB must not be currently bound to any device or the service will fail
+ *
+ * If the call succeeds, iotsb_handle is no longer valid.
+ */
+#define HV_FAST_PCI_IOTSB_UNCONF 0x192
+
+/* pci_iotsb_bind()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_BIND
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: pci_device
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device
+ * EBUSY A PCI function is already bound to an IOTSB at the same
+ * address range as specified by devhandle, iotsb_handle.
+ *
+ * This service binds the PCI function specified by the argument pci_device to
+ * the IOTSB specified by the arguments devhandle and iotsb_handle.
+ *
+ * The PCI device function is bound to the specified IOTSB with the IOVA range
+ * specified when the IOTSB was configured via pci_iotsb_conf. If the function
+ * is already bound then it is unbound first.
+ */
+#define HV_FAST_PCI_IOTSB_BIND 0x193
+
+/* pci_iotsb_unbind()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: pci_device
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device
+ * ENOMAP The PCI function was not bound to the specified IOTSB
+ *
+ * This service unbinds the PCI device specified by the argument pci_device
+ * from the IOTSB identified by the arguments devhandle and iotsb_handle.
+ *
+ * If the PCI device is not bound to the specified IOTSB then this service will
+ * fail with status ENOMAP
+ */
+#define HV_FAST_PCI_IOTSB_UNBIND 0x194
+
+/* pci_iotsb_get_binding()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iova
+ * RET0: status
+ * RET1: iotsb_handle
+ * ERRORS: EINVAL Invalid devhandle, pci_device, or iova
+ * ENOMAP The PCI function is not bound to an IOTSB at iova
+ *
+ * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
+ * and DMA virtual address, iova.
+ *
+ * iova must be the base address of a DMA virtual address range as defined by
+ * the iommu-address-ranges property in the root complex device node defined
+ * by the argument devhandle.
+ */
+#define HV_FAST_PCI_IOTSB_GET_BINDING 0x195
+
+/* pci_iotsb_map()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_MAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: index_count
+ * ARG3: iotte_attributes
+ * ARG4: io_page_list_p
+ * RET0: status
+ * RET1: #mapped
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes,
+ * iotsb_index or iotte_attributes
+ * EBADALIGN Improperly aligned io_page_list_p or I/O page
+ * address in the I/O page list.
+ * ENORADDR Invalid io_page_list_p or I/O page address in
+ * the I/O page list.
+ *
+ * This service creates and flushes mappings in the IOTSB defined by the
+ * arguments devhandle, iotsb.
+ *
+ * The index_count argument consists of two fields. Bits 63:48 contain #iotte
+ * and bits 47:0 contain iotsb_index
+ *
+ * The first mapping is created in the IOTSB index specified by iotsb_index.
+ * Subsequent mappings are created at iotsb_index+1 and so on.
+ *
+ * The attributes of each mapping are defined by the argument iotte_attributes.
+ *
+ * The io_page_list_p specifies the real address of the 64-bit-aligned list of
+ * #iottes I/O page addresses. Each page address must be a properly aligned
+ * real address of a page to be mapped in the IOTSB. The first entry in the I/O
+ * page list contains the real address of the first page, the 2nd entry for the
+ * 2nd page, and so on.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The return value #mapped is the actual number of mappings created, which may
+ * be less than or equal to the argument #iottes. If the function returns
+ * successfully with a #mapped value less than the requested #iottes then the
+ * caller should continue to invoke the service with updated iotsb_index,
+ * #iottes, and io_page_list_p arguments until all pages are mapped.
+ *
+ * This service must not be used to demap a mapping. In other words, all
+ * mappings must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP 0x196
+
+/* pci_iotsb_map_one()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: iotte_attributes
+ * ARG4: r_addr
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index
+ * or iotte_attributes
+ * EBADALIGN Improperly aligned r_addr
+ * ENORADDR Invalid r_addr
+ *
+ * This service creates and flushes a single mapping in the IOTSB defined by the
+ * arguments devhandle, iotsb.
+ *
+ * The mapping for the page at r_addr is created at the IOTSB index specified by
+ * iotsb_index with the attributes iotte_attributes.
+ *
+ * This service must not be used to demap a mapping. In other words, the mapping
+ * must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP_ONE 0x197
+
+/* pci_iotsb_demap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: #iottes
+ * RET0: status
+ * RET1: #unmapped
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes
+ *
+ * This service unmaps and flushes up to #iottes mappings starting at index
+ * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
+ * than or equal to the requested number of IOTTEs, #iottes.
+ *
+ * If #unmapped is less than #iottes, the caller should continue to invoke this
+ * service with updated iotsb_index and #iottes arguments until all pages are
+ * demapped.
+ */
+#define HV_FAST_PCI_IOTSB_DEMAP 0x198
+
+/* pci_iotsb_getmap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * RET0: status
+ * RET1: r_addr
+ * RET2: iotte_attributes
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index
+ * ENOMAP No mapping was found
+ *
+ * This service returns the mapping specified by index iotsb_index from the
+ * IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * Upon success, the real address of the mapping shall be returned in
+ * r_addr and the IOTTE mapping attributes shall be returned in
+ * iotte_attributes.
+ *
+ * The return value iotte_attributes may not include optional features used in
+ * the call to create the mapping.
+ */
+#define HV_FAST_PCI_IOTSB_GETMAP 0x199
+
+/* pci_iotsb_sync_mappings()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: #iottes
+ * RET0: status
+ * RET1: #synced
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
+ *
+ * This service synchronizes #iottes mappings starting at index iotsb_index in
+ * the IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs synchronized is returned in #synced, which may
+ * be less than or equal to the requested number, #iottes.
+ *
+ * If, upon a successful return, #synced is less than #iottes, the caller should
+ * continue to invoke this service with updated iotsb_index and #iottes
+ * arguments until all pages are synchronized.
+ */
+#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a
+
/* Logical Domain Channel services. */
#define LDC_CHANNEL_DOWN 0
@@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
#define HV_GRP_SDIO 0x0108
#define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110
+#define HV_GRP_ATU 0x0111
#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
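
A hedged sketch of how a client of the new pci_iotsb_map() service documented above would drive it. The iotsb_map_pages() helper is hypothetical; the pci_sun4v_iotsb_map() wrapper name, argument order, and the retry loop on a short #mapped return all mirror the pci_sun4v.c hunk later in this diff.

/* Hypothetical helper: map npages IOTTEs starting at entry, retrying until
 * the hypervisor has accepted the whole io_page_list, since RET1 (#mapped)
 * may be less than the number requested. */
static long iotsb_map_pages(unsigned long devhandle, unsigned long iotsb_num,
			    unsigned long entry, unsigned long npages,
			    unsigned long attrs, u64 *pglist)
{
	unsigned long mapped;

	while (npages) {
		u64 index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);

		if (pci_sun4v_iotsb_map(devhandle, iotsb_num, index_count,
					attrs, __pa(pglist), &mapped) != HV_EOK)
			return -1;

		entry  += mapped;	/* resume where the hypervisor stopped */
		npages -= mapped;
		pglist += mapped;
	}
	return 0;
}
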
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index cd0d69fa7592..f24f356f2503 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -24,8 +24,36 @@ struct iommu_arena {
unsigned int limit;
};
+#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
+
+/* Data structures for SPARC ATU architecture */
+struct atu_iotsb {
+ void *table; /* IOTSB table base virtual addr*/
+ u64 ra; /* IOTSB table real addr */
+ u64 dvma_size; /* ranges[3].size or OS selected 32G size */
+ u64 dvma_base; /* ranges[3].base */
+ u64 table_size; /* IOTSB table size */
+ u64 page_size; /* IO PAGE size for IOTSB */
+ u32 iotsb_num; /* tsbnum is same as iotsb_handle */
+};
+
+struct atu_ranges {
+ u64 base;
+ u64 size;
+};
+
+struct atu {
+ struct atu_ranges *ranges;
+ struct atu_iotsb *iotsb;
+ struct iommu_map_table tbl;
+ u64 base;
+ u64 size;
+ u64 dma_addr_mask;
+};
+
struct iommu {
struct iommu_map_table tbl;
+ struct atu *atu;
spinlock_t lock;
u32 dma_addr_mask;
iopte_t *page_table;
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index d9c5876c6121..8011e79f59c9 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -134,7 +134,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
*(volatile __u32 *)&lp->lock = ~0U;
}
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" st %%g0, [%0]"
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 87990b7c6b0d..07c9f2e9bf57 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -96,7 +96,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -119,7 +119,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
: "memory");
}
-static int inline arch_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -140,7 +140,7 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
return tmp1;
}
-static void inline arch_read_unlock(arch_rwlock_t *lock)
+static inline void arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -156,7 +156,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -181,7 +181,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -190,7 +190,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
: "memory");
}
-static int inline arch_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index bec481aaca16..7b4898a36eee 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -44,14 +44,20 @@ int __node_distance(int, int);
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
+#define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu])
#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#endif /* CONFIG_SMP */
extern cpumask_t cpu_core_map[NR_CPUS];
extern cpumask_t cpu_core_sib_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
+
+/**
+ * Return cores that share the last level cache.
+ */
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_core_map[cpu];
+ return &cpu_core_sib_cache_map[cpu];
}
#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index b68acc563235..5373136c412b 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -82,7 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
return 1;
}
-void __ret_efault(void);
void __retl_efault(void);
/* Uh, these should become the main single-value transfer routines..
@@ -189,55 +188,34 @@ int __get_user_bad(void);
unsigned long __must_check ___copy_from_user(void *to,
const void __user *from,
unsigned long size);
-unsigned long copy_from_user_fixup(void *to, const void __user *from,
- unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret;
-
check_object_size(to, size, false);
- ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
-
- return ret;
+ return ___copy_from_user(to, from, size);
}
#define __copy_from_user copy_from_user
unsigned long __must_check ___copy_to_user(void __user *to,
const void *from,
unsigned long size);
-unsigned long copy_to_user_fixup(void __user *to, const void *from,
- unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
- unsigned long ret;
-
check_object_size(from, size, true);
- ret = ___copy_to_user(to, from, size);
- if (unlikely(ret))
- ret = copy_to_user_fixup(to, from, size);
- return ret;
+ return ___copy_to_user(to, from, size);
}
#define __copy_to_user copy_to_user
unsigned long __must_check ___copy_in_user(void __user *to,
const void __user *from,
unsigned long size);
-unsigned long copy_in_user_fixup(void __user *to, void __user *from,
- unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_in_user(to, from, size);
-
- if (unlikely(ret))
- ret = copy_in_user_fixup(to, from, size);
- return ret;
+ return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index beba6c11554c..6aa3da152c20 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -926,48 +926,11 @@ tlb_type: .word 0 /* Must NOT end up in BSS */
EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
- .globl __ret_efault, __retl_efault, __ret_one, __retl_one
-ENTRY(__ret_efault)
- ret
- restore %g0, -EFAULT, %o0
-ENDPROC(__ret_efault)
-EXPORT_SYMBOL(__ret_efault)
-
ENTRY(__retl_efault)
retl
mov -EFAULT, %o0
ENDPROC(__retl_efault)
-ENTRY(__retl_one)
- retl
- mov 1, %o0
-ENDPROC(__retl_one)
-
-ENTRY(__retl_one_fp)
- VISExitHalf
- retl
- mov 1, %o0
-ENDPROC(__retl_one_fp)
-
-ENTRY(__ret_one_asi)
- wr %g0, ASI_AIUS, %asi
- ret
- restore %g0, 1, %o0
-ENDPROC(__ret_one_asi)
-
-ENTRY(__retl_one_asi)
- wr %g0, ASI_AIUS, %asi
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi)
-
-ENTRY(__retl_one_asi_fp)
- wr %g0, ASI_AIUS, %asi
- VISExitHalf
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi_fp)
-
ENTRY(__retl_o1)
retl
mov %o1, %o0
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 662500fa555f..267731234ce8 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -39,6 +39,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_SDIO, },
{ .group = HV_GRP_SDIO_ERR, },
{ .group = HV_GRP_REBOOT_DATA, },
+ { .group = HV_GRP_ATU, .flags = FLAG_PRE_API },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 5c615abff030..852a3291db96 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask)
struct iommu *iommu = dev->archdata.iommu;
u64 dma_addr_mask = iommu->dma_addr_mask;
- if (device_mask >= (1UL << 32UL))
- return 0;
+ if (device_mask > DMA_BIT_MASK(32)) {
+ if (iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ return 0;
+ }
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
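
With the ATU wired into dma_supported() above, a hedged example of the driver-visible effect (generic DMA API usage, not code from this patch): a sun4v PCI device requesting a 64-bit DMA mask is no longer capped at 32 bits when an ATU is present.

	/* Illustrative probe-time snippet; pdev is the driver's struct pci_dev. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
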
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index b40cec252905..828493329f68 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -13,7 +13,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/iommu-helper.h>
-#include <linux/scatterlist.h>
#include <asm/iommu.h>
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 59bbeff55024..07933b9e9ce0 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -13,19 +13,30 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- u32 val;
u32 *insn = (u32 *) (unsigned long) entry->code;
+ u32 val;
if (type == JUMP_LABEL_JMP) {
s32 off = (s32)entry->target - (s32)entry->code;
+ bool use_v9_branch = false;
+
+ BUG_ON(off & 3);
#ifdef CONFIG_SPARC64
- /* ba,pt %xcc, . + (off << 2) */
- val = 0x10680000 | ((u32) off >> 2);
-#else
- /* ba . + (off << 2) */
- val = 0x10800000 | ((u32) off >> 2);
+ if (off <= 0xfffff && off >= -0x100000)
+ use_v9_branch = true;
#endif
+ if (use_v9_branch) {
+ /* WDISP19 - target is . + immed << 2 */
+ /* ba,pt %xcc, . + off */
+ val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
+ } else {
+ /* WDISP22 - target is . + immed << 2 */
+ BUG_ON(off > 0x7fffff);
+ BUG_ON(off < -0x800000);
+ /* ba . + off */
+ val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
+ }
} else {
val = 0x01000000;
}
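
A user-space illustration (editorial, with made-up offsets) of the two encodings chosen above: the v9 "ba,pt %xcc" form carries a signed 19-bit word displacement (WDISP19), so it only reaches roughly +/-1 MiB, and larger offsets fall back to the v8 "ba" form with its 22-bit displacement (WDISP22). The CONFIG_SPARC64 guard is dropped here for brevity.

#include <stdio.h>
#include <stdint.h>

static uint32_t encode_branch(int32_t off)	/* byte offset, word aligned */
{
	if (off <= 0xfffff && off >= -0x100000)	/* fits in WDISP19 */
		return 0x10680000 | (((uint32_t)off >> 2) & 0x7ffff);
	return 0x10800000 | (((uint32_t)off >> 2) & 0x3fffff);	/* WDISP22 */
}

int main(void)
{
	printf("%08x\n", encode_branch(0x1000));	/* 10680400: ba,pt %xcc */
	printf("%08x\n", encode_branch(0x200000));	/* 10880000: ba (v8)    */
	return 0;
}
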
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 11228861d9b4..8a6982dfd733 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -645,13 +645,20 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node,
cpu_data(*id).core_id = core_id;
}
-static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
- int sock_id)
+static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
+ int max_cache_id)
{
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
- if (*id < num_possible_cpus())
- cpu_data(*id).sock_id = sock_id;
+ if (*id < num_possible_cpus()) {
+ cpu_data(*id).max_cache_id = max_cache_id;
+
+ /**
+ * On systems without explicit socket descriptions socket
+ * is max_cache_id
+ */
+ cpu_data(*id).sock_id = max_cache_id;
+ }
}
static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
@@ -660,10 +667,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}
-static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
- int sock_id)
+static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
+ int max_cache_id)
{
- find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+ find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
+ max_cache_id, 10);
}
static void set_core_ids(struct mdesc_handle *hp)
@@ -694,14 +702,15 @@ static void set_core_ids(struct mdesc_handle *hp)
}
}
-static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
{
u64 mp;
int idx = 1;
int fnd = 0;
- /* Identify unique sockets by looking for cpus backpointed to by
- * shared level n caches.
+ /**
+ * Identify unique highest level of shared cache by looking for cpus
+ * backpointed to by shared level N caches.
*/
mdesc_for_each_node_by_name(hp, mp, "cache") {
const u64 *cur_lvl;
@@ -709,8 +718,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
if (*cur_lvl != level)
continue;
-
- mark_sock_ids(hp, mp, idx);
+ mark_max_cache_ids(hp, mp, idx);
idx++;
fnd = 1;
}
@@ -745,15 +753,17 @@ static void set_sock_ids(struct mdesc_handle *hp)
{
u64 mp;
- /* If machine description exposes sockets data use it.
- * Otherwise fallback to use shared L3 or L2 caches.
+ /**
+ * Find the highest level of shared cache, which pre-T7 is also
+ * the socket.
*/
+ if (!set_max_cache_ids_by_cache(hp, 3))
+ set_max_cache_ids_by_cache(hp, 2);
+
+ /* If machine description exposes sockets data use it.*/
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
if (mp != MDESC_NODE_NULL)
- return set_sock_ids_by_socket(hp, mp);
-
- if (!set_sock_ids_by_cache(hp, 3))
- set_sock_ids_by_cache(hp, 2);
+ set_sock_ids_by_socket(hp, mp);
}
static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index db57d8acdc01..06981cc716b6 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = {
{ .major = 1, .minor = 1 },
};
+static unsigned long vatu_major = 1;
+static unsigned long vatu_minor = 1;
+
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
struct iommu_batch {
@@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
}
/* Interrupts must be disabled. */
-static long iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+ u64 *pglist = p->pglist;
+ u64 index_count;
unsigned long devhandle = pbm->devhandle;
unsigned long prot = p->prot;
unsigned long entry = p->entry;
- u64 *pglist = p->pglist;
unsigned long npages = p->npages;
+ unsigned long iotsb_num;
+ unsigned long ret;
+ long num;
/* VPCI maj=1, min=[0,1] only supports read and write */
if (vpci_major < 2)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) {
- long num;
-
- num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
- npages, prot, __pa(pglist));
- if (unlikely(num < 0)) {
- if (printk_ratelimit())
- printk("iommu_batch_flush: IOMMU map of "
- "[%08lx:%08llx:%lx:%lx:%lx] failed with "
- "status %ld\n",
- devhandle, HV_PCI_TSBID(0, entry),
- npages, prot, __pa(pglist), num);
- return -1;
+ if (mask <= DMA_BIT_MASK(32)) {
+ num = pci_sun4v_iommu_map(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages,
+ prot,
+ __pa(pglist));
+ if (unlikely(num < 0)) {
+ pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
+ __func__,
+ devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages, prot, __pa(pglist),
+ num);
+ return -1;
+ }
+ } else {
+ index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
+ iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
+ ret = pci_sun4v_iotsb_map(devhandle,
+ iotsb_num,
+ index_count,
+ prot,
+ __pa(pglist),
+ &num);
+ if (unlikely(ret != HV_EOK)) {
+ pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
+ __func__,
+ devhandle, iotsb_num,
+ index_count, prot,
+ __pa(pglist), ret);
+ return -1;
+ }
}
-
entry += num;
npages -= num;
pglist += num;
@@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p)
return 0;
}
-static inline void iommu_batch_new_entry(unsigned long entry)
+static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
if (p->entry + p->npages == entry)
return;
if (p->entry != ~0UL)
- iommu_batch_flush(p);
+ iommu_batch_flush(p, mask);
p->entry = entry;
}
/* Interrupts must be disabled. */
-static inline long iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
@@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page)
p->pglist[p->npages++] = phys_page;
if (p->npages == PGLIST_NENTS)
- return iommu_batch_flush(p);
+ return iommu_batch_flush(p, mask);
return 0;
}
/* Interrupts must be disabled. */
-static inline long iommu_batch_end(void)
+static inline long iommu_batch_end(u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
- return iommu_batch_flush(p);
+ return iommu_batch_flush(p, mask);
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
unsigned long attrs)
{
+ u64 mask;
unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
struct page *page;
void *ret;
long entry;
@@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
+
+ mask = dev->coherent_dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto range_alloc_fail;
- *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+ *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
@@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
entry);
for (n = 0; n < npages; n++) {
- long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+ long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
@@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
return ret;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
+ iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
- unsigned long npages)
+unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
+ unsigned long iotsb_num,
+ struct pci_bus *bus_dev)
+{
+ struct pci_dev *pdev;
+ unsigned long err;
+ unsigned int bus;
+ unsigned int device;
+ unsigned int fun;
+
+ list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
+ if (pdev->subordinate) {
+ /* No need to bind pci bridge */
+ dma_4v_iotsb_bind(devhandle, iotsb_num,
+ pdev->subordinate);
+ } else {
+ bus = bus_dev->number;
+ device = PCI_SLOT(pdev->devfn);
+ fun = PCI_FUNC(pdev->devfn);
+ err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
+ HV_PCI_DEVICE_BUILD(bus,
+ device,
+ fun));
+
+ /* If bind fails for one device it is going to fail
+ * for rest of the devices because we are sharing
+ * IOTSB. So in case of failure simply return with
+ * error.
+ */
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
+ dma_addr_t dvma, unsigned long iotsb_num,
+ unsigned long entry, unsigned long npages)
{
- u32 devhandle = *(u32 *)demap_arg;
unsigned long num, flags;
+ unsigned long ret;
local_irq_save(flags);
do {
- num = pci_sun4v_iommu_demap(devhandle,
- HV_PCI_TSBID(0, entry),
- npages);
-
+ if (dvma <= DMA_BIT_MASK(32)) {
+ num = pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages);
+ } else {
+ ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
+ entry, npages, &num);
+ if (unlikely(ret != HV_EOK)) {
+ pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
+ ret);
+ }
+ }
entry += num;
npages -= num;
} while (npages != 0);
@@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
unsigned long order, npages, entry;
+ unsigned long iotsb_num;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
- entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
+
+ if (dvma <= DMA_BIT_MASK(32)) {
+ tbl = &iommu->tbl;
+ iotsb_num = 0; /* not used by the legacy iommu */
+ } else {
+ tbl = &atu->tbl;
+ iotsb_num = atu->iotsb->iotsb_num;
+ }
+ entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
+ dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
+ iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
@@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ u64 mask;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr;
- u32 bus_addr, ret;
unsigned long prot;
+ dma_addr_t bus_addr, ret;
long entry;
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
if (unlikely(direction == DMA_NONE))
goto bad;
@@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ mask = *dev->dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto bad;
- bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+ bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
@@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
iommu_batch_start(dev, prot, entry);
for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
- long err = iommu_batch_add(base_paddr);
+ long err = iommu_batch_add(base_paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
@@ -310,7 +414,7 @@ bad:
return DMA_ERROR_CODE;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+ iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
return DMA_ERROR_CODE;
}
@@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
unsigned long npages;
+ unsigned long iotsb_num;
long entry;
u32 devhandle;
@@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
- entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+
+ if (bus_addr <= DMA_BIT_MASK(32)) {
+ iotsb_num = 0; /* not used by the legacy iommu */
+ tbl = &iommu->tbl;
+ } else {
+ iotsb_num = atu->iotsb->iotsb_num;
+ tbl = &atu->tbl;
+ }
+ entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
+ dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
+ iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long seg_boundary_size;
int outcount, incount, i;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ u64 mask;
unsigned long base_shift;
long err;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
+
if (nelems == 0 || !iommu)
return 0;
@@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
+
+ mask = *dev->dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
+
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
@@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+ entry = iommu_tbl_range_alloc(dev, tbl, npages,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
- if (printk_ratelimit())
- printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
- " npages %lx\n", iommu, paddr, npages);
+ pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
+ tbl, paddr, npages);
goto iommu_map_failed;
}
- iommu_batch_new_entry(entry);
+ iommu_batch_new_entry(entry, mask);
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
+ dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
- err = iommu_batch_add(paddr);
+ err = iommu_batch_add(paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
paddr += IO_PAGE_SIZE;
@@ -452,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
dma_next = dma_addr + slen;
}
- err = iommu_batch_end();
+ err = iommu_batch_end(mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
@@ -475,7 +603,7 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+ iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
@@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
+ struct atu *atu;
unsigned long flags, entry;
+ unsigned long iotsb_num;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
local_irq_save(flags);
@@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages;
- struct iommu_map_table *tbl = &iommu->tbl;
+ struct iommu_map_table *tbl;
unsigned long shift = IO_PAGE_SHIFT;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
+
+ if (dma_handle <= DMA_BIT_MASK(32)) {
+ iotsb_num = 0; /* not used by the legacy iommu */
+ tbl = &iommu->tbl;
+ } else {
+ iotsb_num = atu->iotsb->iotsb_num;
+ tbl = &atu->tbl;
+ }
entry = ((dma_handle - tbl->table_map_base) >> shift);
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+ dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
+ entry, npages);
+ iommu_tbl_range_free(tbl, dma_handle, npages,
IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
@@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
return cnt;
}
+static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
+{
+ struct atu *atu = pbm->iommu->atu;
+ struct atu_iotsb *iotsb;
+ void *table;
+ u64 table_size;
+ u64 iotsb_num;
+ unsigned long order;
+ unsigned long err;
+
+ iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
+ if (!iotsb) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ atu->iotsb = iotsb;
+
+ /* calculate size of IOTSB */
+ table_size = (atu->size / IO_PAGE_SIZE) * 8;
+ order = get_order(table_size);
+ table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!table) {
+ err = -ENOMEM;
+ goto table_failed;
+ }
+ iotsb->table = table;
+ iotsb->ra = __pa(table);
+ iotsb->dvma_size = atu->size;
+ iotsb->dvma_base = atu->base;
+ iotsb->table_size = table_size;
+ iotsb->page_size = IO_PAGE_SIZE;
+
+ /* configure and register IOTSB with HV */
+ err = pci_sun4v_iotsb_conf(pbm->devhandle,
+ iotsb->ra,
+ iotsb->table_size,
+ iotsb->page_size,
+ iotsb->dvma_base,
+ &iotsb_num);
+ if (err) {
+ pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
+ goto iotsb_conf_failed;
+ }
+ iotsb->iotsb_num = iotsb_num;
+
+ err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
+ if (err) {
+ pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
+ goto iotsb_conf_failed;
+ }
+
+ return 0;
+
+iotsb_conf_failed:
+ free_pages((unsigned long)table, order);
+table_failed:
+ kfree(iotsb);
+out_err:
+ return err;
+}
+
+static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
+{
+ struct atu *atu = pbm->iommu->atu;
+ unsigned long err;
+ const u64 *ranges;
+ u64 map_size, num_iotte;
+ u64 dma_mask;
+ const u32 *page_size;
+ int len;
+
+ ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
+ &len);
+ if (!ranges) {
+ pr_err(PFX "No iommu-address-ranges\n");
+ return -EINVAL;
+ }
+
+ page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
+ NULL);
+ if (!page_size) {
+ pr_err(PFX "No iommu-pagesizes\n");
+ return -EINVAL;
+ }
+
+ /* Four iommu-address-ranges are supported. Each range is a pair of
+ * {base, size}. ranges[0] and ranges[1] describe the 32-bit address
+ * space while ranges[2] and ranges[3] describe the 64-bit space. We
+ * want to use the 64-bit ranges to support 64-bit addressing. Because
+ * ranges[2] and ranges[3] have the same 'size' we can select either
+ * of them for mapping. However, that size is too large for the OS to
+ * allocate an IOTSB for, so we use a fixed size of 32G
+ * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe devices
+ * to share (a worked sizing sketch follows this function).
+ */
+ atu->ranges = (struct atu_ranges *)ranges;
+ atu->base = atu->ranges[3].base;
+ atu->size = ATU_64_SPACE_SIZE;
+
+ /* Create IOTSB */
+ err = pci_sun4v_atu_alloc_iotsb(pbm);
+ if (err) {
+ pr_err(PFX "Error creating ATU IOTSB\n");
+ return err;
+ }
+
+ /* Create ATU iommu map.
+ * One bit represents one iotte in IOTSB table.
+ */
+ dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
+ num_iotte = atu->size / IO_PAGE_SIZE;
+ map_size = num_iotte / 8;
+ atu->tbl.table_map_base = atu->base;
+ atu->dma_addr_mask = dma_mask;
+ atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
+ if (!atu->tbl.map)
+ return -ENOMEM;
+
+ iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
+ NULL, false /* no large_pool */,
+ 0 /* default npools */,
+ false /* want span boundary checking */);
+
+ return 0;
+}
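
The sizing above is simple arithmetic: one 8-byte IOTTE per IO page of the ATU window, and one allocator bit per IOTTE. A worked example, assuming the 32G window used in this patch and an 8K sparc64 IO page size (the 8K figure is an assumption, not stated in this hunk):

    /* Hedged sketch: reproduces the IOTSB and bitmap sizing math of
     * pci_sun4v_atu_alloc_iotsb()/pci_sun4v_atu_init(). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t atu_size     = 32ULL << 30; /* ATU_64_SPACE_SIZE: 32G  */
            const uint64_t io_page_size = 8192;        /* assumed 8K IO page size */

            uint64_t num_iotte  = atu_size / io_page_size; /* one IOTTE per page  */
            uint64_t table_size = num_iotte * 8;           /* 8 bytes per IOTTE   */
            uint64_t map_size   = num_iotte / 8;           /* one bit per IOTTE   */

            printf("IOTTEs      : %llu\n",    (unsigned long long)num_iotte);          /* 4194304 */
            printf("IOTSB size  : %llu MB\n", (unsigned long long)(table_size >> 20)); /* 32      */
            printf("bitmap size : %llu KB\n", (unsigned long long)(map_size >> 10));   /* 512     */
            return 0;
    }
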
+
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
@@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
pci_sun4v_scan_bus(pbm, &op->dev);
+ /* If atu_init fails it is not a complete failure;
+ * we can still continue using the legacy iommu.
+ */
+ if (pbm->iommu->atu) {
+ err = pci_sun4v_atu_init(pbm);
+ if (err) {
+ kfree(pbm->iommu->atu);
+ pbm->iommu->atu = NULL;
+ pr_err(PFX "ATU init failed, err=%d\n", err);
+ }
+ }
+
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
@@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op)
struct pci_pbm_info *pbm;
struct device_node *dp;
struct iommu *iommu;
+ struct atu *atu;
u32 devhandle;
int i, err = -ENODEV;
+ static bool hv_atu = true;
dp = op->dev.of_node;
@@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op)
pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
vpci_major, vpci_minor);
+ err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
+ if (err) {
+ /* Don't return an error if we fail to register the
+ * ATU group; ATU hcalls simply won't be available.
+ */
+ hv_atu = false;
+ pr_err(PFX "Could not register hvapi ATU err=%d\n",
+ err);
+ } else {
+ pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+ vatu_major, vatu_minor);
+ }
+
dma_ops = &sun4v_dma_ops;
}
@@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op)
}
pbm->iommu = iommu;
+ iommu->atu = NULL;
+ if (hv_atu) {
+ atu = kzalloc(sizeof(*atu), GFP_KERNEL);
+ if (!atu)
+ pr_err(PFX "Could not allocate atu\n");
+ else
+ iommu->atu = atu;
+ }
err = pci_sun4v_pbm_init(pbm, op, devhandle);
if (err)
@@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op)
return 0;
out_free_iommu:
+ kfree(iommu->atu);
kfree(pbm->iommu);
out_free_controller:
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h
index 5642212390b2..22603a4e48bf 100644
--- a/arch/sparc/kernel/pci_sun4v.h
+++ b/arch/sparc/kernel/pci_sun4v.h
@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
unsigned long msinum,
unsigned long valid);
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+ unsigned long ra,
+ unsigned long table_size,
+ unsigned long page_size,
+ unsigned long dvma_base,
+ u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned long iotsb_index_iottes,
+ unsigned long io_attributes,
+ unsigned long io_page_list_pa,
+ long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned long iotsb_index,
+ unsigned long iottes,
+ unsigned long *demapped);
#endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S
index e606d46c6815..578f09657916 100644
--- a/arch/sparc/kernel/pci_sun4v_asm.S
+++ b/arch/sparc/kernel/pci_sun4v_asm.S
@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setvalid)
+ /*
+ * %o0: devhandle
+ * %o1: r_addr
+ * %o2: size
+ * %o3: pagesize
+ * %o4: virt
+ * %o5: &iotsb_num/&iotsb_handle
+ *
+ * returns %o0: status
+ * %o1: iotsb_num/iotsb_handle
+ */
+ENTRY(pci_sun4v_iotsb_conf)
+ mov %o5, %g1
+ mov HV_FAST_PCI_IOTSB_CONF, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_conf)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: pci_device
+ *
+ * returns %o0: status
+ */
+ENTRY(pci_sun4v_iotsb_bind)
+ mov HV_FAST_PCI_IOTSB_BIND, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(pci_sun4v_iotsb_bind)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: index_count
+ * %o3: iotte_attributes
+ * %o4: io_page_list_p
+ * %o5: &mapped
+ *
+ * returns %o0: status
+ * %o1: #mapped
+ */
+ENTRY(pci_sun4v_iotsb_map)
+ mov %o5, %g1
+ mov HV_FAST_PCI_IOTSB_MAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: iotsb_index
+ * %o3: #iottes
+ * %o4: &demapped
+ *
+ * returns %o0: status
+ * %o1: #demapped
+ */
+ENTRY(pci_sun4v_iotsb_demap)
+ mov HV_FAST_PCI_IOTSB_DEMAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)
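
Each of the new wrappers follows the convention described in the comments above: the hypervisor status comes back in %o0 as the C return value, and any secondary result (the IOTSB handle, the number of mapped or demapped IOTTEs) is stored through the pointer passed in the last argument register. A hedged C sketch of that call pattern with the hypervisor stubbed out (the stub name and values are illustrative only):

    /* Hedged sketch: models the status + out-parameter convention of the
     * pci_sun4v_iotsb_* wrappers; the real calls trap into the HV. */
    #include <stdint.h>
    #include <stdio.h>

    #define HV_EOK 0UL

    static unsigned long stub_iotsb_conf(unsigned long devhandle,
                                         unsigned long ra,
                                         unsigned long table_size,
                                         unsigned long page_size,
                                         unsigned long dvma_base,
                                         uint64_t *iotsb_num)
    {
            (void)devhandle; (void)ra; (void)table_size;
            (void)page_size; (void)dvma_base;
            *iotsb_num = 1;         /* pretend the HV handed out handle 1 */
            return HV_EOK;          /* status in the return value         */
    }

    int main(void)
    {
            uint64_t iotsb_num;
            unsigned long err;

            err = stub_iotsb_conf(0x100, 0x40000000UL, 32UL << 20,
                                  8192, 0, &iotsb_num);
            if (err != HV_EOK)
                    fprintf(stderr, "iotsb_conf failed: %lu\n", err);
            else
                    printf("configured IOTSB handle %llu\n",
                           (unsigned long long)iotsb_num);
            return 0;
    }
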
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index c3c12efe0bc0..9c0c8fd0b292 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!invalid_frame_pointer(sf, sizeof(*sf)))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv_and_exit;
if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
synchronize_user_stack();
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
- if (!invalid_frame_pointer(sf, sizeof(*sf)))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d3035ba6cd31..8182f7caf5b1 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -63,9 +63,13 @@ cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
+ [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
+
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
+EXPORT_SYMBOL(cpu_core_sib_cache_map);
static cpumask_t smp_commenced_mask;
@@ -1265,6 +1269,10 @@ void smp_fill_in_sib_core_maps(void)
unsigned int j;
for_each_present_cpu(j) {
+ if (cpu_data(i).max_cache_id ==
+ cpu_data(j).max_cache_id)
+ cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
+
if (cpu_data(i).sock_id == cpu_data(j).sock_id)
cpumask_set_cpu(j, &cpu_core_sib_map[i]);
}
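
The cpu_core_sib_cache_map added above groups each CPU with every CPU reporting the same max_cache_id, presumably those sharing the same highest-level cache, alongside the existing socket-based sibling map. A hedged sketch of the same grouping rule over plain arrays (NR_CPUS and the sample IDs below are made up for illustration):

    /* Hedged sketch: builds "same max_cache_id" sibling groups the way
     * smp_fill_in_sib_core_maps() does, using arrays instead of cpumasks. */
    #include <stdio.h>

    #define NR_CPUS 8

    static const int max_cache_id[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

    int main(void)
    {
            for (int i = 0; i < NR_CPUS; i++) {
                    printf("cpu%d cache siblings:", i);
                    for (int j = 0; j < NR_CPUS; j++)
                            if (max_cache_id[i] == max_cache_id[j])
                                    printf(" %d", j);
                    printf("\n");
            }
            return 0;
    }
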
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index b7d0bd6b1406..69a439fa2fc1 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index 780550e1afc7..9947427ce354 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S
index 89358ee94851..059ea24ad73d 100644
--- a/arch/sparc/lib/GENmemcpy.S
+++ b/arch/sparc/lib/GENmemcpy.S
@@ -4,21 +4,18 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#define GLOBAL_SPARE %g7
#else
#define GLOBAL_SPARE %g5
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST(x,y) x
#endif
#ifndef LOAD
@@ -45,6 +42,29 @@
.register %g3,#scratch
.text
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+ENTRY(GEN_retl_o4_1)
+ add %o4, %o2, %o4
+ retl
+ add %o4, 1, %o0
+ENDPROC(GEN_retl_o4_1)
+ENTRY(GEN_retl_g1_8)
+ add %g1, %o2, %g1
+ retl
+ add %g1, 8, %o0
+ENDPROC(GEN_retl_g1_8)
+ENTRY(GEN_retl_o2_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(GEN_retl_o2_4)
+ENTRY(GEN_retl_o2_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(GEN_retl_o2_1)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -73,8 +93,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %o4, %o4
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1)
+ EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -82,8 +102,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x7, %g1
sub %o2, %g1, %o2
1: subcc %g1, 0x8, %g1
- EX_LD(LOAD(ldx, %o1, %g2))
- EX_ST(STORE(stx, %g2, %o0))
+ EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8)
+ EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8)
add %o1, 0x8, %o1
bne,pt %XCC, 1b
add %o0, 0x8, %o0
@@ -100,8 +120,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4)
+ EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -111,8 +131,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1)
+ EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
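
The EX_LD/EX_ST rework above replaces the old one-size-fits-all __retl_one fixup with per-site handlers: each faulting load or store names a GEN_retl_* stub that reconstructs, from the live loop counters, how many bytes were still left to copy, and that count becomes the return value of the user-copy routine. A minimal C sketch of that contract (the function below is illustrative, not a kernel API):

    /* Hedged sketch: models "return the number of bytes not copied" --
     * the contract the per-site fixup handlers implement.  A faulting
     * byte index stands in for the page-fault trap. */
    #include <stddef.h>
    #include <stdio.h>

    static size_t copy_with_fault(char *dst, const char *src, size_t n,
                                  size_t fault_at)
    {
            for (size_t i = 0; i < n; i++) {
                    if (i == fault_at)
                            return n - i;   /* fixup: bytes remaining */
                    dst[i] = src[i];
            }
            return 0;                       /* everything copied      */
    }

    int main(void)
    {
            char src[16] = "abcdefghijklmno";
            char dst[16];

            printf("left: %zu\n", copy_with_fault(dst, src, 16, (size_t)-1));
            printf("left: %zu\n", copy_with_fault(dst, src, 16, 10));
            return 0;
    }
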
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 885f00e81d1a..69912d2f8b54 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -38,7 +38,7 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o
lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
-lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o
lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index d5242b8c4f94..b79a6998d87c 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 4e962d993b10..dcec55f254ab 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index d5f585df2f3f..c629dbd121b6 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -32,21 +33,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -140,45 +137,110 @@
fsrc2 %x6, %f12; \
fsrc2 %x7, %f14;
#define FREG_LOAD_1(base, x0) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0))
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1)
#define FREG_LOAD_2(base, x0, x1) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1);
#define FREG_LOAD_3(base, x0, x1, x2) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1);
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1);
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1);
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
- EX_LD_FP(LOAD(ldd, base + 0x28, %x5));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1);
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
- EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \
- EX_LD_FP(LOAD(ldd, base + 0x30, %x6));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1);
.register %g2,#scratch
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_fp:
+ VISExitHalf
+__restore_asi:
+ retl
+ wr %g0, ASI_AIUS, %asi
+ENTRY(NG2_retl_o2)
+ ba,pt %xcc, __restore_asi
+ mov %o2, %o0
+ENDPROC(NG2_retl_o2)
+ENTRY(NG2_retl_o2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %o2, 1, %o0
+ENDPROC(NG2_retl_o2_plus_1)
+ENTRY(NG2_retl_o2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %o2, 4, %o0
+ENDPROC(NG2_retl_o2_plus_4)
+ENTRY(NG2_retl_o2_plus_8)
+ ba,pt %xcc, __restore_asi
+ add %o2, 8, %o0
+ENDPROC(NG2_retl_o2_plus_8)
+ENTRY(NG2_retl_o2_plus_o4_plus_1)
+ add %o4, 1, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_1)
+ENTRY(NG2_retl_o2_plus_o4_plus_8)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_8)
+ENTRY(NG2_retl_o2_plus_o4_plus_16)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_16)
+ENTRY(NG2_retl_o2_plus_g1_fp)
+ ba,pt %xcc, __restore_fp
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_fp)
+ENTRY(NG2_retl_o2_plus_g1_plus_64_fp)
+ add %g1, 64, %g1
+ ba,pt %xcc, __restore_fp
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp)
+ENTRY(NG2_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_plus_1)
+ENTRY(NG2_retl_o2_and_7_plus_o4)
+ and %o2, 7, %o2
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_and_7_plus_o4)
+ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8)
+ and %o2, 7, %o2
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -230,8 +292,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %o4, %o4 ! bytes to align dst
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1)
+ EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -281,11 +343,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
/* fall through for 0 < low bits < 8 */
110: sub %o4, 64, %g2
- EX_LD_FP(LOAD_BLK(%g2, %f0))
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+ EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1)
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -296,10 +358,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
120: sub %o4, 56, %g2
FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -310,10 +372,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
130: sub %o4, 48, %g2
FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -324,10 +386,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
140: sub %o4, 40, %g2
FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_5(f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -338,10 +400,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
150: sub %o4, 32, %g2
FREG_LOAD_4(%g2, f0, f2, f4, f6)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_4(f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -352,10 +414,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
160: sub %o4, 24, %g2
FREG_LOAD_3(%g2, f0, f2, f4)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_3(f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -366,10 +428,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
170: sub %o4, 16, %g2
FREG_LOAD_2(%g2, f0, f2)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_2(f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -380,10 +442,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
180: sub %o4, 8, %g2
FREG_LOAD_1(%g2, f0)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_1(f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -393,10 +455,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
190:
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
subcc %g1, 64, %g1
- EX_LD_FP(LOAD_BLK(%o4, %f0))
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64)
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64)
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
@@ -423,28 +485,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0xf, %o4
and %o2, 0xf, %o2
1: subcc %o4, 0x10, %o4
- EX_LD(LOAD(ldx, %o1, %o5))
+ EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x08, %o1
- EX_LD(LOAD(ldx, %o1, %g1))
+ EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16)
sub %o1, 0x08, %o1
- EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
+ EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE))
+ EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8)
+ EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4)
+ EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -460,8 +522,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -477,16 +539,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, GLOBAL_SPARE
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2)
sub GLOBAL_SPARE, %g1, GLOBAL_SPARE
andn %o2, 0x7, %o4
sllx %g2, %g1, %g2
1: add %o1, 0x8, %o1
- EX_LD(LOAD(ldx, %o1, %g3))
+ EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4)
subcc %o4, 0x8, %o4
srlx %g3, GLOBAL_SPARE, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -506,8 +568,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4)
+ EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -517,8 +579,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
index 2e8ee7ad07a9..16a286c1a528 100644
--- a/arch/sparc/lib/NG4copy_from_user.S
+++ b/arch/sparc/lib/NG4copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x, y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
index be0bf4590df8..6b0276ffc858 100644
--- a/arch/sparc/lib/NG4copy_to_user.S
+++ b/arch/sparc/lib/NG4copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 8e13ee1f4454..75bb93b1437f 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -46,22 +47,19 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
+#define EX_ST_FP(x,y) x
#endif
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
-#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
@@ -94,6 +92,158 @@
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_asi_fp:
+ VISExitHalf
+__restore_asi:
+ retl
+ wr %g0, ASI_AIUS, %asi
+
+ENTRY(NG4_retl_o2)
+ ba,pt %xcc, __restore_asi
+ mov %o2, %o0
+ENDPROC(NG4_retl_o2)
+ENTRY(NG4_retl_o2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %o2, 1, %o0
+ENDPROC(NG4_retl_o2_plus_1)
+ENTRY(NG4_retl_o2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %o2, 4, %o0
+ENDPROC(NG4_retl_o2_plus_4)
+ENTRY(NG4_retl_o2_plus_o5)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5)
+ENTRY(NG4_retl_o2_plus_o5_plus_4)
+ add %o5, 4, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_4)
+ENTRY(NG4_retl_o2_plus_o5_plus_8)
+ add %o5, 8, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_8)
+ENTRY(NG4_retl_o2_plus_o5_plus_16)
+ add %o5, 16, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_16)
+ENTRY(NG4_retl_o2_plus_o5_plus_24)
+ add %o5, 24, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_24)
+ENTRY(NG4_retl_o2_plus_o5_plus_32)
+ add %o5, 32, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_32)
+ENTRY(NG4_retl_o2_plus_g1)
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1)
+ENTRY(NG4_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1_plus_1)
+ENTRY(NG4_retl_o2_plus_g1_plus_8)
+ add %g1, 8, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1_plus_8)
+ENTRY(NG4_retl_o2_plus_o4)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4)
+ENTRY(NG4_retl_o2_plus_o4_plus_8)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_8)
+ENTRY(NG4_retl_o2_plus_o4_plus_16)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_16)
+ENTRY(NG4_retl_o2_plus_o4_plus_24)
+ add %o4, 24, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_24)
+ENTRY(NG4_retl_o2_plus_o4_plus_32)
+ add %o4, 32, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_32)
+ENTRY(NG4_retl_o2_plus_o4_plus_40)
+ add %o4, 40, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_40)
+ENTRY(NG4_retl_o2_plus_o4_plus_48)
+ add %o4, 48, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_48)
+ENTRY(NG4_retl_o2_plus_o4_plus_56)
+ add %o4, 56, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_56)
+ENTRY(NG4_retl_o2_plus_o4_plus_64)
+ add %o4, 64, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_64)
+ENTRY(NG4_retl_o2_plus_o4_fp)
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_8_fp)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_16_fp)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_24_fp)
+ add %o4, 24, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_32_fp)
+ add %o4, 32, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_40_fp)
+ add %o4, 40, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_48_fp)
+ add %o4, 48, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_56_fp)
+ add %o4, 56, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_64_fp)
+ add %o4, 64, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp)
+#endif
.align 64
.globl FUNC_NAME
@@ -124,12 +274,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 51f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01))
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
@@ -154,43 +305,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, .Llarge_aligned
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 8, %o1
subcc %g1, 8, %g1
add %o0, 8, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stx, %g2, %o0 - 0x08))
+ EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8)
.Llarge_aligned:
/* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
andn %o2, 0x3f, %o4
sub %o2, %o4, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4)
add %o1, 0x40, %o1
- EX_LD(LOAD(ldx, %o1 - 0x38, %g2))
+ EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
- EX_LD(LOAD(ldx, %o1 - 0x30, %g3))
- EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE))
- EX_LD(LOAD(ldx, %o1 - 0x20, %o5))
- EX_ST(STORE_INIT(%g1, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64)
+ EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g2, %o0))
+ EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x18, %g2))
- EX_ST(STORE_INIT(%g3, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48)
+ EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x10, %g3))
- EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40)
+ EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE))
- EX_ST(STORE_INIT(%o5, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32)
+ EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g2, %o0))
+ EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g3, %o0))
+ EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+ EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
@@ -216,17 +367,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %o4, %o2
alignaddr %o1, %g0, %g1
add %o1, %o4, %o1
- EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
-1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4)
+1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
- EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
- EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
- EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
- EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
- EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
- EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64)
faligndata %f2, %f4, %f18
add %g1, 0x40, %g1
faligndata %f4, %f6, %f20
@@ -235,14 +386,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
- EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
- EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
- EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
- EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
- EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
- EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
- EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
+ EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64)
+ EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56)
+ EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48)
+ EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40)
+ EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32)
+ EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24)
+ EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16)
+ EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8)
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
@@ -270,37 +421,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andncc %o2, 0x20 - 1, %o5
be,pn %icc, 2f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g2))
- EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE))
- EX_LD(LOAD(ldx, %o1 + 0x18, %o4))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5)
add %o1, 0x20, %o1
subcc %o5, 0x20, %o5
- EX_ST(STORE(stx, %g1, %o0 + 0x00))
- EX_ST(STORE(stx, %g2, %o0 + 0x08))
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10))
- EX_ST(STORE(stx, %o4, %o0 + 0x18))
+ EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32)
+ EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8)
bne,pt %icc, 1b
add %o0, 0x20, %o0
2: andcc %o2, 0x18, %o5
be,pt %icc, 3f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
add %o1, 0x08, %o1
add %o0, 0x08, %o0
subcc %o5, 0x08, %o5
bne,pt %icc, 1b
- EX_ST(STORE(stx, %g1, %o0 - 0x08))
+ EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8)
3: brz,pt %o2, .Lexit
cmp %o2, 0x04
bl,pn %icc, .Ltiny
nop
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2)
add %o1, 0x04, %o1
add %o0, 0x04, %o0
subcc %o2, 0x04, %o2
bne,pn %icc, .Ltiny
- EX_ST(STORE(stw, %g1, %o0 - 0x04))
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4)
ba,a,pt %icc, .Lexit
.Lmedium_unaligned:
/* First get dest 8 byte aligned. */
@@ -309,12 +461,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 2f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01))
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
2:
and %o1, 0x7, %g1
brz,pn %g1, .Lmedium_noprefetch
@@ -322,16 +474,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov 64, %g2
sub %g2, %g1, %g2
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1 + 0x00, %o4))
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2)
sllx %o4, %g1, %o4
andn %o2, 0x08 - 1, %o5
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5)
add %o1, 0x08, %o1
subcc %o5, 0x08, %o5
srlx %g3, %g2, GLOBAL_SPARE
or GLOBAL_SPARE, %o4, GLOBAL_SPARE
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00))
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
sllx %g3, %g1, %o4
@@ -342,17 +494,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
ba,pt %icc, .Lsmall_unaligned
.Ltiny:
- EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+ EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x00))
- EX_LD(LOAD(ldub, %o1 + 0x01, %g1))
+ EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x01))
- EX_LD(LOAD(ldub, %o1 + 0x02, %g1))
+ EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2)
ba,pt %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x02))
+ EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2)
.Lsmall:
andcc %g2, 0x3, %g0
@@ -360,22 +512,22 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x4 - 1, %o5
sub %o2, %o5, %o2
1:
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
add %o1, 0x04, %o1
subcc %o5, 0x04, %o5
add %o0, 0x04, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stw, %g1, %o0 - 0x04))
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4)
brz,pt %o2, .Lexit
nop
ba,a,pt %icc, .Ltiny
.Lsmall_unaligned:
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
add %o1, 1, %o1
add %o0, 1, %o0
subcc %o2, 1, %o2
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g1, %o0 - 0x01))
+ EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
.size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
index 5d1e4d1ac21e..9cd42fcbc781 100644
--- a/arch/sparc/lib/NGcopy_from_user.S
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __ret_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
index ff630dcb273c..5c358afd464e 100644
--- a/arch/sparc/lib/NGcopy_to_user.S
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __ret_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index 96a14caf6966..d88c4ed50a00 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/thread_info.h>
#define GLOBAL_SPARE %g7
@@ -27,15 +28,11 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST(x,y) x
#endif
#ifndef LOAD
@@ -79,6 +76,92 @@
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_asi:
+ ret
+ wr %g0, ASI_AIUS, %asi
+ restore
+ENTRY(NG_ret_i2_plus_i4_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %i2, %i5, %i0
+ENDPROC(NG_ret_i2_plus_i4_plus_1)
+ENTRY(NG_ret_i2_plus_g1)
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1)
+ENTRY(NG_ret_i2_plus_g1_minus_8)
+ sub %g1, 8, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_8)
+ENTRY(NG_ret_i2_plus_g1_minus_16)
+ sub %g1, 16, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_16)
+ENTRY(NG_ret_i2_plus_g1_minus_24)
+ sub %g1, 24, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_24)
+ENTRY(NG_ret_i2_plus_g1_minus_32)
+ sub %g1, 32, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_32)
+ENTRY(NG_ret_i2_plus_g1_minus_40)
+ sub %g1, 40, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_40)
+ENTRY(NG_ret_i2_plus_g1_minus_48)
+ sub %g1, 48, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_48)
+ENTRY(NG_ret_i2_plus_g1_minus_56)
+ sub %g1, 56, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_56)
+ENTRY(NG_ret_i2_plus_i4)
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_plus_i4)
+ENTRY(NG_ret_i2_plus_i4_minus_8)
+ sub %i4, 8, %i4
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_plus_i4_minus_8)
+ENTRY(NG_ret_i2_plus_8)
+ ba,pt %xcc, __restore_asi
+ add %i2, 8, %i0
+ENDPROC(NG_ret_i2_plus_8)
+ENTRY(NG_ret_i2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %i2, 4, %i0
+ENDPROC(NG_ret_i2_plus_4)
+ENTRY(NG_ret_i2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %i2, 1, %i0
+ENDPROC(NG_ret_i2_plus_1)
+ENTRY(NG_ret_i2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_plus_1)
+ENTRY(NG_ret_i2)
+ ba,pt %xcc, __restore_asi
+ mov %i2, %i0
+ENDPROC(NG_ret_i2)
+ENTRY(NG_ret_i2_and_7_plus_i4)
+ and %i2, 7, %i2
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_and_7_plus_i4)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -126,8 +209,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
sub %g0, %i4, %i4 ! bytes to align dst
sub %i2, %i4, %i2
1: subcc %i4, 1, %i4
- EX_LD(LOAD(ldub, %i1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1)
+ EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1)
add %i1, 1, %i1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -160,7 +243,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
and %i4, 0x7, GLOBAL_SPARE
sll GLOBAL_SPARE, 3, GLOBAL_SPARE
mov 64, %i5
- EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1)
sub %i5, GLOBAL_SPARE, %i5
mov 16, %o4
mov 32, %o5
@@ -178,31 +261,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
srlx WORD3, PRE_SHIFT, TMP; \
or WORD2, TMP, WORD2;
-8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
- EX_ST(STORE_INIT(%g2, %o0 + 0x00))
- EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1)
+ EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
- EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o2, %o0 + 0x10))
- EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%g2, %o0 + 0x20))
- EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
- EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o2, %o0 + 0x30))
- EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 8b
@@ -211,31 +294,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
ba,pt %XCC, 60f
add %i1, %i4, %i1
-9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
- EX_ST(STORE_INIT(%g3, %o0 + 0x00))
- EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
- EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o3, %o0 + 0x10))
- EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%g3, %o0 + 0x20))
- EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
- EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o3, %o0 + 0x30))
- EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 9b
@@ -249,25 +332,25 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
* one twin load ahead, then add 8 back into source when
* we finish the loop.
*/
- EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+ EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1)
mov 16, %o7
mov 32, %g2
mov 48, %g3
mov 64, %o1
-1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
- EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%o2, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
- EX_ST(STORE_INIT(%o3, %o0 + 0x10))
- EX_ST(STORE_INIT(%o4, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
- EX_ST(STORE_INIT(%o5, %o0 + 0x20))
- EX_ST(STORE_INIT(%o2, %o0 + 0x28))
- EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+ EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
+ EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
- EX_ST(STORE_INIT(%o3, %o0 + 0x30))
- EX_ST(STORE_INIT(%o4, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -282,20 +365,20 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
mov 32, %g2
mov 48, %g3
mov 64, %o1
-1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1)
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
- EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%o5, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
- EX_ST(STORE_INIT(%o2, %o0 + 0x10))
- EX_ST(STORE_INIT(%o3, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+ EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
+ EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
add %i1, 64, %i1
- EX_ST(STORE_INIT(%o4, %o0 + 0x20))
- EX_ST(STORE_INIT(%o5, %o0 + 0x28))
- EX_ST(STORE_INIT(%o2, %o0 + 0x30))
- EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -321,28 +404,28 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
andn %i2, 0xf, %i4
and %i2, 0xf, %i2
1: subcc %i4, 0x10, %i4
- EX_LD(LOAD(ldx, %i1, %o4))
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
add %i1, 0x08, %i1
- EX_LD(LOAD(ldx, %i1, %g1))
+ EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
sub %i1, 0x08, %i1
- EX_ST(STORE(stx, %o4, %i1 + %i3))
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
add %i1, 0x8, %i1
- EX_ST(STORE(stx, %g1, %i1 + %i3))
+ EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
bgu,pt %XCC, 1b
add %i1, 0x8, %i1
73: andcc %i2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x8, %i2
- EX_LD(LOAD(ldx, %i1, %o4))
- EX_ST(STORE(stx, %o4, %i1 + %i3))
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8)
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8)
add %i1, 0x8, %i1
1: andcc %i2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x4, %i2
- EX_LD(LOAD(lduw, %i1, %i5))
- EX_ST(STORE(stw, %i5, %i1 + %i3))
+ EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4)
+ EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4)
add %i1, 0x4, %i1
1: cmp %i2, 0
be,pt %XCC, 85f
@@ -358,8 +441,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
sub %i2, %g1, %i2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %i1, %i5))
- EX_ST(STORE(stb, %i5, %i1 + %i3))
+ EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %i1, 1, %i1
@@ -375,16 +458,16 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
8: mov 64, %i3
andn %i1, 0x7, %i1
- EX_LD(LOAD(ldx, %i1, %g2))
+ EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2)
sub %i3, %g1, %i3
andn %i2, 0x7, %i4
sllx %g2, %g1, %g2
1: add %i1, 0x8, %i1
- EX_LD(LOAD(ldx, %i1, %g3))
+ EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4)
subcc %i4, 0x8, %i4
srlx %g3, %i3, %i5
or %i5, %g2, %i5
- EX_ST(STORE(stx, %i5, %o0))
+ EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -404,8 +487,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
1:
subcc %i2, 4, %i2
- EX_LD(LOAD(lduw, %i1, %g1))
- EX_ST(STORE(stw, %g1, %i1 + %i3))
+ EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4)
+ EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4)
bgu,pt %XCC, 1b
add %i1, 4, %i1
@@ -415,8 +498,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
.align 32
90:
subcc %i2, 1, %i2
- EX_LD(LOAD(ldub, %i1, %g1))
- EX_ST(STORE(stb, %g1, %i1 + %i3))
+ EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1)
+ EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1)
bgu,pt %XCC, 90b
add %i1, 1, %i1
ret
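The NGmemcpy hunks above replace the old catch-all fault handler with one small return stub per load/store site; each stub's name spells out, in terms of the registers live at that point, how many bytes were left uncopied (for instance NG_ret_i2_plus_i4_plus_1 returns %i2 + %i4 + 1). Below is a minimal user-space C model of the destination-alignment loop, checking that this expression equals "total length minus bytes already copied" at every possible fault point; the function and variable names are invented for the sketch and are not kernel code.

#include <assert.h>
#include <stddef.h>

/* Model of the NGmemcpy byte-alignment loop: 'align' bytes are copied
 * one at a time, the remaining part of 'len' is handled later.  i2/i4
 * mirror the registers of the same name; 'fault_at' is the iteration
 * at which a simulated fault hits the EX_LD/EX_ST pair. */
static size_t ng_align_loop_residual(size_t len, size_t align, size_t fault_at)
{
        size_t i2 = len - align;        /* sub %i2, %i4, %i2 */
        size_t i4 = align;
        size_t k;

        for (k = 0; k < align; k++) {
                i4 -= 1;                /* subcc %i4, 1, %i4 */
                if (k == fault_at)
                        return i2 + i4 + 1;   /* NG_ret_i2_plus_i4_plus_1 */
                /* the ldub/stb of one byte happens here */
        }
        return i2;                      /* loop finished without fault */
}

int main(void)
{
        size_t len = 100, align = 7, k;

        for (k = 0; k < align; k++)
                /* bytes not copied == total minus bytes already done */
                assert(ng_align_loop_residual(len, align, k) == len - k);
        return 0;
}

The other stubs follow the same pattern for the twin-load, 16-byte and word loops, each subtracting exactly what the loop has already committed.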
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index ecc5692fa2b4..bb6ff73229e3 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index 9eea392e44d4..ed92ce739558 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y; \
.text; \
.align 4;
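Both UltraSPARC-I/II user-copy wrappers now pass the fixup label through as a second macro argument, so every EX_LD/EX_ST site gets its own __ex_table entry instead of sharing the generic __retl_one handler. The following is a simplified, purely illustrative C model of what such a table provides: the fault handler finds the entry for the faulting instruction and calls that site's fixup, which reports the residual byte count. The structure layout and names are assumptions made for the sketch, not the kernel's actual implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified exception table: each faulting instruction address maps to
 * its own fixup routine (what the second EX_LD/EX_ST argument selects),
 * rather than to one generic handler. */
struct ex_entry {
        uintptr_t insn;                  /* address of the 98: instruction */
        size_t (*fixup)(void *state);    /* returns bytes left uncopied */
};

static size_t fixup_whole_len(void *state) { return *(size_t *)state; }

static const struct ex_entry table[] = {
        { 0x1000, fixup_whole_len },
        /* one entry per EX_LD()/EX_ST() site ... */
};

static size_t handle_fault(uintptr_t pc, void *state)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].insn == pc)
                        return table[i].fixup(state);
        return 0;       /* not a user-copy fault */
}

int main(void)
{
        size_t len = 64;
        printf("%zu bytes not copied\n", handle_fault(0x1000, &len));
        return 0;
}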
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index 97e1b211090c..4f0d50b33a72 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -5,6 +5,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/export.h>
@@ -24,21 +25,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -79,53 +76,169 @@
faligndata %f7, %f8, %f60; \
faligndata %f8, %f9, %f62;
-#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
- EX_LD_FP(LOAD_BLK(%src, %fdest)); \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
- add %src, 0x40, %src; \
- subcc %len, 0x40, %len; \
- be,pn %xcc, jmptgt; \
- add %dest, 0x40, %dest; \
-
-#define LOOP_CHUNK1(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
-#define LOOP_CHUNK2(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
-#define LOOP_CHUNK3(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \
+ EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
+ add %src, 0x40, %src; \
+ subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \
+ be,pn %xcc, jmptgt; \
+ add %dest, 0x40, %dest; \
+
+#define LOOP_CHUNK1(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest)
+#define LOOP_CHUNK2(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
+#define LOOP_CHUNK3(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)
#define DO_SYNC membar #Sync;
#define STORE_SYNC(dest, fsrc) \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
add %dest, 0x40, %dest; \
DO_SYNC
#define STORE_JUMP(dest, fsrc, target) \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \
add %dest, 0x40, %dest; \
ba,pt %xcc, target; \
nop;
-#define FINISH_VISCHUNK(dest, f0, f1, left) \
- subcc %left, 8, %left;\
- bl,pn %xcc, 95f; \
- faligndata %f0, %f1, %f48; \
- EX_ST_FP(STORE(std, %f48, %dest)); \
+#define FINISH_VISCHUNK(dest, f0, f1) \
+ subcc %g3, 8, %g3; \
+ bl,pn %xcc, 95f; \
+ faligndata %f0, %f1, %f48; \
+ EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \
add %dest, 8, %dest;
-#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
- subcc %left, 8, %left; \
- bl,pn %xcc, 95f; \
+#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
+ subcc %g3, 8, %g3; \
+ bl,pn %xcc, 95f; \
fsrc2 %f0, %f1;
-#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
- UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
+#define UNEVEN_VISCHUNK(dest, f0, f1) \
+ UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
ba,a,pt %xcc, 93f;
.register %g2,#scratch
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+ENTRY(U1_g1_1_fp)
+ VISExitHalf
+ add %g1, 1, %g1
+ add %g1, %g2, %g1
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_1_fp)
+ENTRY(U1_g2_0_fp)
+ VISExitHalf
+ retl
+ add %g2, %o2, %o0
+ENDPROC(U1_g2_0_fp)
+ENTRY(U1_g2_8_fp)
+ VISExitHalf
+ add %g2, 8, %g2
+ retl
+ add %g2, %o2, %o0
+ENDPROC(U1_g2_8_fp)
+ENTRY(U1_gs_0_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_0_fp)
+ENTRY(U1_gs_80_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_80_fp)
+ENTRY(U1_gs_40_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_40_fp)
+ENTRY(U1_g3_0_fp)
+ VISExitHalf
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_0_fp)
+ENTRY(U1_g3_8_fp)
+ VISExitHalf
+ add %g3, 8, %g3
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_8_fp)
+ENTRY(U1_o2_0_fp)
+ VISExitHalf
+ retl
+ mov %o2, %o0
+ENDPROC(U1_o2_0_fp)
+ENTRY(U1_o2_1_fp)
+ VISExitHalf
+ retl
+ add %o2, 1, %o0
+ENDPROC(U1_o2_1_fp)
+ENTRY(U1_gs_0)
+ VISExitHalf
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_0)
+ENTRY(U1_gs_8)
+ VISExitHalf
+ add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, 0x8, %o0
+ENDPROC(U1_gs_8)
+ENTRY(U1_gs_10)
+ VISExitHalf
+ add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, 0x10, %o0
+ENDPROC(U1_gs_10)
+ENTRY(U1_o2_0)
+ retl
+ mov %o2, %o0
+ENDPROC(U1_o2_0)
+ENTRY(U1_o2_8)
+ retl
+ add %o2, 8, %o0
+ENDPROC(U1_o2_8)
+ENTRY(U1_o2_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(U1_o2_4)
+ENTRY(U1_o2_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(U1_o2_1)
+ENTRY(U1_g1_0)
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_0)
+ENTRY(U1_g1_1)
+ add %g1, 1, %g1
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_1)
+ENTRY(U1_gs_0_o2_adj)
+ and %o2, 7, %o2
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_0_o2_adj)
+ENTRY(U1_gs_8_o2_adj)
+ and %o2, 7, %o2
+ add %GLOBAL_SPARE, 8, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_8_o2_adj)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -167,8 +280,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
- EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
- EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
+ EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
+ EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@@ -179,20 +292,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
- EX_LD_FP(LOAD(ldd, %o1, %f4))
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
+ EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
be,pn %icc, 3f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
bne,pt %icc, 1b
add %o0, 0x8, %o0
@@ -215,13 +328,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %g1, %GLOBAL_SPARE, %g1
subcc %o2, %g3, %o2
- EX_LD_FP(LOAD_BLK(%o1, %f0))
+ EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
add %o1, 0x40, %o1
add %g1, %g3, %g1
- EX_LD_FP(LOAD_BLK(%o1, %f16))
+ EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
add %o1, 0x40, %o1
sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
- EX_LD_FP(LOAD_BLK(%o1, %f32))
+ EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
add %o1, 0x40, %o1
/* There are 8 instances of the unrolled loop,
@@ -241,11 +354,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f0, %f2, %f48
1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
@@ -262,11 +375,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 56f)
1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f2, %f4, %f48
1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
@@ -283,11 +396,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 57f)
1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f4, %f6, %f48
1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
@@ -304,11 +417,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 58f)
1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f6, %f8, %f48
1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
@@ -325,11 +438,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 59f)
1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f8, %f10, %f48
1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
@@ -346,11 +459,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 60f)
1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f10, %f12, %f48
1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
@@ -367,11 +480,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 61f)
1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f12, %f14, %f48
1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
@@ -388,11 +501,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 62f)
1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f14, %f16, %f48
1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
@@ -408,53 +521,53 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
STORE_JUMP(o0, f48, 63f)
-40: FINISH_VISCHUNK(o0, f0, f2, g3)
-41: FINISH_VISCHUNK(o0, f2, f4, g3)
-42: FINISH_VISCHUNK(o0, f4, f6, g3)
-43: FINISH_VISCHUNK(o0, f6, f8, g3)
-44: FINISH_VISCHUNK(o0, f8, f10, g3)
-45: FINISH_VISCHUNK(o0, f10, f12, g3)
-46: FINISH_VISCHUNK(o0, f12, f14, g3)
-47: UNEVEN_VISCHUNK(o0, f14, f0, g3)
-48: FINISH_VISCHUNK(o0, f16, f18, g3)
-49: FINISH_VISCHUNK(o0, f18, f20, g3)
-50: FINISH_VISCHUNK(o0, f20, f22, g3)
-51: FINISH_VISCHUNK(o0, f22, f24, g3)
-52: FINISH_VISCHUNK(o0, f24, f26, g3)
-53: FINISH_VISCHUNK(o0, f26, f28, g3)
-54: FINISH_VISCHUNK(o0, f28, f30, g3)
-55: UNEVEN_VISCHUNK(o0, f30, f0, g3)
-56: FINISH_VISCHUNK(o0, f32, f34, g3)
-57: FINISH_VISCHUNK(o0, f34, f36, g3)
-58: FINISH_VISCHUNK(o0, f36, f38, g3)
-59: FINISH_VISCHUNK(o0, f38, f40, g3)
-60: FINISH_VISCHUNK(o0, f40, f42, g3)
-61: FINISH_VISCHUNK(o0, f42, f44, g3)
-62: FINISH_VISCHUNK(o0, f44, f46, g3)
-63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
-
-93: EX_LD_FP(LOAD(ldd, %o1, %f2))
+40: FINISH_VISCHUNK(o0, f0, f2)
+41: FINISH_VISCHUNK(o0, f2, f4)
+42: FINISH_VISCHUNK(o0, f4, f6)
+43: FINISH_VISCHUNK(o0, f6, f8)
+44: FINISH_VISCHUNK(o0, f8, f10)
+45: FINISH_VISCHUNK(o0, f10, f12)
+46: FINISH_VISCHUNK(o0, f12, f14)
+47: UNEVEN_VISCHUNK(o0, f14, f0)
+48: FINISH_VISCHUNK(o0, f16, f18)
+49: FINISH_VISCHUNK(o0, f18, f20)
+50: FINISH_VISCHUNK(o0, f20, f22)
+51: FINISH_VISCHUNK(o0, f22, f24)
+52: FINISH_VISCHUNK(o0, f24, f26)
+53: FINISH_VISCHUNK(o0, f26, f28)
+54: FINISH_VISCHUNK(o0, f28, f30)
+55: UNEVEN_VISCHUNK(o0, f30, f0)
+56: FINISH_VISCHUNK(o0, f32, f34)
+57: FINISH_VISCHUNK(o0, f34, f36)
+58: FINISH_VISCHUNK(o0, f36, f38)
+59: FINISH_VISCHUNK(o0, f38, f40)
+60: FINISH_VISCHUNK(o0, f40, f42)
+61: FINISH_VISCHUNK(o0, f42, f44)
+62: FINISH_VISCHUNK(o0, f44, f46)
+63: UNEVEN_VISCHUNK_LAST(o0, f46, f0)
+
+93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bl,pn %xcc, 95f
add %o0, 8, %o0
- EX_LD_FP(LOAD(ldd, %o1, %f0))
+ EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bge,pt %xcc, 93b
add %o0, 8, %o0
95: brz,pt %o2, 2f
mov %g1, %o1
-1: EX_LD_FP(LOAD(ldub, %o1, %o3))
+1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
add %o1, 1, %o1
subcc %o2, 1, %o2
- EX_ST_FP(STORE(stb, %o3, %o0))
+ EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)
bne,pt %xcc, 1b
add %o0, 1, %o0
@@ -470,27 +583,27 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
72: andn %o2, 0xf, %GLOBAL_SPARE
and %o2, 0xf, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
- EX_LD(LOAD(ldx, %o1, %o5))
+ EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
sub %o2, 0x8, %o2
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
- EX_LD(LOAD(lduw, %o1, %o5))
+ EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
sub %o2, 0x4, %o2
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -504,9 +617,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %g1, %g1
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1, %o5))
+1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
subcc %g1, 1, %g1
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -522,16 +635,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, %o3
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
sub %o3, %g1, %o3
andn %o2, 0x7, %GLOBAL_SPARE
sllx %g2, %g1, %g2
-1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -549,9 +662,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
bne,pn %XCC, 90f
sub %o0, %o1, %o3
-1: EX_LD(LOAD(lduw, %o1, %g1))
+1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
subcc %o2, 4, %o2
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -559,9 +672,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov EX_RETVAL(%o4), %o0
.align 32
-90: EX_LD(LOAD(ldub, %o1, %g1))
+90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
subcc %o2, 1, %o2
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index 88ad73d86fe4..db73010a1af8 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index 845139d75537..c4ee858e352a 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
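Unlike the U1 wrappers, the U3 copy-to/from-user macros paste an _fp suffix onto the label (y##_fp), so the call sites in U3memcpy.S can name a single stub family while the user-copy builds automatically pick up the variants that restore the FPU state before returning. A tiny stand-alone C example of the same token-pasting trick follows; the identifiers are invented for illustration.

#include <stdio.h>

/* y##_fp glues "_fp" onto the name, the way U3copy_{from,to}_user.S
 * turns U3_retl_o2 into U3_retl_o2_fp in the __ex_table entry. */
#define EX_FP(y)        y##_fp()

static void retl_o2_fp(void)
{
        puts("fp fixup: exit VIS state, then return the residual");
}

int main(void)
{
        EX_FP(retl_o2);         /* expands to retl_o2_fp() */
        return 0;
}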
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 491ee69e4995..54f98706b03b 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -22,21 +23,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -77,6 +74,87 @@
*/
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_fp:
+ VISExitHalf
+ retl
+ nop
+ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
+ add %g1, 1, %g1
+ add %g2, %g1, %g2
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
+ENTRY(U3_retl_o2_plus_g2_fp)
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_fp)
+ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
+ add %g2, 8, %g2
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
+ENTRY(U3_retl_o2)
+ retl
+ mov %o2, %o0
+ENDPROC(U3_retl_o2)
+ENTRY(U3_retl_o2_plus_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(U3_retl_o2_plus_1)
+ENTRY(U3_retl_o2_plus_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(U3_retl_o2_plus_4)
+ENTRY(U3_retl_o2_plus_8)
+ retl
+ add %o2, 8, %o0
+ENDPROC(U3_retl_o2_plus_8)
+ENTRY(U3_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ retl
+ add %o2, %g1, %o0
+ENDPROC(U3_retl_o2_plus_g1_plus_1)
+ENTRY(U3_retl_o2_fp)
+ ba,pt %xcc, __restore_fp
+ mov %o2, %o0
+ENDPROC(U3_retl_o2_fp)
+ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
+ sll %o3, 6, %o3
+ add %o3, 0x80, %o3
+ ba,pt %xcc, __restore_fp
+ add %o2, %o3, %o0
+ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
+ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
+ sll %o3, 6, %o3
+ add %o3, 0x40, %o3
+ ba,pt %xcc, __restore_fp
+ add %o2, %o3, %o0
+ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
+ENTRY(U3_retl_o2_plus_GS_plus_0x10)
+ add GLOBAL_SPARE, 0x10, GLOBAL_SPARE
+ retl
+ add %o2, GLOBAL_SPARE, %o0
+ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
+ENTRY(U3_retl_o2_plus_GS_plus_0x08)
+ add GLOBAL_SPARE, 0x08, GLOBAL_SPARE
+ retl
+ add %o2, GLOBAL_SPARE, %o0
+ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
+ENTRY(U3_retl_o2_and_7_plus_GS)
+ and %o2, 7, %o2
+ retl
+ add %o2, GLOBAL_SPARE, %o2
+ENDPROC(U3_retl_o2_and_7_plus_GS)
+ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
+ add GLOBAL_SPARE, 8, GLOBAL_SPARE
+ and %o2, 7, %o2
+ retl
+ add %o2, GLOBAL_SPARE, %o2
+ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
+#endif
+
.align 64
/* The cheetah's flexible spine, oversized liver, enlarged heart,
@@ -126,8 +204,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
- EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
- EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
+ EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1)
+ EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@@ -138,20 +216,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
- EX_LD_FP(LOAD(ldd, %o1, %f4))
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
+ EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2)
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %icc, 3f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f2
- EX_ST_FP(STORE(std, %f2, %o0))
+ EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pt %icc, 1b
add %o0, 0x8, %o0
@@ -161,25 +239,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
LOAD(prefetch, %o1 + 0x080, #one_read)
LOAD(prefetch, %o1 + 0x0c0, #one_read)
LOAD(prefetch, %o1 + 0x100, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2)
LOAD(prefetch, %o1 + 0x140, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2)
LOAD(prefetch, %o1 + 0x180, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2)
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2)
faligndata %f8, %f10, %f24
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2)
faligndata %f10, %f12, %f26
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
add %o1, 0x40, %o1
@@ -190,26 +268,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1:
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
add %o0, 0x40, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
subcc %o3, 0x01, %o3
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f8, %f10, %f24
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f10, %f12, %f26
bg,pt %XCC, 1b
@@ -217,29 +295,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
/* Finally we copy the last full 64-byte block. */
2:
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
@@ -259,20 +337,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g2, %o2
be,a,pt %XCC, 1f
- EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2)
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %XCC, 2f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pn %XCC, 1b
add %o0, 0x8, %o0
@@ -292,30 +370,33 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andcc %o2, 0x8, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x8, %o1
+ sub %o2, 8, %o2
1: andcc %o2, 0x4, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x4, %o1
+ sub %o2, 4, %o2
1: andcc %o2, 0x2, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(lduh, %o1, %o5))
- EX_ST(STORE(sth, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x2, %o1
+ sub %o2, 2, %o2
1: andcc %o2, 0x1, %g0
be,pt %icc, 85f
nop
- EX_LD(LOAD(ldub, %o1, %o5))
+ EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
ba,pt %xcc, 85f
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
.align 64
70: /* 16 < len <= 64 */
@@ -326,26 +407,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0xf, GLOBAL_SPARE
and %o2, 0xf, %o2
1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE
- EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -361,8 +442,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -378,16 +459,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, %o3
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
sub %o3, %g1, %o3
andn %o2, 0x7, GLOBAL_SPARE
sllx %g2, %g1, %g2
-1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -407,8 +488,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
+ EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -418,8 +499,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 482de093bdae..0252b218de45 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -9,18 +9,33 @@
#define XCC xcc
-#define EX(x,y) \
+#define EX(x,y,z) \
98: x,y; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, z; \
.text; \
.align 4;
+#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8)
+#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4)
+#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1)
+
.register %g2,#scratch
.register %g3,#scratch
.text
+__retl_o4_plus_8:
+ add %o4, %o2, %o4
+ retl
+ add %o4, 8, %o0
+__retl_o2_plus_4:
+ retl
+ add %o2, 4, %o0
+__retl_o2_plus_1:
+ retl
+ add %o2, 1, %o0
+
.align 32
/* Don't try to get too fancy here, just nice and
@@ -45,8 +60,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x7, %o4
and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4
- EX(ldxa [%o1] %asi, %o5)
- EX(stxa %o5, [%o0] %asi)
+ EX_O4(ldxa [%o1] %asi, %o5)
+ EX_O4(stxa %o5, [%o0] %asi)
add %o1, 0x8, %o1
bgu,pt %XCC, 1b
add %o0, 0x8, %o0
@@ -54,8 +69,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX(lduwa [%o1] %asi, %o5)
- EX(stwa %o5, [%o0] %asi)
+ EX_O2_4(lduwa [%o1] %asi, %o5)
+ EX_O2_4(stwa %o5, [%o0] %asi)
add %o1, 0x4, %o1
add %o0, 0x4, %o0
1: cmp %o2, 0
@@ -71,8 +86,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
82:
subcc %o2, 4, %o2
- EX(lduwa [%o1] %asi, %g1)
- EX(stwa %g1, [%o0] %asi)
+ EX_O2_4(lduwa [%o1] %asi, %g1)
+ EX_O2_4(stwa %g1, [%o0] %asi)
add %o1, 4, %o1
bgu,pt %XCC, 82b
add %o0, 4, %o0
@@ -83,8 +98,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX(lduba [%o1] %asi, %g1)
- EX(stba %g1, [%o0] %asi)
+ EX_O2_1(lduba [%o1] %asi, %g1)
+ EX_O2_1(stba %g1, [%o0] %asi)
add %o1, 1, %o1
bgu,pt %XCC, 90b
add %o0, 1, %o0
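___copy_in_user only needs three residual expressions, so the patch keeps its stubs local and wraps them in EX_O4/EX_O2_4/EX_O2_1. The arithmetic is the same style as the NGmemcpy check sketched earlier: at the EX_O4 sites %o4 has already been decremented by 8, so __retl_o4_plus_8 reports %o4 + %o2 + 8 untouched bytes. A short, hypothetical C check of that identity:

#include <assert.h>
#include <stddef.h>

/* Model of the 8-byte loop in ___copy_in_user: o4 counts the aligned
 * part (already decremented when the ldxa/stxa pair runs), o2 holds
 * the sub-word tail.  __retl_o4_plus_8 returns o4 + o2 + 8. */
static size_t copy_in_user_residual(size_t len, size_t fault_word)
{
        size_t o4 = len & ~(size_t)7;   /* andn %o2, 0x7, %o4 */
        size_t o2 = len & 7;            /* and  %o2, 0x7, %o2 */
        size_t w, nwords = o4 / 8;

        for (w = 0; w < nwords; w++) {
                o4 -= 8;                        /* subcc %o4, 0x8, %o4 */
                if (w == fault_word)
                        return o4 + o2 + 8;     /* __retl_o4_plus_8 */
        }
        return o2;
}

int main(void)
{
        size_t len = 61, w;

        for (w = 0; w < len / 8; w++)
                assert(copy_in_user_residual(len, w) == len - 8 * w);
        return 0;
}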
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
deleted file mode 100644
index ac96ae236709..000000000000
--- a/arch/sparc/lib/user_fixup.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* user_fixup.c: Fix up user copy faults.
- *
- * Copyright (C) 2004 David S. Miller <davem@redhat.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/uaccess.h>
-
-/* Calculating the exact fault address when using
- * block loads and stores can be very complicated.
- *
- * Instead of trying to be clever and handling all
- * of the cases, just fix things up simply here.
- */
-
-static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
-{
- unsigned long fault_addr = current_thread_info()->fault_address;
- unsigned long end = start + size;
-
- if (fault_addr < start || fault_addr >= end) {
- *offset = 0;
- } else {
- *offset = fault_addr - start;
- size = end - fault_addr;
- }
- return size;
-}
-
-unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
-{
- unsigned long offset;
-
- size = compute_size((unsigned long) from, size, &offset);
- if (likely(size))
- memset(to + offset, 0, size);
-
- return size;
-}
-EXPORT_SYMBOL(copy_from_user_fixup);
-
-unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
-{
- unsigned long offset;
-
- return compute_size((unsigned long) to, size, &offset);
-}
-EXPORT_SYMBOL(copy_to_user_fixup);
-
-unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
-{
- unsigned long fault_addr = current_thread_info()->fault_address;
- unsigned long start = (unsigned long) to;
- unsigned long end = start + size;
-
- if (fault_addr >= start && fault_addr < end)
- return end - fault_addr;
-
- start = (unsigned long) from;
- end = start + size;
- if (fault_addr >= start && fault_addr < end)
- return end - fault_addr;
-
- return size;
-}
-EXPORT_SYMBOL(copy_in_user_fixup);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 439784b7b7ac..37aa537b3ad8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -802,8 +802,10 @@ struct mdesc_mblock {
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
+static int find_numa_node_for_addr(unsigned long pa,
+ struct node_mem_mask *pnode_mask);
-static unsigned long ra_to_pa(unsigned long addr)
+static unsigned long __init ra_to_pa(unsigned long addr)
{
int i;
@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
return addr;
}
-static int find_node(unsigned long addr)
+static int __init find_node(unsigned long addr)
{
+ static bool search_mdesc = true;
+ static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
+ static int last_index;
int i;
addr = ra_to_pa(addr);
@@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
if ((addr & p->mask) == p->val)
return i;
}
- /* The following condition has been observed on LDOM guests.*/
- WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
- " rule. Some physical memory will be owned by node 0.");
- return 0;
+ /* The following condition has been observed on LDOM guests because
+ * node_masks only contains the best latency mask and value.
+ * LDOM guest's mdesc can contain a single latency group to
+ * cover multiple address range. Print warning message only if the
+ * address cannot be found in node_masks nor mdesc.
+ */
+ if ((search_mdesc) &&
+ ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
+ /* find the available node in the mdesc */
+ last_index = find_numa_node_for_addr(addr, &last_mem_mask);
+ numadbg("find_node: latency group for address 0x%lx is %d\n",
+ addr, last_index);
+ if ((last_index < 0) || (last_index >= num_node_masks)) {
+ /* WARN_ONCE() and use default group 0 */
+ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
+ search_mdesc = false;
+ last_index = 0;
+ }
+ }
+
+ return last_index;
}
-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = find_node(start);
start += PAGE_SIZE;
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
return numa_latency[from][to];
}
+static int find_numa_node_for_addr(unsigned long pa,
+ struct node_mem_mask *pnode_mask)
+{
+ struct mdesc_handle *md = mdesc_grab();
+ u64 node, arc;
+ int i = 0;
+
+ node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+ if (node == MDESC_NODE_NULL)
+ goto out;
+
+ mdesc_for_each_node_by_name(md, node, "group") {
+ mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
+ u64 target = mdesc_arc_target(md, arc);
+ struct mdesc_mlgroup *m = find_mlgroup(target);
+
+ if (!m)
+ continue;
+ if ((pa & m->mask) == m->match) {
+ if (pnode_mask) {
+ pnode_mask->mask = m->mask;
+ pnode_mask->val = m->match;
+ }
+ mdesc_release(md);
+ return i;
+ }
+ }
+ i++;
+ }
+
+out:
+ mdesc_release(md);
+ return -1;
+}
+
static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
int i;
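find_node() now falls back to walking the machine descriptor's latency groups when an address matches nothing in node_masks, and it remembers the last successful mask/value pair so a burst of lookups inside the same group does not re-grab the mdesc each time. Below is a rough stand-alone C sketch of that memoisation pattern with invented names; slow_lookup() merely stands in for find_numa_node_for_addr().

#include <assert.h>

struct mem_mask { unsigned long mask, val; };

/* Stand-in for the mdesc walk: pretend every 16MB block is one group. */
static int slow_calls;
static int slow_lookup(unsigned long addr, struct mem_mask *out)
{
        slow_calls++;
        out->mask = ~0xffffffUL;
        out->val  = addr & ~0xffffffUL;
        return 1;
}

/* Cache the last matching (mask, val): addresses in the same latency
 * group skip the slow path, mirroring the new find_node(). */
static int cached_find_node(unsigned long addr)
{
        static struct mem_mask last = { ~0UL, ~0UL };
        static int last_index;

        if ((addr & last.mask) != last.val)
                last_index = slow_lookup(addr, &last);
        return last_index;
}

int main(void)
{
        cached_find_node(0x1000000);
        cached_find_node(0x1002000);    /* same 16MB group, served from cache */
        cached_find_node(0x1004000);
        assert(slow_calls == 1);
        return 0;
}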
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f2b77112e9d8..e20fbbafb0b0 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
return (tag == (vaddr >> 22));
}
+static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
+ struct tsb *ent = &swapper_tsb[idx];
+ unsigned long match = idx << 13;
+
+ match |= (ent->tag << 22);
+ if (match >= start && match < end)
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
+}
+
/* TSB flushes need only occur on the processor initiating the address
* space modification, not on each cpu the address space has run on.
* Only the TLB flush needs that treatment.
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long v;
+ if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
+ return flush_tsb_kernel_range_scan(start, end);
+
for (v = start; v < end; v += PAGE_SIZE) {
unsigned long hash = tsb_hash(v, PAGE_SHIFT,
KERNEL_TSB_NENTRIES);
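flush_tsb_kernel_range() previously probed one TSB slot per page in the range; the new scan path walks every kernel TSB entry once instead, and the 2x threshold picks whichever is cheaper. The ultra.S hunks that follow apply the same idea to the TLB itself: once a range exceeds 2^18 bytes, the per-page demap loop is abandoned in favour of scanning or demapping the whole TLB. A small C sketch of the cost comparison behind the threshold; PAGE_SHIFT matches sparc64's 8K pages, but the TSB size here is only an illustrative value.

#include <stdio.h>

#define PAGE_SHIFT              13              /* sparc64 8K pages */
#define KERNEL_TSB_NENTRIES     (16 * 1024)     /* illustrative size */

/* Decide, like flush_tsb_kernel_range(), whether to probe one TSB slot
 * per page or to scan every TSB entry once. */
static int use_full_scan(unsigned long start, unsigned long end)
{
        unsigned long pages = (end - start) >> PAGE_SHIFT;

        return pages >= 2 * KERNEL_TSB_NENTRIES;
}

int main(void)
{
        unsigned long start = 0;

        printf("512MB range -> %s\n",
               use_full_scan(start, start + (512UL << 20)) ?
               "scan whole TSB" : "per-page probes");
        printf("4MB range   -> %s\n",
               use_full_scan(start, start + (4UL << 20)) ?
               "scan whole TSB" : "per-page probes");
        return 0;
}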
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index b4f4733abc6e..5d2fd6cd3189 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -30,7 +30,7 @@
.text
.align 32
.globl __flush_tlb_mm
-__flush_tlb_mm: /* 18 insns */
+__flush_tlb_mm: /* 19 insns */
/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
ldxa [%o1] ASI_DMMU, %g2
cmp %g2, %o0
@@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */
.align 32
.globl __flush_tlb_pending
-__flush_tlb_pending: /* 26 insns */
+__flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
sllx %o1, 3, %o1
@@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */
.align 32
.globl __flush_tlb_kernel_range
-__flush_tlb_kernel_range: /* 16 insns */
+__flush_tlb_kernel_range: /* 31 insns */
/* %o0=start, %o1=end */
cmp %o0, %o1
be,pn %xcc, 2f
+ sub %o1, %o0, %o3
+ srlx %o3, 18, %o4
+ brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow
sethi %hi(PAGE_SIZE), %o4
- sub %o1, %o0, %o3
sub %o3, %o4, %o3
or %o0, 0x20, %o0 ! Nucleus
1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
@@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */
retl
nop
nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+__spitfire_flush_tlb_kernel_range_slow:
+ mov 63 * 8, %o4
+1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3
+ andcc %o3, 0x40, %g0 /* _PAGE_L_4U */
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %o3
+ stxa %g0, [%o3] ASI_IMMU
+ stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3
+ andcc %o3, 0x40, %g0
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %o3
+ stxa %g0, [%o3] ASI_DMMU
+ stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: sub %o4, 8, %o4
+ brgez,pt %o4, 1b
+ nop
+ retl
+ nop
__spitfire_flush_tlb_mm_slow:
rdpr %pstate, %g1
@@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */
retl
wrpr %g7, 0x0, %pstate
+__cheetah_flush_tlb_kernel_range: /* 31 insns */
+ /* %o0=start, %o1=end */
+ cmp %o0, %o1
+ be,pn %xcc, 2f
+ sub %o1, %o0, %o3
+ srlx %o3, 18, %o4
+ brnz,pn %o4, 3f
+ sethi %hi(PAGE_SIZE), %o4
+ sub %o3, %o4, %o3
+ or %o0, 0x20, %o0 ! Nucleus
+1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
+ stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %o3, 1b
+ sub %o3, %o4, %o3
+2: sethi %hi(KERNBASE), %o3
+ flush %o3
+ retl
+ nop
+3: mov 0x80, %o4
+ stxa %g0, [%o4] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g0, [%o4] ASI_IMMU_DEMAP
+ membar #Sync
+ retl
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
sethi %hi(PAGE_OFFSET), %g1
@@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error:
ret
restore
-__hypervisor_flush_tlb_mm: /* 10 insns */
+__hypervisor_flush_tlb_mm: /* 19 insns */
mov %o0, %o2 /* ARG2: mmu context */
mov 0, %o0 /* ARG0: CPU lists unimplemented */
mov 0, %o1 /* ARG1: CPU lists unimplemented */
mov HV_MMU_ALL, %o3 /* ARG3: flags */
mov HV_FAST_MMU_DEMAP_CTX, %o5
ta HV_FAST_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_FAST_MMU_DEMAP_CTX, %o1
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o5
+ jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_page: /* 11 insns */
+__hypervisor_flush_tlb_page: /* 22 insns */
/* %o0 = context, %o1 = vaddr */
mov %o0, %g2
mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
@@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_pending: /* 16 insns */
+__hypervisor_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
sllx %o1, 3, %g1
mov %o2, %g2
@@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
brnz,pt %g1, 1b
nop
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_kernel_range: /* 16 insns */
+__hypervisor_flush_tlb_kernel_range: /* 31 insns */
/* %o0=start, %o1=end */
cmp %o0, %o1
be,pn %xcc, 2f
- sethi %hi(PAGE_SIZE), %g3
- mov %o0, %g1
- sub %o1, %g1, %g2
+ sub %o1, %o0, %g2
+ srlx %g2, 18, %g3
+ brnz,pn %g3, 4f
+ mov %o0, %g1
+ sethi %hi(PAGE_SIZE), %g3
sub %g2, %g3, %g2
1: add %g1, %g2, %o0 /* ARG0: virtual address */
mov 0, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 3f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
brnz,pt %g2, 1b
sub %g2, %g3, %g2
2: retl
nop
+3: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+4: mov 0, %o0 /* ARG0: CPU lists unimplemented */
+ mov 0, %o1 /* ARG1: CPU lists unimplemented */
+ mov 0, %o2 /* ARG2: mmu context == nucleus */
+ mov HV_MMU_ALL, %o3 /* ARG3: flags */
+ mov HV_FAST_MMU_DEMAP_CTX, %o5
+ ta HV_FAST_TRAP
+ brnz,pn %o0, 3b
+ mov HV_FAST_MMU_DEMAP_CTX, %o1
+ retl
+ nop
#ifdef DCACHE_ALIASING_POSSIBLE
/* XXX Niagara and friends have an 8K cache, so no aliasing is
@@ -394,43 +511,6 @@ tlb_patch_one:
retl
nop
- .globl cheetah_patch_cachetlbops
-cheetah_patch_cachetlbops:
- save %sp, -128, %sp
-
- sethi %hi(__flush_tlb_mm), %o0
- or %o0, %lo(__flush_tlb_mm), %o0
- sethi %hi(__cheetah_flush_tlb_mm), %o1
- or %o1, %lo(__cheetah_flush_tlb_mm), %o1
- call tlb_patch_one
- mov 19, %o2
-
- sethi %hi(__flush_tlb_page), %o0
- or %o0, %lo(__flush_tlb_page), %o0
- sethi %hi(__cheetah_flush_tlb_page), %o1
- or %o1, %lo(__cheetah_flush_tlb_page), %o1
- call tlb_patch_one
- mov 22, %o2
-
- sethi %hi(__flush_tlb_pending), %o0
- or %o0, %lo(__flush_tlb_pending), %o0
- sethi %hi(__cheetah_flush_tlb_pending), %o1
- or %o1, %lo(__cheetah_flush_tlb_pending), %o1
- call tlb_patch_one
- mov 27, %o2
-
-#ifdef DCACHE_ALIASING_POSSIBLE
- sethi %hi(__flush_dcache_page), %o0
- or %o0, %lo(__flush_dcache_page), %o0
- sethi %hi(__cheetah_flush_dcache_page), %o1
- or %o1, %lo(__cheetah_flush_dcache_page), %o1
- call tlb_patch_one
- mov 11, %o2
-#endif /* DCACHE_ALIASING_POSSIBLE */
-
- ret
- restore
-
#ifdef CONFIG_SMP
/* These are all called by the slaves of a cross call, at
* trap level 1, with interrupts fully disabled.
@@ -447,7 +527,7 @@ cheetah_patch_cachetlbops:
*/
.align 32
.globl xcall_flush_tlb_mm
-xcall_flush_tlb_mm: /* 21 insns */
+xcall_flush_tlb_mm: /* 24 insns */
mov PRIMARY_CONTEXT, %g2
ldxa [%g2] ASI_DMMU, %g3
srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */
nop
nop
nop
+ nop
+ nop
+ nop
.globl xcall_flush_tlb_page
-xcall_flush_tlb_page: /* 17 insns */
+xcall_flush_tlb_page: /* 20 insns */
/* %g5=context, %g1=vaddr */
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
@@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */
retry
nop
nop
+ nop
+ nop
+ nop
.globl xcall_flush_tlb_kernel_range
-xcall_flush_tlb_kernel_range: /* 25 insns */
+xcall_flush_tlb_kernel_range: /* 44 insns */
sethi %hi(PAGE_SIZE - 1), %g2
or %g2, %lo(PAGE_SIZE - 1), %g2
andn %g1, %g2, %g1
andn %g7, %g2, %g7
sub %g7, %g1, %g3
- add %g2, 1, %g2
+ srlx %g3, 18, %g2
+ brnz,pn %g2, 2f
+ add %g2, 1, %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */
brnz,pt %g3, 1b
sub %g3, %g2, %g3
retry
- nop
- nop
+2: mov 63 * 8, %g1
+1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2
+ andcc %g2, 0x40, %g0 /* _PAGE_L_4U */
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g0, [%g2] ASI_IMMU
+ stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2
+ andcc %g2, 0x40, %g0
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g0, [%g2] ASI_DMMU
+ stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: sub %g1, 8, %g1
+ brgez,pt %g1, 1b
+ nop
+ retry
nop
nop
nop
@@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4:
retry
+__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
+ sethi %hi(PAGE_SIZE - 1), %g2
+ or %g2, %lo(PAGE_SIZE - 1), %g2
+ andn %g1, %g2, %g1
+ andn %g7, %g2, %g7
+ sub %g7, %g1, %g3
+ srlx %g3, 18, %g2
+ brnz,pn %g2, 2f
+ add %g2, 1, %g2
+ sub %g3, %g2, %g3
+ or %g1, 0x20, %g1 ! Nucleus
+1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %g3, 1b
+ sub %g3, %g2, %g3
+ retry
+2: mov 0x80, %g2
+ stxa %g0, [%g2] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g0, [%g2] ASI_IMMU_DEMAP
+ membar #Sync
+ retry
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
#ifdef DCACHE_ALIASING_POSSIBLE
.align 32
.globl xcall_flush_dcache_page_cheetah
@@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error:
ba,a,pt %xcc, rtrap
.globl __hypervisor_xcall_flush_tlb_mm
-__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
mov %o0, %g2
mov %o1, %g3
@@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
mov HV_FAST_MMU_DEMAP_CTX, %o5
ta HV_FAST_TRAP
mov HV_FAST_MMU_DEMAP_CTX, %g6
- brnz,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,pn %o0, 1f
mov %o0, %g5
mov %g2, %o0
mov %g3, %o1
@@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
mov %g7, %o5
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
.globl __hypervisor_xcall_flush_tlb_page
-__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+__hypervisor_xcall_flush_tlb_page: /* 20 insns */
/* %g5=ctx, %g1=vaddr */
mov %o0, %g2
mov %o1, %g3
@@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
- brnz,a,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,a,pn %o0, 1f
mov %o0, %g5
mov %g2, %o0
mov %g3, %o1
mov %g4, %o2
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
.globl __hypervisor_xcall_flush_tlb_kernel_range
-__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
+__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
sethi %hi(PAGE_SIZE - 1), %g2
or %g2, %lo(PAGE_SIZE - 1), %g2
andn %g1, %g2, %g1
andn %g7, %g2, %g7
sub %g7, %g1, %g3
+ srlx %g3, 18, %g7
add %g2, 1, %g2
sub %g3, %g2, %g3
mov %o0, %g2
mov %o1, %g4
- mov %o2, %g7
+ brnz,pn %g7, 2f
+ mov %o2, %g7
1: add %g1, %g3, %o0 /* ARG0: virtual address */
mov 0, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
ta HV_MMU_UNMAP_ADDR_TRAP
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
- brnz,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,pn %o0, 1f
mov %o0, %g5
sethi %hi(PAGE_SIZE), %o2
brnz,pt %g3, 1b
sub %g3, %o2, %g3
- mov %g2, %o0
+5: mov %g2, %o0
mov %g4, %o1
mov %g7, %o2
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
+2: mov %o3, %g1
+ mov %o5, %g3
+ mov 0, %o0 /* ARG0: CPU lists unimplemented */
+ mov 0, %o1 /* ARG1: CPU lists unimplemented */
+ mov 0, %o2 /* ARG2: mmu context == nucleus */
+ mov HV_MMU_ALL, %o3 /* ARG3: flags */
+ mov HV_FAST_MMU_DEMAP_CTX, %o5
+ ta HV_FAST_TRAP
+ mov %g1, %o3
+ brz,pt %o0, 5b
+ mov %g3, %o5
+ mov HV_FAST_MMU_DEMAP_CTX, %g6
+ ba,pt %xcc, 1b
+ clr %g5
/* These just get rescheduled to PIL vectors. */
.globl xcall_call_function
@@ -809,6 +985,58 @@ xcall_kgdb_capture:
#endif /* CONFIG_SMP */
+ .globl cheetah_patch_cachetlbops
+cheetah_patch_cachetlbops:
+ save %sp, -128, %sp
+
+ sethi %hi(__flush_tlb_mm), %o0
+ or %o0, %lo(__flush_tlb_mm), %o0
+ sethi %hi(__cheetah_flush_tlb_mm), %o1
+ or %o1, %lo(__cheetah_flush_tlb_mm), %o1
+ call tlb_patch_one
+ mov 19, %o2
+
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__cheetah_flush_tlb_page), %o1
+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
+ call tlb_patch_one
+ mov 22, %o2
+
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__cheetah_flush_tlb_pending), %o1
+ or %o1, %lo(__cheetah_flush_tlb_pending), %o1
+ call tlb_patch_one
+ mov 27, %o2
+
+ sethi %hi(__flush_tlb_kernel_range), %o0
+ or %o0, %lo(__flush_tlb_kernel_range), %o0
+ sethi %hi(__cheetah_flush_tlb_kernel_range), %o1
+ or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
+ call tlb_patch_one
+ mov 31, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ sethi %hi(__flush_dcache_page), %o0
+ or %o0, %lo(__flush_dcache_page), %o0
+ sethi %hi(__cheetah_flush_dcache_page), %o1
+ or %o1, %lo(__cheetah_flush_dcache_page), %o1
+ call tlb_patch_one
+ mov 11, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+#ifdef CONFIG_SMP
+ sethi %hi(xcall_flush_tlb_kernel_range), %o0
+ or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
+ sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
+ or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
+ call tlb_patch_one
+ mov 44, %o2
+#endif /* CONFIG_SMP */
+
+ ret
+ restore
.globl hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
@@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops:
sethi %hi(__hypervisor_flush_tlb_mm), %o1
or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
call tlb_patch_one
- mov 10, %o2
+ mov 19, %o2
sethi %hi(__flush_tlb_page), %o0
or %o0, %lo(__flush_tlb_page), %o0
sethi %hi(__hypervisor_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_flush_tlb_page), %o1
call tlb_patch_one
- mov 11, %o2
+ mov 22, %o2
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__hypervisor_flush_tlb_pending), %o1
or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
call tlb_patch_one
- mov 16, %o2
+ mov 27, %o2
sethi %hi(__flush_tlb_kernel_range), %o0
or %o0, %lo(__flush_tlb_kernel_range), %o0
sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
call tlb_patch_one
- mov 16, %o2
+ mov 31, %o2
#ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0
@@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops:
sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
call tlb_patch_one
- mov 21, %o2
+ mov 24, %o2
sethi %hi(xcall_flush_tlb_page), %o0
or %o0, %lo(xcall_flush_tlb_page), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
call tlb_patch_one
- mov 17, %o2
+ mov 20, %o2
sethi %hi(xcall_flush_tlb_kernel_range), %o0
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
call tlb_patch_one
- mov 25, %o2
+ mov 44, %o2
#endif /* CONFIG_SMP */
ret
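
Note on the patching scheme used throughout the sparc hunk above: cheetah_patch_cachetlbops and hypervisor_patch_cachetlbops overwrite the generic flush routines in place, so every instruction count passed in %o2 (19, 22, 27, 31, 44 and so on) must match the /* N insns */ annotation on the replacement body, which is why the generic xcall routines gain padding nops here. A minimal C sketch of the idea follows; the flush helper is illustrative only, the real tlb_patch_one is SPARC assembly.

/* Hypothetical stand-in for the "flush" instruction the real code issues. */
static void flushi_sketch(unsigned int *insn) { (void)insn; }

static void patch_one_sketch(unsigned int *dst, const unsigned int *src,
			     unsigned int insns)
{
	while (insns--) {
		*dst = *src;		/* copy one 32-bit instruction word */
		flushi_sketch(dst);	/* keep the I-cache coherent */
		dst++;
		src++;
	}
}
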
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 6160761d5f61..4810e48dbbbf 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -61,4 +61,7 @@
*/
#define __write_once __read_mostly
+/* __ro_after_init is the generic name for the tile arch __write_once. */
+#define __ro_after_init __read_mostly
+
#endif /* _ASM_TILE_CACHE_H */
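
For context, __ro_after_init marks data that is written only during boot and treated as read-only afterwards; tile cannot enforce the write protection, so the define above simply maps it to __read_mostly, matching the existing __write_once. A minimal usage sketch (variable and initcall names are illustrative):

#include <linux/init.h>
#include <linux/cache.h>

static unsigned long boot_flags __ro_after_init;

static int __init boot_flags_setup(void)
{
	boot_flags = 0x3;	/* written once at boot, read-only afterwards */
	return 0;
}
early_initcall(boot_flags_setup);
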
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 178989e6d3e3..ea960d660917 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
*/
unsigned long long sched_clock(void)
{
- return clocksource_cyc2ns(get_cycles(),
- sched_clock_mult, SCHED_CLOCK_SHIFT);
+ return mult_frac(get_cycles(),
+ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
}
int setup_profiling_timer(unsigned int multiplier)
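
The old expression computed (cycles * sched_clock_mult) >> SCHED_CLOCK_SHIFT through clocksource_cyc2ns(), whose 64-bit intermediate product overflows for large cycle counts; mult_frac() splits cycles into quotient and remainder of the divisor first, so the intermediate products stay in range whenever the final result itself fits. A hedged sketch of the same arithmetic as an ordinary function:

/* Equivalent of mult_frac(cycles, mult, 1ULL << shift). */
static unsigned long long scale_sketch(unsigned long long cycles,
				       unsigned long long mult,
				       unsigned int shift)
{
	unsigned long long div = 1ULL << shift;
	unsigned long long quot = cycles / div;
	unsigned long long rem = cycles % div;

	return quot * mult + (rem * mult) / div;
}
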
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 536ccfcc01c6..34d9e15857c3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -40,8 +40,8 @@ GCOV_PROFILE := n
UBSAN_SANITIZE :=n
LDFLAGS := -m elf_$(UTS_MACHINE)
-ifeq ($(CONFIG_RELOCATABLE),y)
-# If kernel is relocatable, build compressed kernel as PIE.
+# Compressed kernel should be built as PIE since it may be loaded at any
+# address by the bootloader.
ifeq ($(CONFIG_X86_32),y)
LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
else
@@ -51,7 +51,6 @@ else
LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
endif
-endif
LDFLAGS_vmlinux := -T
hostprogs-y := mkpiggy
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 26240dde081e..4224ede43b4e 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -87,6 +87,12 @@ int validate_cpu(void)
return -1;
}
+ if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
+ !has_eflag(X86_EFLAGS_ID)) {
+ printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
+ return -1;
+ }
+
if (err_flags) {
puts("This kernel requires the following features "
"not present on the CPU:\n");
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 0ab5ee1c26af..aa8b0672f87a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -888,7 +888,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
unsigned int i;
/* Assuming we are supporting rfc4106 64-bit extended */
@@ -968,7 +968,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 authTag[16];
struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index f5f4b3fbbbc2..afb222b63cae 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void)
pr_cont("Fam15h ");
x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
break;
-
+ case 0x17:
+ pr_cont("Fam17h ");
+ /*
+ * In family 17h, there are no event constraints in the PMC hardware.
+ * We fall back to using the default amd_get_event_constraints.
+ */
+ break;
default:
pr_err("core perfctr but no constraints; unknown hardware!\n");
return -ENODEV;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d31735f37ed7..9d4bf3ab049e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
frame.next_frame = 0;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, 8))
+ if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
if (bytes != 0)
break;
- if (!valid_user_frame(fp, sizeof(frame)))
- break;
-
perf_callchain_store(entry, cs_base + frame.return_address);
fp = compat_ptr(ss_base + frame.next_frame);
}
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
frame.next_frame = NULL;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
+ if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
if (bytes != 0)
break;
- if (!valid_user_frame(fp, sizeof(frame)))
- break;
-
perf_callchain_store(entry, frame.return_address);
fp = (void __user *)frame.next_frame;
}
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 0319311dbdbb..be202390bbd3 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
}
/*
- * We use the interrupt regs as a base because the PEBS record
- * does not contain a full regs set, specifically it seems to
- * lack segment descriptors, which get used by things like
- * user_mode().
+ * We use the interrupt regs as a base because the PEBS record does not
+ * contain a full regs set, specifically it seems to lack segment
+ * descriptors, which get used by things like user_mode().
*
- * In the simple case fix up only the IP and BP,SP regs, for
- * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
- * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+ * In the simple case fix up only the IP for PERF_SAMPLE_IP.
+ *
+ * We must however always use BP,SP from iregs for the unwinder to stay
+ * sane; the record BP,SP can point into thin air when the record is
+ * from a previous PMI context or an (I)RET happened between the record
+ * and PMI.
*/
*regs = *iregs;
regs->flags = pebs->flags;
set_linear_ip(regs, pebs->ip);
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;
if (sample_type & PERF_SAMPLE_REGS_INTR) {
regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
regs->dx = pebs->dx;
regs->si = pebs->si;
regs->di = pebs->di;
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;
- regs->flags = pebs->flags;
+ /*
+ * Per the above; only set BP,SP if we don't need callchains.
+ *
+ * XXX: does this make sense?
+ */
+ if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ regs->bp = pebs->bp;
+ regs->sp = pebs->sp;
+ }
+
+ /*
+ * Preserve PERF_EFLAGS_VM from set_linear_ip().
+ */
+ regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
#ifndef CONFIG_X86_32
regs->r8 = pebs->r8;
regs->r9 = pebs->r9;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index efca2685d876..dbaaf7dc8373 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
*/
static int uncore_pmu_event_init(struct perf_event *event);
-static bool is_uncore_event(struct perf_event *event)
+static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
- return event->pmu->event_init == uncore_pmu_event_init;
+ return &box->pmu->pmu == event->pmu;
}
static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
n = box->n_events;
- if (is_uncore_event(leader)) {
+ if (is_box_event(box, leader)) {
box->event_list[n] = leader;
n++;
}
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
- if (!is_uncore_event(event) ||
+ if (!is_box_event(box, event) ||
event->state <= PERF_EVENT_STATE_OFF)
continue;
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 5f845eef9a4d..a3dcc12bef4a 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -8,8 +8,12 @@
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
-#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f
-#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
+#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
+#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
+#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
+#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -486,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
snb_uncore_imc_event_start(event, 0);
- box->n_events++;
-
return 0;
}
static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
- struct intel_uncore_box *box = uncore_event_to_box(event);
- int i;
-
snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-
- for (i = 0; i < box->n_events; i++) {
- if (event == box->event_list[i]) {
- --box->n_events;
- break;
- }
- }
}
int snb_pci2phy_map_init(int devid)
@@ -616,13 +608,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
static const struct pci_device_id skl_uncore_pci_ids[] = {
{ /* IMC */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ },
};
@@ -666,8 +674,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */
- IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */
+ IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */
IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */
+ IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
+ IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
+ IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
+ IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
{ /* end marker */ }
};
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 5874d8de1f8d..a77ee026643d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -113,7 +113,7 @@ struct debug_store {
* Per register state.
*/
struct er_account {
- raw_spinlock_t lock; /* per-core: protect structure */
+ raw_spinlock_t lock; /* per-core: protect structure */
u64 config; /* extra MSR config */
u64 reg; /* extra MSR number */
atomic_t ref; /* reference count */
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 5b6753d1f7f4..49da9f497b90 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -17,6 +17,7 @@
extern int intel_mid_pci_init(void);
extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev);
extern void intel_mid_pwr_power_off(void);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4b20f7304b9c..bdde80731f49 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -948,7 +948,6 @@ struct kvm_x86_ops {
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
bool (*invpcid_supported)(void);
- void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -958,8 +957,6 @@ struct kvm_x86_ops {
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
- u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
-
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
int (*check_intercept)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index c2b8d24a235c..d74747b031ec 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -29,9 +29,20 @@ struct kvm_page_track_notifier_node {
* @gpa: the physical address written by guest.
* @new: the data was written to the address.
* @bytes: the written length.
+ * @node: this node
*/
void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
- int bytes);
+ int bytes, struct kvm_page_track_notifier_node *node);
+ /*
+ * It is called when a memory slot is being moved or removed;
+ * users can drop write-protection for the pages in that memory slot.
+ *
+ * @kvm: the kvm where the memory slot is being moved or removed
+ * @slot: the memory slot being moved or removed
+ * @node: this node
+ */
+ void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node);
};
void kvm_page_track_init(struct kvm *kvm);
@@ -58,4 +69,5 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
#endif
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index c7364bd633e1..51287cd90bf6 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
if (apm_info.get_power_status_broken)
return APM_32_UNSUPPORTED;
- if (apm_bios_call(&call))
+ if (apm_bios_call(&call)) {
+ if (!call.err)
+ return APM_NO_ERROR;
return call.err;
+ }
*status = call.ebx;
*bat = call.ecx;
if (apm_info.get_power_status_swabinminutes) {
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b81fe2d63e15..1e81a37c034e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
unsigned bits;
int cpu = smp_processor_id();
- unsigned int socket_id, core_complex_id;
bits = c->x86_coreid_bits;
/* Low order bits define the core id (index of core in socket) */
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
return;
- socket_id = (c->apicid >> bits) - 1;
- core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
-
- per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
+ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
#endif
}
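
The simplification relies on the Fam17h topology where the shared L3 belongs to a core complex spanning eight APIC IDs (four cores, two threads each), so the LLC id is just the APIC id with the low three bits dropped. A small worked sketch, assuming that layout:

/* Example: APIC id 0x1a (binary 11010) -> LLC id 0x3, the fourth CCX. */
static unsigned int fam17h_llc_id(unsigned int apicid)
{
	return apicid >> 3;
}
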
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9bd910a7dd0a..cc9e980c68ec 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -979,6 +979,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
}
/*
+ * The physical to logical package id mapping is initialized from the
+ * acpi/mptables information. Make sure that CPUID actually agrees with
+ * that.
+ */
+static void sanitize_package_id(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ unsigned int pkg, apicid, cpu = smp_processor_id();
+
+ apicid = apic->cpu_present_to_apicid(cpu);
+ pkg = apicid >> boot_cpu_data.x86_coreid_bits;
+
+ if (apicid != c->initial_apicid) {
+ pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
+ cpu, apicid, c->initial_apicid);
+ c->initial_apicid = apicid;
+ }
+ if (pkg != c->phys_proc_id) {
+ pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
+ cpu, pkg, c->phys_proc_id);
+ c->phys_proc_id = pkg;
+ }
+ c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
+#else
+ c->logical_proc_id = 0;
+#endif
+}
+
+/*
* This does the hard work of actually picking apart the CPU stuff...
*/
static void identify_cpu(struct cpuinfo_x86 *c)
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_NUMA
numa_add_cpu(smp_processor_id());
#endif
- /* The boot/hotplug time assigment got cleared, restore it */
- c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
+ sanitize_package_id(c);
}
/*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9b7cf5c28f5f..85f854b98a9d 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
for (; stack < stack_info.end; stack++) {
unsigned long real_addr;
int reliable = 0;
- unsigned long addr = *stack;
+ unsigned long addr = READ_ONCE_NOCHECK(*stack);
unsigned long *ret_addr_p =
unwind_get_return_address_ptr(&state);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 47004010ad5d..ebb4e95fbd74 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
- /* FPU state will be reallocated lazily at the first use. */
- fpu__drop(fpu);
- } else {
- if (!fpu->fpstate_active) {
- fpu__activate_curr(fpu);
- user_fpu_begin();
- }
+ fpu__drop(fpu);
+
+ /*
+ * Make sure fpstate is cleared and initialized.
+ */
+ if (static_cpu_has(X86_FEATURE_FPU)) {
+ fpu__activate_curr(fpu);
+ user_fpu_begin();
copy_init_fpstate_to_fpregs();
}
}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b6b2f0264af3..2dabea46f039 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS
initial_pg_pmd:
.fill 1024*KPMDS,4,0
#else
-ENTRY(initial_page_table)
+.globl initial_page_table
+initial_page_table:
.fill 1024,4,0
#endif
initial_pg_fixmap:
.fill 1024,4,0
-ENTRY(empty_zero_page)
+.globl empty_zero_page
+empty_zero_page:
.fill 4096,1,0
-ENTRY(swapper_pg_dir)
+.globl swapper_pg_dir
+swapper_pg_dir:
.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 764a29f84de7..85195d447a92 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si,
{
struct platform_device *pd;
struct resource res;
- unsigned long len;
+ u64 base, size;
+ u32 length;
- /* don't use lfb_size as it may contain the whole VMEM instead of only
- * the part that is occupied by the framebuffer */
- len = mode->height * mode->stride;
- len = PAGE_ALIGN(len);
- if (len > (u64)si->lfb_size << 16) {
+ /*
+ * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
+ * upper half of the base address. Assemble the address, then make sure
+ * it is valid and we can actually access it.
+ */
+ base = si->lfb_base;
+ if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)si->ext_lfb_base << 32;
+ if (!base || (u64)(resource_size_t)base != base) {
+ printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Don't use lfb_size as IORESOURCE size, since it may contain the
+ * entire VMEM, and thus require huge mappings. Use just the part we
+ * need, that is, the part where the framebuffer is located. But verify
+ * that it does not exceed the advertised VMEM.
+ * Note that in case of VBE, the lfb_size is shifted by 16 bits for
+ * historical reasons.
+ */
+ size = si->lfb_size;
+ if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+ size <<= 16;
+ length = mode->height * mode->stride;
+ length = PAGE_ALIGN(length);
+ if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return -EINVAL;
}
@@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si,
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
res.name = simplefb_resname;
- res.start = si->lfb_base;
- res.end = si->lfb_base + len - 1;
+ res.start = base;
+ res.end = res.start + length - 1;
if (res.end <= res.start)
return -EINVAL;
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 2d721e533cf4..b80e8bf43cc6 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -7,11 +7,13 @@
unsigned long unwind_get_return_address(struct unwind_state *state)
{
+ unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
+
if (unwind_done(state))
return 0;
return ftrace_graph_ret_addr(state->task, &state->graph_idx,
- *state->sp, state->sp);
+ addr, state->sp);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
@@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state)
return false;
do {
+ unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
+
for (state->sp++; state->sp < info->end; state->sp++)
- if (__kernel_text_address(*state->sp))
+ if (__kernel_text_address(addr))
return true;
state->sp = info->next_sp;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4e95d3eb2955..a3ce9d260d68 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
- unsigned short sel, old_sel;
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
+ unsigned short sel;
+ struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
- /* Assignment of RIP may only fail in 64-bit mode */
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
- VCPU_SREG_CS);
-
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- /* assigning eip failed; restore the old cs */
- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
- return rc;
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}
@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
- u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
-
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
- VCPU_SREG_CS);
+ struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}
@@ -5045,7 +5031,7 @@ done_prefixes:
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
- if (ctxt->rip_relative)
+ if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 1a22de70f7f7..6e219e5c07d2 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 7d2692a49657..1cc6e54436db 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -42,13 +42,13 @@ struct kvm_vcpu;
struct dest_map {
/* vcpu bitmap where IRQ has been sent */
- DECLARE_BITMAP(map, KVM_MAX_VCPUS);
+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
/*
* Vector sent to a given vcpu, only valid when
* the vcpu's bit in map is set
*/
- u8 vectors[KVM_MAX_VCPUS];
+ u8 vectors[KVM_MAX_VCPU_ID];
};
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 25810b144b58..6c0191615f23 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_pic *pic = pic_irqchip(kvm);
+
+ /*
+ * XXX: rejecting pic routes when pic isn't in use would be better,
+ * but the default routing table is installed while kvm->arch.vpic is
+ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
+ */
+ if (!pic)
+ return -1;
+
return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}
@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ if (!ioapic)
+ return -1;
+
return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
line_status);
}
@@ -156,6 +169,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
}
+static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ if (!level)
+ return -1;
+
+ return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
+}
+
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
@@ -163,18 +186,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq irq;
int r;
- if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
- return -EWOULDBLOCK;
+ switch (e->type) {
+ case KVM_IRQ_ROUTING_HV_SINT:
+ return kvm_hv_set_sint(e, kvm, irq_source_id, level,
+ line_status);
- if (kvm_msi_route_invalid(kvm, e))
- return -EINVAL;
+ case KVM_IRQ_ROUTING_MSI:
+ if (kvm_msi_route_invalid(kvm, e))
+ return -EINVAL;
- kvm_set_msi_irq(kvm, e, &irq);
+ kvm_set_msi_irq(kvm, e, &irq);
- if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
- return r;
- else
- return -EWOULDBLOCK;
+ if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
+ return r;
+ break;
+
+ default:
+ break;
+ }
+
+ return -EWOULDBLOCK;
}
int kvm_request_irq_source_id(struct kvm *kvm)
@@ -254,16 +285,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
-static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- if (!level)
- return -1;
-
- return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
-}
-
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
@@ -423,18 +444,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
-int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
- int irq_source_id, int level, bool line_status)
-{
- switch (irq->type) {
- case KVM_IRQ_ROUTING_HV_SINT:
- return kvm_hv_set_sint(irq, kvm, irq_source_id, level,
- line_status);
- default:
- return -EWOULDBLOCK;
- }
-}
-
void kvm_arch_irq_routing_update(struct kvm *kvm)
{
kvm_hv_irq_routing_update(kvm);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 23b99f305382..6f69340f9fa3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
*mask = dest_id & 0xff;
return true;
case KVM_APIC_MODE_XAPIC_CLUSTER:
- *cluster = map->xapic_cluster_map[dest_id >> 4];
+ *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
*mask = dest_id & 0xf;
return true;
default:
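
In xAPIC logical cluster mode the 8-bit logical id carries the cluster number in bits 7:4 and a member bitmask in bits 3:0, so a well-formed id never indexes past the 16-entry xapic_cluster_map; the added mask keeps a malformed, guest-controlled dest_id from doing so. A hedged restatement of the bounded lookup:

#define XAPIC_NR_CLUSTERS 16	/* size of map->xapic_cluster_map */

static unsigned int xapic_cluster_index(unsigned int dest_id)
{
	/* bits 7:4 select the cluster; the mask bounds a bogus dest_id */
	return (dest_id >> 4) & (XAPIC_NR_CLUSTERS - 1);
}
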
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d9c7e986b4e4..87c5880ba3b7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4405,7 +4405,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
}
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes)
+ const u8 *new, int bytes,
+ struct kvm_page_track_notifier_node *node)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
@@ -4617,11 +4618,19 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
init_kvm_mmu(vcpu);
}
+static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+}
+
void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
node->track_write = kvm_mmu_pte_write;
+ node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
}
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index b431539c3714..4a1c13eaa518 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -106,6 +106,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
kvm_flush_remote_tlbs(kvm);
}
+EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
/*
* remove the guest page from the tracking pool which stops the interception
@@ -135,6 +136,7 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
*/
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
+EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
/*
* check if the corresponding access on the specified guest page is tracked.
@@ -181,6 +183,7 @@ kvm_page_track_register_notifier(struct kvm *kvm,
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
spin_unlock(&kvm->mmu_lock);
}
+EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
/*
* stop receiving the event interception. It is the opposed operation of
@@ -199,6 +202,7 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
synchronize_srcu(&head->track_srcu);
}
+EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
/*
* Notify the node that write access is intercepted and write emulation is
@@ -222,6 +226,31 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
if (n->track_write)
- n->track_write(vcpu, gpa, new, bytes);
+ n->track_write(vcpu, gpa, new, bytes, n);
+ srcu_read_unlock(&head->track_srcu, idx);
+}
+
+/*
+ * Notify the node that the memory slot is being removed or moved so that it can
+ * drop write-protection for the pages in the memory slot.
+ *
+ * The node should figure out whether it has any write-protected pages in this slot
+ * by itself.
+ */
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ struct kvm_page_track_notifier_head *head;
+ struct kvm_page_track_notifier_node *n;
+ int idx;
+
+ head = &kvm->arch.track_notifier_head;
+
+ if (hlist_empty(&head->track_notifier_list))
+ return;
+
+ idx = srcu_read_lock(&head->track_srcu);
+ hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
+ if (n->track_flush_slot)
+ n->track_flush_slot(kvm, slot, n);
srcu_read_unlock(&head->track_srcu, idx);
}
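
With the page-tracking entry points exported, a modular consumer (such as a GPU mediation driver) can register a notifier implementing both hooks added above. A minimal registration sketch; everything prefixed my_ is illustrative:

#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			   int bytes, struct kvm_page_track_notifier_node *node)
{
	/* re-shadow whatever guest structure lives at gpa */
}

static void my_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
				struct kvm_page_track_notifier_node *node)
{
	/* drop any write protection held on pages in this slot */
}

static struct kvm_page_track_notifier_node my_node = {
	.track_write	  = my_track_write,
	.track_flush_slot = my_track_flush_slot,
};

static void my_attach(struct kvm *kvm)
{
	kvm_page_track_register_notifier(kvm, &my_node);
}
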
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f8157a36ab09..8ca1eca5038d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1138,21 +1138,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}
-static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- svm->vmcb->control.tsc_offset += adjustment;
- if (is_guest_mode(vcpu))
- svm->nested.hsave->control.tsc_offset += adjustment;
- else
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- svm->vmcb->control.tsc_offset - adjustment,
- svm->vmcb->control.tsc_offset);
-
- mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
-}
-
static void avic_init_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb;
@@ -3449,12 +3434,6 @@ static int cr8_write_interception(struct vcpu_svm *svm)
return 0;
}
-static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
- struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
- return vmcb->control.tsc_offset + host_tsc;
-}
-
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -5422,8 +5401,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
.write_tsc_offset = svm_write_tsc_offset,
- .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
- .read_l1_tsc = svm_read_l1_tsc,
.set_tdp_cr3 = set_tdp_cr3,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cf1b16dbc98a..5382b82462fc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -187,6 +187,7 @@ struct vmcs {
*/
struct loaded_vmcs {
struct vmcs *vmcs;
+ struct vmcs *shadow_vmcs;
int cpu;
int launched;
struct list_head loaded_vmcss_on_cpu_link;
@@ -411,7 +412,6 @@ struct nested_vmx {
* memory during VMXOFF, VMCLEAR, VMPTRLD.
*/
struct vmcs12 *cached_vmcs12;
- struct vmcs *current_shadow_vmcs;
/*
* Indicates if the shadow vmcs must be updated with the
* data hold by vmcs12
@@ -421,7 +421,6 @@ struct nested_vmx {
/* vmcs02_list cache of VMCSs recently used to run L2 guests */
struct list_head vmcs02_pool;
int vmcs02_num;
- u64 vmcs01_tsc_offset;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
@@ -1419,6 +1418,8 @@ static void vmcs_clear(struct vmcs *vmcs)
static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
vmcs_clear(loaded_vmcs->vmcs);
+ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
loaded_vmcs->cpu = -1;
loaded_vmcs->launched = 0;
}
@@ -2605,20 +2606,6 @@ static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
}
/*
- * Like guest_read_tsc, but always returns L1's notion of the timestamp
- * counter, even if a nested guest (L2) is currently running.
- */
-static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
- u64 tsc_offset;
-
- tsc_offset = is_guest_mode(vcpu) ?
- to_vmx(vcpu)->nested.vmcs01_tsc_offset :
- vmcs_read64(TSC_OFFSET);
- return host_tsc + tsc_offset;
-}
-
-/*
* writes 'offset' into guest's timestamp counter offset register
*/
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
@@ -2631,7 +2618,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
* to the newly set TSC to get L2's TSC.
*/
struct vmcs12 *vmcs12;
- to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
/* recalculate vmcs02.TSC_OFFSET: */
vmcs12 = get_vmcs12(vcpu);
vmcs_write64(TSC_OFFSET, offset +
@@ -2644,19 +2630,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
}
}
-static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
- u64 offset = vmcs_read64(TSC_OFFSET);
-
- vmcs_write64(TSC_OFFSET, offset + adjustment);
- if (is_guest_mode(vcpu)) {
- /* Even when running L2, the adjustment needs to apply to L1 */
- to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
- } else
- trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
- offset + adjustment);
-}
-
static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -3562,6 +3535,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
loaded_vmcs_clear(loaded_vmcs);
free_vmcs(loaded_vmcs->vmcs);
loaded_vmcs->vmcs = NULL;
+ WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}
static void free_kvm_area(void)
@@ -6696,6 +6670,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
if (!item)
return NULL;
item->vmcs02.vmcs = alloc_vmcs();
+ item->vmcs02.shadow_vmcs = NULL;
if (!item->vmcs02.vmcs) {
kfree(item);
return NULL;
@@ -7072,7 +7047,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
vmcs_clear(shadow_vmcs);
- vmx->nested.current_shadow_vmcs = shadow_vmcs;
+ vmx->vmcs01.shadow_vmcs = shadow_vmcs;
}
INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -7174,8 +7149,11 @@ static void free_nested(struct vcpu_vmx *vmx)
free_page((unsigned long)vmx->nested.msr_bitmap);
vmx->nested.msr_bitmap = NULL;
}
- if (enable_shadow_vmcs)
- free_vmcs(vmx->nested.current_shadow_vmcs);
+ if (enable_shadow_vmcs) {
+ vmcs_clear(vmx->vmcs01.shadow_vmcs);
+ free_vmcs(vmx->vmcs01.shadow_vmcs);
+ vmx->vmcs01.shadow_vmcs = NULL;
+ }
kfree(vmx->nested.cached_vmcs12);
/* Unpin physical memory we referred to in current vmcs02 */
if (vmx->nested.apic_access_page) {
@@ -7352,7 +7330,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
int i;
unsigned long field;
u64 field_value;
- struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
const unsigned long *fields = shadow_read_write_fields;
const int num_fields = max_shadow_read_write_fields;
@@ -7401,7 +7379,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
int i, q;
unsigned long field;
u64 field_value = 0;
- struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
vmcs_load(shadow_vmcs);
@@ -7591,7 +7569,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER,
- __pa(vmx->nested.current_shadow_vmcs));
+ __pa(vmx->vmcs01.shadow_vmcs));
vmx->nested.sync_shadow_vmcs = true;
}
}
@@ -7659,7 +7637,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
- if (!(types & (1UL << type))) {
+ if (type >= 32 || !(types & (1 << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
skip_emulated_instruction(vcpu);
@@ -7722,7 +7700,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
- if (!(types & (1UL << type))) {
+ if (type >= 32 || !(types & (1 << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
skip_emulated_instruction(vcpu);
@@ -9156,6 +9134,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->loaded_vmcs = &vmx->vmcs01;
vmx->loaded_vmcs->vmcs = alloc_vmcs();
+ vmx->loaded_vmcs->shadow_vmcs = NULL;
if (!vmx->loaded_vmcs->vmcs)
goto free_msrs;
if (!vmm_exclusive)
@@ -10061,9 +10040,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vmcs_write64(TSC_OFFSET,
- vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+ vcpu->arch.tsc_offset + vmcs12->tsc_offset);
else
- vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
@@ -10293,8 +10272,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
enter_guest_mode(vcpu);
- vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
-
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
@@ -10818,7 +10795,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
load_vmcs12_host_state(vcpu, vmcs12);
/* Update any VMCS fields that might have changed while L2 ran */
- vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (vmx->hv_deadline_tsc == -1)
vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
@@ -11339,8 +11316,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
.write_tsc_offset = vmx_write_tsc_offset,
- .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
- .read_l1_tsc = vmx_read_l1_tsc,
.set_tdp_cr3 = vmx_set_cr3,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e375235d81c9..073eaeabc2a7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
+ unsigned long flags;
+ /*
+ * Disabling irqs at this point since the following code could be
+ * interrupted and executed through kvm_arch_hardware_disable()
+ */
+ local_irq_save(flags);
+ if (locals->registered) {
+ locals->registered = false;
+ user_return_notifier_unregister(urn);
+ }
+ local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
values->curr = values->host;
}
}
- locals->registered = false;
- user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
@@ -1409,7 +1418,7 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
- return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+ return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
@@ -1547,7 +1556,7 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
s64 adjustment)
{
- kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+ kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
}
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -1555,7 +1564,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
WARN_ON(adjustment < 0);
adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
- kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+ adjust_tsc_offset_guest(vcpu, adjustment);
}
#ifdef CONFIG_X86_64
@@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
static u64 __get_kvmclock_ns(struct kvm *kvm)
{
- struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
struct kvm_arch *ka = &kvm->arch;
- s64 ns;
+ struct pvclock_vcpu_time_info hv_clock;
- if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
- u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
- ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
- } else {
- ns = ktime_get_boot_ns() + ka->kvmclock_offset;
+ spin_lock(&ka->pvclock_gtod_sync_lock);
+ if (!ka->use_master_clock) {
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+ return ktime_get_boot_ns() + ka->kvmclock_offset;
}
- return ns;
+ hv_clock.tsc_timestamp = ka->master_cycle_now;
+ hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+ kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+ &hv_clock.tsc_shift,
+ &hv_clock.tsc_to_system_mul);
+ return __pvclock_read_cycles(&hv_clock, rdtsc());
}
u64 get_kvmclock_ns(struct kvm *kvm)
@@ -2262,7 +2276,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
- vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
+ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
@@ -2280,11 +2294,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (!ignore_msrs) {
- vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
+ vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
msr, data);
return 1;
} else {
- vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
+ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
msr, data);
break;
}
@@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
- case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
@@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#endif
r = 1;
break;
+ case KVM_CAP_ADJUST_CLOCK:
+ r = KVM_CLOCK_TSC_STABLE;
+ break;
case KVM_CAP_X86_SMM:
/* SMBASE is usually relocated above 1M on modern chipsets,
* and SMM handlers might indeed rely on 4G segment limits,
@@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
};
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
+ int idx;
r = -EINVAL;
if (!lapic_in_kernel(vcpu))
@@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_X86_SETUP_MCE: {
@@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_clock_data user_ns;
u64 now_ns;
- now_ns = get_kvmclock_ns(kvm);
+ local_irq_disable();
+ now_ns = __get_kvmclock_ns(kvm);
user_ns.clock = now_ns;
- user_ns.flags = 0;
+ user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
+ local_irq_enable();
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
@@ -7410,10 +7431,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+
kvmclock_reset(vcpu);
- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
kvm_x86_ops->vcpu_free(vcpu);
+ free_cpumask_var(wbinvd_dirty_mask);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
@@ -8153,7 +8176,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
- kvm_mmu_invalidate_zap_all_pages(kvm);
+ kvm_page_track_flush_slot(kvm, slot);
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
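
From the userspace side, the capability change means KVM_CHECK_EXTENSION(KVM_CAP_ADJUST_CLOCK) now returns KVM_CLOCK_TSC_STABLE rather than 1, and KVM_GET_CLOCK sets the same flag when the value came from the masterclock. A minimal sketch of the query:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int clock_is_tsc_stable(int vm_fd)
{
	struct kvm_clock_data data;

	if (ioctl(vm_fd, KVM_GET_CLOCK, &data) < 0)
		return -1;

	return !!(data.flags & KVM_CLOCK_TSC_STABLE);
}
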
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 79ae939970d3..fcd06f7526de 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (early_recursion_flag > 2)
goto halt_loop;
- if (regs->cs != __KERNEL_CS)
+ /*
+ * Old CPUs leave the high bits of CS on the stack
+ * undefined. I'm not sure which CPUs do this, but at least
+ * the 486 DX works this way.
+ */
+ if ((regs->cs & 0xFFFF) != __KERNEL_CS)
goto fail;
/*
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index bf99aa7005eb..936a488d6cf6 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void)
int count = 0, pg_shift = 0;
void *new_memmap = NULL;
efi_status_t status;
- phys_addr_t pa;
+ unsigned long pa;
efi.systab = NULL;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 58b0f801f66f..319148bd4b05 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/ucs2_string.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void)
memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
+/*
+ * Wrapper for slow_virt_to_phys() that handles NULL addresses.
+ */
+static inline phys_addr_t
+virt_to_phys_or_null_size(void *va, unsigned long size)
+{
+ bool bad_size;
+
+ if (!va)
+ return 0;
+
+ if (virt_addr_valid(va))
+ return virt_to_phys(va);
+
+ /*
+ * A fully aligned variable on the stack is guaranteed not to
+ * cross a page bounary. Try to catch strings on the stack by
+ * checking that 'size' is a power of two.
+ */
+ bad_size = size > PAGE_SIZE || !is_power_of_2(size);
+
+ WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
+
+ return slow_virt_to_phys(va);
+}
+
+#define virt_to_phys_or_null(addr) \
+ virt_to_phys_or_null_size((addr), sizeof(*(addr)))
+
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
unsigned long pfn, text;
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
spin_lock(&rtc_lock);
- phys_tm = virt_to_phys(tm);
- phys_tc = virt_to_phys(tc);
+ phys_tm = virt_to_phys_or_null(tm);
+ phys_tc = virt_to_phys_or_null(tc);
status = efi_thunk(get_time, phys_tm, phys_tc);
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
spin_lock(&rtc_lock);
- phys_tm = virt_to_phys(tm);
+ phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_time, phys_tm);
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
spin_lock(&rtc_lock);
- phys_enabled = virt_to_phys(enabled);
- phys_pending = virt_to_phys(pending);
- phys_tm = virt_to_phys(tm);
+ phys_enabled = virt_to_phys_or_null(enabled);
+ phys_pending = virt_to_phys_or_null(pending);
+ phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(get_wakeup_time, phys_enabled,
phys_pending, phys_tm);
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
spin_lock(&rtc_lock);
- phys_tm = virt_to_phys(tm);
+ phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_wakeup_time, enabled, phys_tm);
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
return status;
}
+static unsigned long efi_name_size(efi_char16_t *name)
+{
+ return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
+}
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
- phys_data_size = virt_to_phys(data_size);
- phys_vendor = virt_to_phys(vendor);
- phys_name = virt_to_phys(name);
- phys_attr = virt_to_phys(attr);
- phys_data = virt_to_phys(data);
+ phys_data_size = virt_to_phys_or_null(data_size);
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_attr = virt_to_phys_or_null(attr);
+ phys_data = virt_to_phys_or_null_size(data, *data_size);
status = efi_thunk(get_variable, phys_name, phys_vendor,
phys_attr, phys_data_size, phys_data);
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
- phys_name = virt_to_phys(name);
- phys_vendor = virt_to_phys(vendor);
- phys_data = virt_to_phys(data);
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
/* If data_size is > sizeof(u32) we've got problems */
status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
- phys_name_size = virt_to_phys(name_size);
- phys_vendor = virt_to_phys(vendor);
- phys_name = virt_to_phys(name);
+ phys_name_size = virt_to_phys_or_null(name_size);
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_name = virt_to_phys_or_null_size(name, *name_size);
status = efi_thunk(get_next_variable, phys_name_size,
phys_name, phys_vendor);
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count)
efi_status_t status;
u32 phys_count;
- phys_count = virt_to_phys(count);
+ phys_count = virt_to_phys_or_null(count);
status = efi_thunk(get_next_high_mono_count, phys_count);
return status;
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
{
u32 phys_data;
- phys_data = virt_to_phys(data);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
- phys_storage = virt_to_phys(storage_space);
- phys_remaining = virt_to_phys(remaining_space);
- phys_max = virt_to_phys(max_variable_size);
+ phys_storage = virt_to_phys_or_null(storage_space);
+ phys_remaining = virt_to_phys_or_null(remaining_space);
+ phys_max = virt_to_phys_or_null(max_variable_size);
status = efi_thunk(query_variable_info, attr, phys_storage,
phys_remaining, phys_max);
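The virt_to_phys_or_null_size() helper added above warns when a stack buffer might straddle a page boundary, relying on the fact that an object whose size is a power of two and which is aligned to that size cannot cross a page. A standalone user-space sketch of just that heuristic (addresses and sizes are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static bool is_power_of_2(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    /* Mirror of the WARN_ON condition: flag objects that are too big,
     * not power-of-two sized, or not aligned to their own size. */
    static bool looks_suspicious(uintptr_t va, unsigned long size)
    {
        bool bad_size = size > PAGE_SIZE || !is_power_of_2(size);

        return bad_size || (va & (size - 1)) != 0;
    }

    int main(void)
    {
        printf("%d\n", looks_suspicious(0x7ffc0ff8, 8));  /* 0: aligned u64 */
        printf("%d\n", looks_suspicious(0x7ffc0ffc, 8));  /* 1: may straddle a page */
        printf("%d\n", looks_suspicious(0x7ffc1000, 40)); /* 1: size not a power of two */
        return 0;
    }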
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 429d08be7848..dd6cfa4ad3ac 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
-obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index de734134bc8d..3f1f1c77d090 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -1,5 +1,5 @@
/*
- * platform_wdt.c: Watchdog platform library file
+ * Intel Merrifield watchdog platform device library file
*
* (C) Copyright 2014 Intel Corporation
* Author: David Cohen <david.a.cohen@linux.intel.com>
@@ -14,7 +14,9 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/intel-mid_wdt.h>
+
#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>
#define TANGIER_EXT_TIMER0_MSI 15
@@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = {
.probe = tangier_probe,
};
-static int __init register_mid_wdt(void)
+static int wdt_scu_status_change(struct notifier_block *nb,
+ unsigned long code, void *data)
{
- if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
- wdt_dev.dev.platform_data = &tangier_pdata;
- return platform_device_register(&wdt_dev);
+ if (code == SCU_DOWN) {
+ platform_device_unregister(&wdt_dev);
+ return 0;
}
- return -ENODEV;
+ return platform_device_register(&wdt_dev);
}
+static struct notifier_block wdt_scu_notifier = {
+ .notifier_call = wdt_scu_status_change,
+};
+
+static int __init register_mid_wdt(void)
+{
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return -ENODEV;
+
+ wdt_dev.dev.platform_data = &tangier_pdata;
+
+ /*
+ * We need to be sure that the SCU IPC is ready before the watchdog
+ * device can be registered:
+ */
+ intel_scu_notifier_add(&wdt_scu_notifier);
+
+ return 0;
+}
rootfs_initcall(register_mid_wdt);
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index 5d3b45ad1c03..67375dda451c 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
}
EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
+pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
+{
+ struct mid_pwr *pwr = midpwr;
+ int id, reg, bit;
+ u32 power;
+
+ if (!pwr || !pwr->available)
+ return PCI_UNKNOWN;
+
+ id = intel_mid_pwr_get_lss_id(pdev);
+ if (id < 0)
+ return PCI_UNKNOWN;
+
+ reg = (id * LSS_PWS_BITS) / 32;
+ bit = (id * LSS_PWS_BITS) % 32;
+ power = mid_pwr_get_state(pwr, reg);
+ return (__force pci_power_t)((power >> bit) & 3);
+}
+
void intel_mid_pwr_power_off(void)
{
struct mid_pwr *pwr = midpwr;
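In intel_mid_pci_get_power_state() above, each logical subsystem (LSS) occupies LSS_PWS_BITS consecutive bits in an array of 32-bit power-state registers; the final "& 3" suggests two bits per LSS. A small standalone demonstration of the register/bit arithmetic, assuming LSS_PWS_BITS is 2 and using a made-up LSS number:

    #include <stdio.h>

    #define LSS_PWS_BITS 2 /* assumption: two power-state bits per LSS */

    int main(void)
    {
        int id = 21;                        /* hypothetical LSS number */
        int reg = (id * LSS_PWS_BITS) / 32; /* which 32-bit PWS register */
        int bit = (id * LSS_PWS_BITS) % 32; /* bit offset within that register */

        printf("LSS %d -> register %d, bits %d..%d\n",
               id, reg, bit, bit + LSS_PWS_BITS - 1);
        return 0;
    }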
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index ac58c1616408..555b9fa0ad43 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
KBUILD_CFLAGS += -m$(BITS)
+KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index de9b14b2d348..cd400af4a6b2 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6)
#define __NR_pwritev2 347
__SYSCALL(347, sys_pwritev2, 6)
-#define __NR_syscall_count 348
+#define __NR_pkey_mprotect 348
+__SYSCALL(348, sys_pkey_mprotect, 4)
+#define __NR_pkey_alloc 349
+__SYSCALL(349, sys_pkey_alloc, 2)
+#define __NR_pkey_free 350
+__SYSCALL(350, sys_pkey_free, 1)
+
+#define __NR_syscall_count 351
/*
* sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 9a5bcd0381a7..be81e69b25bc 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -172,10 +172,11 @@ void __init time_init(void)
{
of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
- printk("Calibrating CPU frequency ");
+ pr_info("Calibrating CPU frequency ");
calibrate_ccount();
- printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
- (int)(ccount_freq/10000)%100);
+ pr_cont("%d.%02d MHz\n",
+ (int)ccount_freq / 1000000,
+ (int)(ccount_freq / 10000) % 100);
#else
ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
@@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
void calibrate_delay(void)
{
loops_per_jiffy = ccount_freq / HZ;
- printk("Calibrating delay loop (skipped)... "
- "%lu.%02lu BogoMIPS preset\n",
- loops_per_jiffy/(1000000/HZ),
- (loops_per_jiffy/(10000/HZ)) % 100);
+ pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
+ loops_per_jiffy / (1000000 / HZ),
+ (loops_per_jiffy / (10000 / HZ)) % 100);
}
#endif
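The pr_info() call above prints the BogoMIPS value with two decimal places using only integer arithmetic. A standalone version of the same formatting trick; HZ and loops_per_jiffy are illustrative values, not taken from real hardware:

    #include <stdio.h>

    #define HZ 100UL

    int main(void)
    {
        unsigned long loops_per_jiffy = 123456;

        /* whole part: loops per second / 1e6; fraction: the next two digits */
        printf("%lu.%02lu BogoMIPS preset\n",
               loops_per_jiffy / (1000000 / HZ),
               (loops_per_jiffy / (10000 / HZ)) % 100);
        return 0;
    }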
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index d02fc304b31c..ce37d5b899fe 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs)
for (i = 0; i < 16; i++) {
if ((i % 8) == 0)
- printk(KERN_INFO "a%02d:", i);
- printk(KERN_CONT " %08lx", regs->areg[i]);
+ pr_info("a%02d:", i);
+ pr_cont(" %08lx", regs->areg[i]);
}
- printk(KERN_CONT "\n");
-
- printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
- regs->pc, regs->ps, regs->depc, regs->excvaddr);
- printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
- regs->lbeg, regs->lend, regs->lcount, regs->sar);
+ pr_cont("\n");
+ pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+ regs->pc, regs->ps, regs->depc, regs->excvaddr);
+ pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+ regs->lbeg, regs->lend, regs->lcount, regs->sar);
if (user_mode(regs))
- printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
- regs->windowbase, regs->windowstart, regs->wmask,
- regs->syscall);
+ pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+ regs->windowbase, regs->windowstart, regs->wmask,
+ regs->syscall);
}
static int show_trace_cb(struct stackframe *frame, void *data)
{
if (kernel_text_address(frame->pc)) {
- printk(" [<%08lx>] ", frame->pc);
- print_symbol("%s\n", frame->pc);
+ pr_cont(" [<%08lx>]", frame->pc);
+ print_symbol(" %s\n", frame->pc);
}
return 0;
}
@@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp)
if (!sp)
sp = stack_pointer(task);
- printk("Call Trace:");
-#ifdef CONFIG_KALLSYMS
- printk("\n");
-#endif
+ pr_info("Call Trace:\n");
walk_stackframe(sp, show_trace_cb, NULL);
- printk("\n");
+#ifndef CONFIG_KALLSYMS
+ pr_cont("\n");
+#endif
}
-/*
- * This routine abuses get_user()/put_user() to reference pointers
- * with at least a bit of error checking ...
- */
-
static int kstack_depth_to_print = 24;
void show_stack(struct task_struct *task, unsigned long *sp)
@@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp)
sp = stack_pointer(task);
stack = sp;
- printk("\nStack: ");
+ pr_info("Stack:\n");
for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(sp))
break;
- if (i && ((i % 8) == 0))
- printk("\n ");
- printk("%08lx ", *sp++);
+ pr_cont(" %08lx", *sp++);
+ if (i % 8 == 7)
+ pr_cont("\n");
}
- printk("\n");
show_trace(task, stack);
}
-void show_code(unsigned int *pc)
-{
- long i;
-
- printk("\nCode:");
-
- for(i = -3 ; i < 6 ; i++) {
- unsigned long insn;
- if (__get_user(insn, pc + i)) {
- printk(" (Bad address in pc)\n");
- break;
- }
- printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
- }
-}
-
DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
- int nl = 0;
console_verbose();
spin_lock_irq(&die_lock);
- printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
- nl = 1;
-#endif
- if (nl)
- printk("\n");
+ pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
+ IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
show_regs(regs);
if (!user_mode(regs))
show_stack(NULL, (unsigned long*)regs->areg[1]);
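The conversions above rely on the pairing of pr_info() and pr_cont(): pr_info() starts a new log line at the info level, while pr_cont() appends to the most recently emitted line. A minimal kernel-style sketch of the same pattern used in show_regs(); the helper name is hypothetical:

    static void example_dump_row(const unsigned long *vals, int n)
    {
        int i;

        pr_info("row:");                    /* start a new log line */
        for (i = 0; i < n; i++)
            pr_cont(" %08lx", vals[i]);     /* keep appending to it */
        pr_cont("\n");                      /* terminate the line */
    }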
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 2d8466f9e49b..d19b09cdf284 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -214,23 +214,26 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- if (ctx->more) {
+ if (!result && !ctx->more) {
+ err = af_alg_wait_for_completion(
+ crypto_ahash_init(&ctx->req),
+ &ctx->completion);
+ if (err)
+ goto unlock;
+ }
+
+ if (!result || ctx->more) {
ctx->more = 0;
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
- } else if (!result) {
- err = af_alg_wait_for_completion(
- crypto_ahash_digest(&ctx->req),
- &ctx->completion);
}
err = memcpy_to_msg(msg, ctx->result, len);
- hash_free_result(sk, ctx);
-
unlock:
+ hash_free_result(sk, ctx);
release_sock(sk);
return err ?: len;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 865f46ea724f..c80765b211cf 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
return cert;
error_decode:
- kfree(cert->pub->key);
kfree(ctx);
error_no_ctx:
x509_free_certificate(cert);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 52ce17a3dd63..c16c94f88733 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
sg = scatterwalk_ffwd(tmp, sg, start);
- if (sg_page(sg) == virt_to_page(buf) &&
- sg->offset == offset_in_page(buf))
- return;
-
scatterwalk_start(&walk, sg);
scatterwalk_copychunks(buf, &walk, nbytes, out);
scatterwalk_done(&walk, out, 0);
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d58fbf7f04e6..7dd70927991e 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -122,7 +122,7 @@ static int acpi_apd_create_device(struct acpi_device *adev,
int ret;
if (!dev_desc) {
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
@@ -139,14 +139,8 @@ static int acpi_apd_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->properties) {
- ret = device_add_properties(&adev->dev, dev_desc->properties);
- if (ret)
- goto err_out;
- }
-
adev->driver_data = pdata;
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev))
return 1;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 552010288135..373657f7e35a 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
dev_desc = (const struct lpss_device_desc *)id->driver_data;
if (!dev_desc) {
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->properties) {
- ret = device_add_properties(&adev->dev, dev_desc->properties);
- if (ret)
- goto err_out;
- }
-
adev->driver_data = pdata;
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
return 1;
}
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b200ae1f3c6f..b4c1a6a51da4 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
+ * @properties: Optional collection of built-in properties.
*
* Check if the given @adev can be represented as a platform device and, if
* that's the case, create and register a platform device, populate its common
@@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
*
* Name of the platform device will be the same as @adev's.
*/
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
+ struct property_entry *properties)
{
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
@@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
pdevinfo.res = resources;
pdevinfo.num_res = count;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
+ pdevinfo.properties = properties;
if (acpi_dma_supported(adev))
pdevinfo.dma_mask = DMA_BIT_MASK(32);
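With the new second parameter, build-time properties are handed straight to acpi_create_platform_device() instead of being attached with device_add_properties() beforehand, as the acpi_apd and acpi_lpss hunks above show. A hypothetical caller, sketched with an invented property name:

    static struct property_entry example_properties[] = {
        PROPERTY_ENTRY_U32("example,clock-frequency", 48000000),
        { }
    };

    static int example_create_device(struct acpi_device *adev)
    {
        struct platform_device *pdev;

        /* Properties now travel through pdevinfo.properties inside
         * acpi_create_platform_device() itself. */
        pdev = acpi_create_platform_device(adev, example_properties);
        return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
    }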
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 046c4d0394ee..5fb838e592dc 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -480,19 +480,17 @@ static void acpi_tb_convert_fadt(void)
u32 i;
/*
- * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which
+ * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
* should be zero are indeed zero. This will workaround BIOSs that
* inadvertently place values in these fields.
*
* The ACPI 1.0 reserved fields that will be zeroed are the bytes located
* at offset 45, 55, 95, and the word located at offset 109, 110.
*
- * Note: The FADT revision value is unreliable because of BIOS errors.
- * The table length is instead used as the final word on the version.
- *
- * Note: FADT revision 3 is the ACPI 2.0 version of the FADT.
+ * Note: The FADT revision value is unreliable. Only the length can be
+ * trusted.
*/
- if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) {
+ if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
acpi_gbl_FADT.preferred_profile = 0;
acpi_gbl_FADT.pstate_control = 0;
acpi_gbl_FADT.cst_control = 0;
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 33505c651f62..86364097e236 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
if (IS_ENABLED(CONFIG_INT340X_THERMAL))
- acpi_create_platform_device(adev);
+ acpi_create_platform_device(adev, NULL);
/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
id->driver_data == INT3401_DEVICE)
- acpi_create_platform_device(adev);
+ acpi_create_platform_device(adev, NULL);
return 1;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 035ac646d8db..3d1856f1f4d0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device)
&is_spi_i2c_slave);
acpi_dev_free_resource_list(&resource_list);
if (!is_spi_i2c_slave) {
- acpi_create_platform_device(device);
+ acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
blocking_notifier_call_chain(&acpi_reconfig_chain,
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index deb0ff78eba8..54abb26b7366 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -47,32 +47,15 @@ static void acpi_sleep_tts_switch(u32 acpi_state)
}
}
-static void acpi_sleep_pts_switch(u32 acpi_state)
-{
- acpi_status status;
-
- status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- /*
- * OS can't evaluate the _PTS object correctly. Some warning
- * message will be printed. But it won't break anything.
- */
- printk(KERN_NOTICE "Failure in evaluating _PTS object\n");
- }
-}
-
-static int sleep_notify_reboot(struct notifier_block *this,
+static int tts_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
acpi_sleep_tts_switch(ACPI_STATE_S5);
-
- acpi_sleep_pts_switch(ACPI_STATE_S5);
-
return NOTIFY_DONE;
}
-static struct notifier_block sleep_notifier = {
- .notifier_call = sleep_notify_reboot,
+static struct notifier_block tts_notifier = {
+ .notifier_call = tts_notify_reboot,
.next = NULL,
.priority = 0,
};
@@ -916,9 +899,9 @@ int __init acpi_sleep_init(void)
pr_info(PREFIX "(supports%s)\n", supported);
/*
- * Register the sleep_notifier to reboot notifier list so that the _TTS
- * and _PTS object can also be evaluated when the system enters S5.
+ * Register the tts_notifier to the reboot notifier list so that the _TTS
+ * object can also be evaluated when the system enters S5.
*/
- register_reboot_notifier(&sleep_notifier);
+ register_reboot_notifier(&tts_notifier);
return 0;
}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9669fc7c19df..74f4c662f776 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1436,13 +1436,6 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
"ahci: MRSM is on, fallback to single MSI\n");
pci_free_irq_vectors(pdev);
}
-
- /*
- * -ENOSPC indicated we don't have enough vectors. Don't bother
- * trying a single vectors for any other error:
- */
- if (nvec < 0 && nvec != -ENOSPC)
- return nvec;
}
/*
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9cceb4a875a5..c4eb4ae9c3aa 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1088,7 +1088,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
desc[1] = tf->command; /* status */
desc[2] = tf->device;
desc[3] = tf->nsect;
- desc[0] = 0;
+ desc[7] = 0;
if (tf->flags & ATA_TFLAG_LBA48) {
desc[8] |= 0x80;
if (tf->hob_nsect)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d02e7c0f5bfd..0e40967a5d6e 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -250,11 +250,11 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
-config FENCE_TRACE
- bool "Enable verbose FENCE_TRACE messages"
+config DMA_FENCE_TRACE
+ bool "Enable verbose DMA_FENCE_TRACE messages"
depends on DMA_SHARED_BUFFER
help
- Enable the FENCE_TRACE printks. This will add extra
+ Enable the DMA_FENCE_TRACE printks. This will add extra
spam to the console log, but will make it easier to diagnose
lockup related problems for dma-buffers shared across multiple
devices.
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d22a7260f42b..d76cd97a98b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -324,7 +324,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = -EPROBE_DEFER;
int local_trigger_count = atomic_read(&deferred_trigger_count);
- bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);
+ bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+ !drv->suppress_bind_attrs;
if (defer_all_probes) {
/*
@@ -383,7 +384,7 @@ re_probe:
if (test_remove) {
test_remove = false;
- if (dev->bus && dev->bus->remove)
+ if (dev->bus->remove)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e44944f4be77..2932a5bd892f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ dpm_wait_for_children(dev, async);
+
if (async_error)
goto Complete;
@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
+ dpm_wait_for_children(dev, async);
+
if (async_error)
goto Complete;
@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ab19adb07a12..3c606c09fd5a 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -853,45 +853,6 @@ rqbiocnt(struct request *r)
return n;
}
-/* This can be removed if we are certain that no users of the block
- * layer will ever use zero-count pages in bios. Otherwise we have to
- * protect against the put_page sometimes done by the network layer.
- *
- * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
- * discussion.
- *
- * We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition. So we use _refcount directly.
- */
-static void
-bio_pageinc(struct bio *bio)
-{
- struct bio_vec bv;
- struct page *page;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- /* Non-zero page count for non-head members of
- * compound pages is no longer allowed by the kernel.
- */
- page = compound_head(bv.bv_page);
- page_ref_inc(page);
- }
-}
-
-static void
-bio_pagedec(struct bio *bio)
-{
- struct page *page;
- struct bio_vec bv;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- page = compound_head(bv.bv_page);
- page_ref_dec(page);
- }
-}
-
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
buf->rq = rq;
buf->bio = bio;
buf->iter = bio->bi_iter;
- bio_pageinc(bio);
}
static struct buf *
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf)
if (buf == d->ip.buf)
d->ip.buf = NULL;
rq = buf->rq;
- bio_pagedec(buf->bio);
mempool_free(buf, d->bufpool);
n = (unsigned long) rq->special;
rq->special = (void *) --n;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 100be556e613..83482721bc01 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
drbd_update_congested(connection);
}
do {
- rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
if (rv == -EAGAIN) {
if (we_should_drop_the_connection(connection, sock))
break;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 19a16b2dbb91..7a1048755914 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -599,7 +599,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return -EINVAL;
sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
- if (!sreq)
+ if (IS_ERR(sreq))
return -ENOMEM;
mutex_unlock(&nbd->tx_lock);
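The fix above works because blk_mq_alloc_request() reports failure with an ERR_PTR()-encoded error rather than NULL, so the old "!sreq" test could never trigger. A minimal sketch of the convention; the function name is hypothetical:

    static int example_alloc_and_free(struct request_queue *q)
    {
        struct request *rq;

        rq = blk_mq_alloc_request(q, WRITE, 0);
        if (IS_ERR(rq))
            return PTR_ERR(rq); /* e.g. -ENOMEM or -EWOULDBLOCK */

        /* ... fill in and queue the request ... */
        blk_mq_free_request(rq);
        return 0;
    }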
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2dc5c96c186a..5545a679abd8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
static int init_vq(struct virtio_blk *vblk)
{
- int err = 0;
+ int err;
int i;
vq_callback_t **callbacks;
const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
- vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
+ vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
- names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
- callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
- vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
+ names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
+ callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
+ vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
if (!names || !callbacks || !vqs) {
err = -ENOMEM;
goto out;
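kmalloc_array(n, size, flags) is preferred over kmalloc(n * size, flags) because it returns NULL instead of silently allocating a truncated buffer when n * size would overflow. A minimal illustrative allocation, not taken from the driver:

    static int example_alloc_names(unsigned int num_vqs)
    {
        const char **names;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        if (!names)
            return -ENOMEM;

        /* ... fill and use names[0..num_vqs-1] ... */
        kfree(names);
        return 0;
    }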
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 04365b17ee67..5163c8f918cb 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1403,7 +1403,8 @@ static ssize_t hot_remove_store(struct class *class,
zram = idr_find(&zram_index_idr, dev_id);
if (zram) {
ret = zram_remove(zram);
- idr_remove(&zram_index_idr, dev_id);
+ if (!ret)
+ idr_remove(&zram_index_idr, dev_id);
} else {
ret = -ENODEV;
}
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index ef51c9c864c5..b6bb58c41df5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev)
BT_DBG("HCI device registered (hdev %p)", hdev);
dev_set_drvdata(&pdev->dev, hst);
- return err;
+ return 0;
}
static int bt_ti_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5ccb90ef0146..8f6c23c20c52 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
},
.driver_data = &acpi_active_low,
},
+ { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ .ident = "Lenovo ThinkPad 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = &acpi_active_low,
+ },
{ }
};
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index b49e61320952..fc9e8891eae3 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -484,7 +484,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
}
static const struct of_device_id bt_bmc_match[] = {
- { .compatible = "aspeed,ast2400-bt-bmc" },
+ { .compatible = "aspeed,ast2400-ibt-bmc" },
{ },
};
@@ -502,4 +502,4 @@ module_platform_driver(bt_bmc_driver);
MODULE_DEVICE_TABLE(of, bt_bmc_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
-MODULE_DESCRIPTION("Linux device interface to the BT interface");
+MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d23368874710..6af1ce04b3da 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -748,10 +748,7 @@ static int pp_release(struct inode *inode, struct file *file)
}
if (pp->pdev) {
- const char *name = pp->pdev->name;
-
parport_unregister_device(pp->pdev);
- kfree(name);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 8de61876f633..3a9149cf0110 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -813,9 +813,6 @@ int tpm_do_selftest(struct tpm_chip *chip)
continue;
}
- if (rc < TPM_HEADER_SIZE)
- return -EFAULT;
-
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d433b1db1fdd..5649234b7316 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1539,19 +1539,29 @@ static void remove_port_data(struct port *port)
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
+ spin_unlock_irq(&port->inbuf_lock);
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->inbuf_lock);
+ do {
+ spin_lock_irq(&port->inbuf_lock);
+ buf = virtqueue_detach_unused_buf(port->in_vq);
+ spin_unlock_irq(&port->inbuf_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
/* Free pending buffers from the out-queue. */
- while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->outvq_lock);
+ do {
+ spin_lock_irq(&port->outvq_lock);
+ buf = virtqueue_detach_unused_buf(port->out_vq);
+ spin_unlock_irq(&port->outvq_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
}
/*
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index f21e9b7afd1a..e3eed5a78404 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -20,7 +20,7 @@ config CLK_BCM_KONA
config COMMON_CLK_IPROC
bool "Broadcom iProc clock support"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on ARCH_BCM_IPROC || ARCH_BCM_63XX || COMPILE_TEST
depends on COMMON_CLK
default ARCH_BCM_IPROC
help
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index edf3b96b3b73..1d99292e2039 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
}
/* register clk-provider */
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 0718e831475f..3b784b593afd 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
}
/* register clk-provider */
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 8802a2dd56ac..f674778fb3ac 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np)
hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 20b105584f82..80ae2a51452d 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
struct mux_hwclock *hwc,
const struct clk_ops *ops,
unsigned long min_rate,
+ unsigned long max_rate,
unsigned long pct80_rate,
const char *fmt, int idx)
{
@@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
continue;
if (rate < min_rate)
continue;
+ if (rate > max_rate)
+ continue;
parent_names[j] = div->name;
hwc->parent_to_clksel[j] = i;
@@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
struct mux_hwclock *hwc;
const struct clockgen_pll_div *div;
unsigned long plat_rate, min_rate;
- u64 pct80_rate;
+ u64 max_rate, pct80_rate;
u32 clksel;
hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
@@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
return NULL;
}
- pct80_rate = clk_get_rate(div->clk);
- pct80_rate *= 8;
+ max_rate = clk_get_rate(div->clk);
+ pct80_rate = max_rate * 8;
do_div(pct80_rate, 10);
plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
@@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
else
min_rate = plat_rate / 2;
- return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+ return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
pct80_rate, "cg-cmux%d", idx);
}
@@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
hwc->reg = cg->regs + 0x20 * idx + 0x10;
hwc->info = cg->info.hwaccel[idx];
- return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+ return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
"cg-hwaccel%d", idx);
}
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 5daddf5ecc4b..bc37030e38ba 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw)
struct xgene_clk *pclk = to_xgene_clk(hw);
unsigned long flags = 0;
u32 data;
- phys_addr_t reg;
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.csr_reg != NULL) {
pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
- reg = __pa(pclk->param.csr_reg);
/* First enable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
data |= pclk->param.reg_clk_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_clk_offset);
- pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
- clk_hw_get_name(hw), &reg,
+ pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+ clk_hw_get_name(hw),
pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
data);
@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
data &= ~pclk->param.reg_csr_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_csr_offset);
- pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
- clk_hw_get_name(hw), &reg,
+ pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+ clk_hw_get_name(hw),
pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
data);
}
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 19f9b622981a..7a6acc3e4a92 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
temp64 *= mfn;
do_div(temp64, mfd);
- return (parent_rate * div) + (u32)temp64;
+ return parent_rate * div + (unsigned long)temp64;
}
static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
do_div(temp64, parent_rate);
mfn = temp64;
- return parent_rate * div + parent_rate * mfn / mfd;
+ temp64 = (u64)parent_rate;
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+ return parent_rate * div + (unsigned long)temp64;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
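The round_rate change above widens the fractional term to 64 bits before dividing, because parent_rate * mfn can easily exceed 32 bits. A standalone demonstration with illustrative values, not a real PLL configuration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t parent_rate = 24000000, div = 22, mfn = 300, mfd = 1000;

        uint32_t wrapped = parent_rate * mfn / mfd;     /* 32-bit product wraps around */
        uint64_t temp64 = (uint64_t)parent_rate * mfn;  /* widen before dividing */

        printf("32-bit fraction: %u Hz, 64-bit fraction: %llu Hz\n",
               wrapped, (unsigned long long)(temp64 / mfd));
        printf("resulting rate:  %llu Hz\n",
               (unsigned long long)parent_rate * div + temp64 / mfd);
        return 0;
    }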
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 3a51fff1b0e7..9adaf48aea23 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 87f2317b2a00..f110c02e83cb 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index e22a67f76d93..64d1ef49caeb 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
}
pxa_unit->apbcp_base = of_iomap(np, 3);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apbcp_base) {
pr_err("failed to map apbcp registers\n");
return;
}
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 8feba93672c5..e8075359366b 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
ddrclk->ddr_flag = ddr_flag;
clk = clk_register(NULL, &ddrclk->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: could not register ddrclk %s\n", __func__, name);
+ if (IS_ERR(clk))
kfree(ddrclk);
- return NULL;
- }
return clk;
}
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 96fab6cfb202..6c6afb87b4ce 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -132,28 +132,34 @@ free_clkout:
pr_err("%s: failed to register clkout clock\n", __func__);
}
+/*
+ * We use the CLK_OF_DECLARE_DRIVER initialization method to avoid setting
+ * the OF_POPULATED flag on the PMU device tree node, so that the Exynos
+ * PMU platform device can later be properly probed by the PMU driver.
+ */
+
static void __init exynos4_clkout_init(struct device_node *node)
{
exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
}
-CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
exynos4_clkout_init);
static void __init exynos5_clkout_init(struct device_node *node)
{
exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
}
-CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
exynos5_clkout_init);
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 79596463e0d9..fc75a335a7ce 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -143,7 +143,7 @@ static SUNXI_CCU_NKM_WITH_MUX_GATE_LOCK(pll_mipi_clk, "pll-mipi",
4, 2, /* K */
0, 4, /* M */
21, 0, /* mux */
- BIT(31), /* gate */
+ BIT(31) | BIT(23) | BIT(22), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
@@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = {
static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
0x050, 0, 3, axi_div_table, 0);
+#define SUN6I_A31_AHB1_REG 0x054
+
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph" };
@@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
val &= BIT(16);
writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
+ /* Force AHB1 to PLL6 / 3 */
+ val = readl(reg + SUN6I_A31_AHB1_REG);
+ /* set PLL6 pre-div = 3 */
+ val &= ~GENMASK(7, 6);
+ val |= 0x2 << 6;
+ /* select PLL6 / pre-div */
+ val &= ~GENMASK(13, 12);
+ val |= 0x3 << 12;
+ writel(val, reg + SUN6I_A31_AHB1_REG);
+
sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 96b40ca57697..9bd1f78a0547 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -131,7 +131,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
8, 4, /* N */
4, 2, /* K */
0, 4, /* M */
- BIT(31), /* gate */
+ BIT(31) | BIT(23) | BIT(22), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 838b22aa8b67..f2c9274b8bd5 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
else
calcp = 3;
- calcm = (req->parent_rate >> calcp) - 1;
+ calcm = (div >> calcp) - 1;
req->rate = (req->parent_rate >> calcp) / (calcm + 1);
req->m = calcm;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 156aad167cd6..954a64c7757b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -137,7 +137,7 @@ static void dbg_dump_sg(const char *level, const char *prefix_str,
}
buf = it_page + it->offset;
- len = min(tlen, it->length);
+ len = min_t(size_t, tlen, it->length);
print_hex_dump(level, prefix_str, prefix_type, rowsize,
groupsize, buf, len, ascii);
tlen -= len;
@@ -4583,6 +4583,15 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /*
+ * Check support for AES modes not available
+ * on LP devices.
+ */
+ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+ if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
+ continue;
+
t_alg = caam_alg_alloc(alg);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 0e499bfca41c..3d94ff20fdca 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -270,8 +270,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
if (!dax_dev->alive)
return -ENXIO;
- /* prevent private / writable mappings from being established */
- if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
+ /* prevent private mappings from being established */
+ if ((vma->vm_flags & VM_SHARED) != VM_SHARED) {
dev_info(dev, "%s: %s: fail, attempted private mapping\n",
current->comm, func);
return -EINVAL;
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 4a15fa5df98b..73c6ce93a0d9 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -78,7 +78,9 @@ static int dax_pmem_probe(struct device *dev)
nsio = to_nd_namespace_io(&ndns->dev);
/* parse the 'pfn' info block via ->rw_bytes */
- devm_nsio_enable(dev, nsio);
+ rc = devm_nsio_enable(dev, nsio);
+ if (rc)
+ return rc;
altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
if (IS_ERR(altmap))
return PTR_ERR(altmap);
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2585821b24ab..ed3b785bae37 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -7,7 +7,7 @@ config SYNC_FILE
select DMA_SHARED_BUFFER
---help---
The Sync File Framework adds explicit synchronization via
- userspace. It enables send/receive 'struct fence' objects to/from
+ userspace. It enables sending and receiving 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 210a10bfad2b..c33bf8863147 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,3 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d249a6a4..e72e64484131 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,7 +25,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
return base + offset;
}
-static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
unsigned long flags;
@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
- struct fence *fence_excl;
+ struct dma_fence *fence_excl;
unsigned long events;
unsigned shared_count, seq;
@@ -187,20 +187,20 @@ retry:
spin_unlock_irq(&dmabuf->poll.lock);
if (events & pevents) {
- if (!fence_get_rcu(fence_excl)) {
+ if (!dma_fence_get_rcu(fence_excl)) {
/* force a recheck */
events &= ~pevents;
dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
+ } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
+ dma_buf_poll_cb)) {
events &= ~pevents;
- fence_put(fence_excl);
+ dma_fence_put(fence_excl);
} else {
/*
* No callback queued, wake up any additional
* waiters.
*/
- fence_put(fence_excl);
+ dma_fence_put(fence_excl);
dma_buf_poll_cb(NULL, &dcb->cb);
}
}
@@ -222,9 +222,9 @@ retry:
goto out;
for (i = 0; i < shared_count; ++i) {
- struct fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
- if (!fence_get_rcu(fence)) {
+ if (!dma_fence_get_rcu(fence)) {
/*
* fence refcount dropped to zero, this means
* that fobj has been freed
@@ -235,13 +235,13 @@ retry:
dma_buf_poll_cb(NULL, &dcb->cb);
break;
}
- if (!fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- fence_put(fence);
+ if (!dma_fence_add_callback(fence, &dcb->cb,
+ dma_buf_poll_cb)) {
+ dma_fence_put(fence);
events &= ~POLLOUT;
break;
}
- fence_put(fence);
+ dma_fence_put(fence);
}
/* No callback queued, wake up any additional waiters. */
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/dma-fence-array.c
index f1989fcaf354..67eb7c8fb88c 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -1,5 +1,5 @@
/*
- * fence-array: aggregate fences to be waited together
+ * dma-fence-array: aggregate fences to be waited together
*
* Copyright (C) 2016 Collabora Ltd
* Copyright (C) 2016 Advanced Micro Devices, Inc.
@@ -19,35 +19,34 @@
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
-
-static const char *fence_array_get_driver_name(struct fence *fence)
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
- return "fence_array";
+ return "dma_fence_array";
}
-static const char *fence_array_get_timeline_name(struct fence *fence)
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+static void dma_fence_array_cb_func(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
- struct fence_array_cb *array_cb =
- container_of(cb, struct fence_array_cb, cb);
- struct fence_array *array = array_cb->array;
+ struct dma_fence_array_cb *array_cb =
+ container_of(cb, struct dma_fence_array_cb, cb);
+ struct dma_fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
- fence_signal(&array->base);
- fence_put(&array->base);
+ dma_fence_signal(&array->base);
+ dma_fence_put(&array->base);
}
-static bool fence_array_enable_signaling(struct fence *fence)
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
- struct fence_array_cb *cb = (void *)(&array[1]);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ struct dma_fence_array_cb *cb = (void *)(&array[1]);
unsigned i;
for (i = 0; i < array->num_fences; ++i) {
@@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence)
* until we signal the array as complete (but that is now
* insufficient).
*/
- fence_get(&array->base);
- if (fence_add_callback(array->fences[i], &cb[i].cb,
- fence_array_cb_func)) {
- fence_put(&array->base);
+ dma_fence_get(&array->base);
+ if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+ dma_fence_array_cb_func)) {
+ dma_fence_put(&array->base);
if (atomic_dec_and_test(&array->num_pending))
return false;
}
@@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence)
return true;
}
-static bool fence_array_signaled(struct fence *fence)
+static bool dma_fence_array_signaled(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
return atomic_read(&array->num_pending) <= 0;
}
-static void fence_array_release(struct fence *fence)
+static void dma_fence_array_release(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
unsigned i;
for (i = 0; i < array->num_fences; ++i)
- fence_put(array->fences[i]);
+ dma_fence_put(array->fences[i]);
kfree(array->fences);
- fence_free(fence);
+ dma_fence_free(fence);
}
-const struct fence_ops fence_array_ops = {
- .get_driver_name = fence_array_get_driver_name,
- .get_timeline_name = fence_array_get_timeline_name,
- .enable_signaling = fence_array_enable_signaling,
- .signaled = fence_array_signaled,
- .wait = fence_default_wait,
- .release = fence_array_release,
+const struct dma_fence_ops dma_fence_array_ops = {
+ .get_driver_name = dma_fence_array_get_driver_name,
+ .get_timeline_name = dma_fence_array_get_timeline_name,
+ .enable_signaling = dma_fence_array_enable_signaling,
+ .signaled = dma_fence_array_signaled,
+ .wait = dma_fence_default_wait,
+ .release = dma_fence_array_release,
};
-EXPORT_SYMBOL(fence_array_ops);
+EXPORT_SYMBOL(dma_fence_array_ops);
/**
- * fence_array_create - Create a custom fence array
+ * dma_fence_array_create - Create a custom fence array
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
- * Allocate a fence_array object and initialize the base fence with fence_init().
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
* In case of error it returns NULL.
*
* The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and fence_put() is used on each fence on release.
+ * array is taken and dma_fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
*/
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any)
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
{
- struct fence_array *array;
+ struct dma_fence_array *array;
size_t size = sizeof(*array);
/* Allocate the callback structures behind the array. */
- size += num_fences * sizeof(struct fence_array_cb);
+ size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
if (!array)
return NULL;
spin_lock_init(&array->lock);
- fence_init(&array->base, &fence_array_ops, &array->lock,
- context, seqno);
+ dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+ context, seqno);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
@@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences,
return array;
}
-EXPORT_SYMBOL(fence_array_create);
+EXPORT_SYMBOL(dma_fence_array_create);
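A hypothetical consumer of the renamed API, sketched only to show the calling convention established above: the array takes ownership of the fences[] buffer and drops one reference per fence on release, so the caller grabs its own references first. All names here are invented, error handling is minimal:

    static int example_wait_both(struct dma_fence *a, struct dma_fence *b)
    {
        struct dma_fence **fences;
        struct dma_fence_array *array;
        long ret;

        fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
        if (!fences)
            return -ENOMEM;

        /* the array takes ownership and puts each fence on release */
        fences[0] = dma_fence_get(a);
        fences[1] = dma_fence_get(b);

        /* signal_on_any == false: signal only once both fences signal */
        array = dma_fence_array_create(2, fences,
                                       dma_fence_context_alloc(1), 1, false);
        if (!array) {
            dma_fence_put(fences[0]);
            dma_fence_put(fences[1]);
            kfree(fences);
            return -ENOMEM;
        }

        ret = dma_fence_wait(&array->base, true);
        dma_fence_put(&array->base);
        return ret < 0 ? ret : 0;
    }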
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/dma-fence.c
index 4d51f9e83fa8..0212af7997d9 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -21,13 +21,13 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
-EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
-EXPORT_TRACEPOINT_SYMBOL(fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
/*
* fence context counter: each execution context should have its own
@@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
- * fence_context_alloc - allocate an array of fence contexts
+ * dma_fence_context_alloc - allocate an array of fence contexts
* @num: [in] amount of contexts to allocate
*
* This function will return the first index of the number of fences allocated.
* The fence context is used for setting fence->context to a unique number.
*/
-u64 fence_context_alloc(unsigned num)
+u64 dma_fence_context_alloc(unsigned num)
{
BUG_ON(!num);
- return atomic64_add_return(num, &fence_context_counter) - num;
+ return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
-EXPORT_SYMBOL(fence_context_alloc);
+EXPORT_SYMBOL(dma_fence_context_alloc);
/**
- * fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_locked - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence; this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*
- * Unlike fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal, this function must be called with fence->lock held.
*/
-int fence_signal_locked(struct fence *fence)
+int dma_fence_signal_locked(struct dma_fence *fence)
{
- struct fence_cb *cur, *tmp;
+ struct dma_fence_cb *cur, *tmp;
int ret = 0;
if (WARN_ON(!fence))
return -EINVAL;
+
+ lockdep_assert_held(fence->lock);
@@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
smp_mb__before_atomic();
}
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
/*
- * we might have raced with the unlocked fence_signal,
+ * we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
} else
- trace_fence_signaled(fence);
+ trace_dma_fence_signaled(fence);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
@@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
}
return ret;
}
-EXPORT_SYMBOL(fence_signal_locked);
+EXPORT_SYMBOL(dma_fence_signal_locked);
/**
- * fence_signal - signal completion of a fence
+ * dma_fence_signal - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence; this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*/
-int fence_signal(struct fence *fence)
+int dma_fence_signal(struct dma_fence *fence)
{
unsigned long flags;
@@ -116,13 +118,13 @@ int fence_signal(struct fence *fence)
smp_mb__before_atomic();
}
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
- trace_fence_signaled(fence);
+ trace_dma_fence_signaled(fence);
- if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
- struct fence_cb *cur, *tmp;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+ struct dma_fence_cb *cur, *tmp;
spin_lock_irqsave(fence->lock, flags);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
@@ -133,10 +135,10 @@ int fence_signal(struct fence *fence)
}
return 0;
}
-EXPORT_SYMBOL(fence_signal);
+EXPORT_SYMBOL(dma_fence_signal);
/**
- * fence_wait_timeout - sleep until the fence gets signaled
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
@@ -152,78 +154,76 @@ EXPORT_SYMBOL(fence_signal);
* freed before return, resulting in undefined behavior.
*/
signed long
-fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
signed long ret;
if (WARN_ON(timeout < 0))
return -EINVAL;
- if (timeout == 0)
- return fence_is_signaled(fence);
-
- trace_fence_wait_start(fence);
+ trace_dma_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
- trace_fence_wait_end(fence);
+ trace_dma_fence_wait_end(fence);
return ret;
}
-EXPORT_SYMBOL(fence_wait_timeout);
+EXPORT_SYMBOL(dma_fence_wait_timeout);
-void fence_release(struct kref *kref)
+void dma_fence_release(struct kref *kref)
{
- struct fence *fence =
- container_of(kref, struct fence, refcount);
+ struct dma_fence *fence =
+ container_of(kref, struct dma_fence, refcount);
- trace_fence_destroy(fence);
+ trace_dma_fence_destroy(fence);
BUG_ON(!list_empty(&fence->cb_list));
if (fence->ops->release)
fence->ops->release(fence);
else
- fence_free(fence);
+ dma_fence_free(fence);
}
-EXPORT_SYMBOL(fence_release);
+EXPORT_SYMBOL(dma_fence_release);
-void fence_free(struct fence *fence)
+void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
}
-EXPORT_SYMBOL(fence_free);
+EXPORT_SYMBOL(dma_fence_free);
/**
- * fence_enable_sw_signaling - enable signaling on fence
+ * dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: [in] the fence to enable
*
* This will request that software signaling be enabled, to make the fence
* complete as soon as possible.
*/
-void fence_enable_sw_signaling(struct fence *fence)
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- trace_fence_enable_signal(fence);
+ if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
if (!fence->ops->enable_signaling(fence))
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
}
}
-EXPORT_SYMBOL(fence_enable_sw_signaling);
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
- * fence_add_callback - add a callback to be called when the fence
+ * dma_fence_add_callback - add a callback to be called when the fence
* is signaled
* @fence: [in] the fence to wait on
* @cb: [in] the callback to register
* @func: [in] the function to call
*
- * cb will be initialized by fence_add_callback, no initialization
+ * cb will be initialized by dma_fence_add_callback, no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
@@ -232,15 +232,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
* *not* call the callback)
*
* Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to fence_wait, however the caller doesn't need to
+ * refcount as it does to dma_fence_wait, however the caller doesn't need to
* keep a refcount to fence afterwards: when software access is enabled,
* the creator of the fence is required to keep the fence alive until
- * after it signals with fence_signal. The callback itself can be called
+ * after it signals with dma_fence_signal. The callback itself can be called
* from irq context.
*
*/
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func)
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ dma_fence_func_t func)
{
unsigned long flags;
int ret = 0;
@@ -249,22 +249,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
if (WARN_ON(!fence || !func))
return -EINVAL;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
INIT_LIST_HEAD(&cb->node);
return -ENOENT;
}
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
else if (!was_set) {
- trace_fence_enable_signal(fence);
+ trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
ret = -ENOENT;
}
}
@@ -278,10 +279,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
return ret;
}
-EXPORT_SYMBOL(fence_add_callback);
+EXPORT_SYMBOL(dma_fence_add_callback);
/**
- * fence_remove_callback - remove a callback from the signaling list
+ * dma_fence_remove_callback - remove a callback from the signaling list
* @fence: [in] the fence to wait on
* @cb: [in] the callback to remove
*
@@ -296,7 +297,7 @@ EXPORT_SYMBOL(fence_add_callback);
* with a reference held to the fence.
*/
bool
-fence_remove_callback(struct fence *fence, struct fence_cb *cb)
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
unsigned long flags;
bool ret;
@@ -311,15 +312,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
return ret;
}
-EXPORT_SYMBOL(fence_remove_callback);
+EXPORT_SYMBOL(dma_fence_remove_callback);
struct default_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct default_wait_cb *wait =
container_of(cb, struct default_wait_cb, base);
@@ -328,25 +329,27 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
}
/**
- * fence_default_wait - default sleep until the fence gets signaled
+ * dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
- * remaining timeout in jiffies on success.
+ * remaining timeout in jiffies on success. If @timeout is zero, one is
+ * returned if the fence is already signaled, for consistency with other
+ * functions that take a jiffies timeout.
*/
signed long
-fence_default_wait(struct fence *fence, bool intr, signed long timeout)
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
struct default_wait_cb cb;
unsigned long flags;
- signed long ret = timeout;
+ signed long ret = timeout ? timeout : 1;
bool was_set;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return timeout;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return ret;
spin_lock_irqsave(fence->lock, flags);
@@ -355,25 +358,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
if (!was_set) {
- trace_fence_enable_signal(fence);
+ trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
goto out;
}
}
- cb.base.func = fence_default_wait_cb;
+ cb.base.func = dma_fence_default_wait_cb;
cb.task = current;
list_add(&cb.base.node, &fence->cb_list);
- while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+ while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
@@ -395,28 +399,34 @@ out:
spin_unlock_irqrestore(fence->lock, flags);
return ret;
}
-EXPORT_SYMBOL(fence_default_wait);
+EXPORT_SYMBOL(dma_fence_default_wait);
static bool
-fence_test_signaled_any(struct fence **fences, uint32_t count)
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
+ uint32_t *idx)
{
int i;
for (i = 0; i < count; ++i) {
- struct fence *fence = fences[i];
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ struct dma_fence *fence = fences[i];
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (idx)
+ *idx = i;
return true;
+ }
}
return false;
}
/**
- * fence_wait_any_timeout - sleep until any fence gets signaled
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
* @fences: [in] array of fences to wait on
* @count: [in] number of fences to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: [out] the first signaled fence index, meaningful only on
+ * positive return
*
* Returns -EINVAL if any fence uses a custom wait implementation, -ERESTARTSYS if
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
@@ -427,8 +437,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
* fence might be freed before return, resulting in undefined behavior.
*/
signed long
-fence_wait_any_timeout(struct fence **fences, uint32_t count,
- bool intr, signed long timeout)
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+ bool intr, signed long timeout, uint32_t *idx)
{
struct default_wait_cb *cb;
signed long ret = timeout;
@@ -439,8 +449,11 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
if (timeout == 0) {
for (i = 0; i < count; ++i)
- if (fence_is_signaled(fences[i]))
+ if (dma_fence_is_signaled(fences[i])) {
+ if (idx)
+ *idx = i;
return 1;
+ }
return 0;
}
@@ -452,17 +465,19 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
}
for (i = 0; i < count; ++i) {
- struct fence *fence = fences[i];
+ struct dma_fence *fence = fences[i];
- if (fence->ops->wait != fence_default_wait) {
+ if (fence->ops->wait != dma_fence_default_wait) {
ret = -EINVAL;
goto fence_rm_cb;
}
cb[i].task = current;
- if (fence_add_callback(fence, &cb[i].base,
- fence_default_wait_cb)) {
+ if (dma_fence_add_callback(fence, &cb[i].base,
+ dma_fence_default_wait_cb)) {
/* This fence is already signaled */
+ if (idx)
+ *idx = i;
goto fence_rm_cb;
}
}
@@ -473,7 +488,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
else
set_current_state(TASK_UNINTERRUPTIBLE);
- if (fence_test_signaled_any(fences, count))
+ if (dma_fence_test_signaled_any(fences, count, idx))
break;
ret = schedule_timeout(ret);
@@ -486,34 +501,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
fence_rm_cb:
while (i-- > 0)
- fence_remove_callback(fences[i], &cb[i].base);
+ dma_fence_remove_callback(fences[i], &cb[i].base);
err_free_cb:
kfree(cb);
return ret;
}
-EXPORT_SYMBOL(fence_wait_any_timeout);
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
- * fence_init - Initialize a custom fence.
+ * dma_fence_init - Initialize a custom fence.
* @fence: [in] the fence to initialize
- * @ops: [in] the fence_ops for operations on this fence
+ * @ops: [in] the dma_fence_ops for operations on this fence
* @lock: [in] the irqsafe spinlock to use for locking this fence
* @context: [in] the execution context this fence is run on
* @seqno: [in] a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
- * refcount again if fence_ops.enable_signaling gets called. This can
+ * refcount again if dma_fence_ops.enable_signaling gets called. This can
* be used when implementing other types of fence.
*
* context and seqno are used for easy comparison between fences, allowing one
- * to check which fence is later by simply using fence_later.
+ * to check which fence is later by simply using dma_fence_later.
*/
void
-fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno)
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@@ -527,6 +542,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
fence->seqno = seqno;
fence->flags = 0UL;
- trace_fence_init(fence);
+ trace_dma_fence_init(fence);
}
-EXPORT_SYMBOL(fence_init);
+EXPORT_SYMBOL(dma_fence_init);
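To show how the renamed core API fits together, here is a hedged sketch (not part of the patch) of a trivial driver fence. Everything prefixed example_ is hypothetical; the mandatory ops mirror the BUG_ON() checks in dma_fence_init() above.

/* Illustrative only: an "example" driver fence built on the renamed API. */
static const char *example_get_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *f)
{
	return "example-timeline";
}

static bool example_enable_signaling(struct dma_fence *f)
{
	return true;	/* assume the completion interrupt is always armed */
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
	.enable_signaling = example_enable_signaling,
	.wait = dma_fence_default_wait,
	/* no .release: dma_fence_release() falls back to dma_fence_free() */
};

static DEFINE_SPINLOCK(example_fence_lock);

static struct dma_fence *example_fence_create(u64 context, unsigned seqno)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	dma_fence_init(f, &example_fence_ops, &example_fence_lock,
		       context, seqno);
	return f;
}

/* The producer later completes it from its interrupt handler:
 *	dma_fence_signal(f);
 */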
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 723d8af988e5..393817e849ed 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
- struct fence *fence)
+ struct dma_fence *fence)
{
u32 i;
- fence_get(fence);
+ dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < fobj->shared_count; ++i) {
- struct fence *old_fence;
+ struct dma_fence *old_fence;
old_fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(obj));
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
write_seqcount_end(&obj->seq);
preempt_enable();
- fence_put(old_fence);
+ dma_fence_put(old_fence);
return;
}
}
@@ -143,12 +143,12 @@ static void
reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *old,
struct reservation_object_list *fobj,
- struct fence *fence)
+ struct dma_fence *fence)
{
unsigned i;
- struct fence *old_fence = NULL;
+ struct dma_fence *old_fence = NULL;
- fence_get(fence);
+ dma_fence_get(fence);
if (!old) {
RCU_INIT_POINTER(fobj->shared[0], fence);
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
fobj->shared_count = old->shared_count;
for (i = 0; i < old->shared_count; ++i) {
- struct fence *check;
+ struct dma_fence *check;
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
@@ -196,7 +196,7 @@ done:
kfree_rcu(old, rcu);
if (old_fence)
- fence_put(old_fence);
+ dma_fence_put(old_fence);
}
/**
@@ -208,7 +208,7 @@ done:
* reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct reservation_object_list *old, *fobj = obj->staged;
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct fence *fence)
+ struct dma_fence *fence)
{
- struct fence *old_fence = reservation_object_get_excl(obj);
+ struct dma_fence *old_fence = reservation_object_get_excl(obj);
struct reservation_object_list *old;
u32 i = 0;
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
i = old->shared_count;
if (fence)
- fence_get(fence);
+ dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
- fence_put(rcu_dereference_protected(old->shared[i],
+ dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
if (old_fence)
- fence_put(old_fence);
+ dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
@@ -276,26 +276,32 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* Zero or -errno
*/
int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct fence **pfence_excl,
+ struct dma_fence **pfence_excl,
unsigned *pshared_count,
- struct fence ***pshared)
+ struct dma_fence ***pshared)
{
- unsigned shared_count = 0;
- unsigned retry = 1;
- struct fence **shared = NULL, *fence_excl = NULL;
- int ret = 0;
+ struct dma_fence **shared = NULL;
+ struct dma_fence *fence_excl;
+ unsigned int shared_count;
+ int ret = 1;
- while (retry) {
+ do {
struct reservation_object_list *fobj;
unsigned seq;
+ unsigned int i;
- seq = read_seqcount_begin(&obj->seq);
+ shared_count = i = 0;
rcu_read_lock();
+ seq = read_seqcount_begin(&obj->seq);
+
+ fence_excl = rcu_dereference(obj->fence_excl);
+ if (fence_excl && !dma_fence_get_rcu(fence_excl))
+ goto unlock;
fobj = rcu_dereference(obj->fence);
if (fobj) {
- struct fence **nshared;
+ struct dma_fence **nshared;
size_t sz = sizeof(*shared) * fobj->shared_max;
nshared = krealloc(shared, sz,
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
}
ret = -ENOMEM;
- shared_count = 0;
break;
}
shared = nshared;
- memcpy(shared, fobj->shared, sz);
shared_count = fobj->shared_count;
- } else
- shared_count = 0;
- fence_excl = rcu_dereference(obj->fence_excl);
-
- retry = read_seqcount_retry(&obj->seq, seq);
- if (retry)
- goto unlock;
-
- if (!fence_excl || fence_get_rcu(fence_excl)) {
- unsigned i;
for (i = 0; i < shared_count; ++i) {
- if (fence_get_rcu(shared[i]))
- continue;
-
- /* uh oh, refcount failed, abort and retry */
- while (i--)
- fence_put(shared[i]);
-
- if (fence_excl) {
- fence_put(fence_excl);
- fence_excl = NULL;
- }
-
- retry = 1;
- break;
+ shared[i] = rcu_dereference(fobj->shared[i]);
+ if (!dma_fence_get_rcu(shared[i]))
+ break;
}
- } else
- retry = 1;
+ }
+ if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+ while (i--)
+ dma_fence_put(shared[i]);
+ dma_fence_put(fence_excl);
+ goto unlock;
+ }
+
+ ret = 0;
unlock:
rcu_read_unlock();
- }
- *pshared_count = shared_count;
- if (shared_count)
- *pshared = shared;
- else {
- *pshared = NULL;
+ } while (ret);
+
+ if (!shared_count) {
kfree(shared);
+ shared = NULL;
}
+
+ *pshared_count = shared_count;
+ *pshared = shared;
*pfence_excl = fence_excl;
return ret;
@@ -377,12 +368,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned seq, shared_count, i = 0;
- long ret = timeout;
-
- if (!timeout)
- return reservation_object_test_signaled_rcu(obj, wait_all);
+ long ret = timeout ? timeout : 1;
retry:
fence = NULL;
@@ -397,20 +385,18 @@ retry:
if (fobj)
shared_count = fobj->shared_count;
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
-
for (i = 0; i < shared_count; ++i) {
- struct fence *lfence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &lfence->flags))
continue;
- if (!fence_get_rcu(lfence))
+ if (!dma_fence_get_rcu(lfence))
goto unlock_retry;
- if (fence_is_signaled(lfence)) {
- fence_put(lfence);
+ if (dma_fence_is_signaled(lfence)) {
+ dma_fence_put(lfence);
continue;
}
@@ -420,18 +406,16 @@ retry:
}
if (!shared_count) {
- struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
- if (!fence_get_rcu(fence_excl))
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence_excl->flags)) {
+ if (!dma_fence_get_rcu(fence_excl))
goto unlock_retry;
- if (fence_is_signaled(fence_excl))
- fence_put(fence_excl);
+ if (dma_fence_is_signaled(fence_excl))
+ dma_fence_put(fence_excl);
else
fence = fence_excl;
}
@@ -439,8 +423,13 @@ retry:
rcu_read_unlock();
if (fence) {
- ret = fence_wait_timeout(fence, intr, ret);
- fence_put(fence);
+ if (read_seqcount_retry(&obj->seq, seq)) {
+ dma_fence_put(fence);
+ goto retry;
+ }
+
+ ret = dma_fence_wait_timeout(fence, intr, ret);
+ dma_fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
goto retry;
}
@@ -454,18 +443,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
static inline int
-reservation_object_test_signaled_single(struct fence *passed_fence)
+reservation_object_test_signaled_single(struct dma_fence *passed_fence)
{
- struct fence *fence, *lfence = passed_fence;
+ struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
- fence = fence_get_rcu(lfence);
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+ fence = dma_fence_get_rcu(lfence);
if (!fence)
return -1;
- ret = !!fence_is_signaled(fence);
- fence_put(fence);
+ ret = !!dma_fence_is_signaled(fence);
+ dma_fence_put(fence);
}
return ret;
}
@@ -484,12 +473,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
unsigned seq, shared_count;
- int ret = true;
+ int ret;
+ rcu_read_lock();
retry:
+ ret = true;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
- rcu_read_lock();
if (test_all) {
unsigned i;
@@ -500,46 +490,35 @@ retry:
if (fobj)
shared_count = fobj->shared_count;
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
-
for (i = 0; i < shared_count; ++i) {
- struct fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
ret = reservation_object_test_signaled_single(fence);
if (ret < 0)
- goto unlock_retry;
+ goto retry;
else if (!ret)
break;
}
- /*
- * There could be a read_seqcount_retry here, but nothing cares
- * about whether it's the old or newer fence pointers that are
- * signaled. That race could still have happened after checking
- * read_seqcount_retry. If you care, use ww_mutex_lock.
- */
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
}
if (!shared_count) {
- struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl) {
ret = reservation_object_test_signaled_single(
fence_excl);
if (ret < 0)
- goto unlock_retry;
+ goto retry;
+
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
}
}
rcu_read_unlock();
return ret;
-
-unlock_retry:
- rcu_read_unlock();
- goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
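A hedged sketch (not part of the patch) of a typical caller of the RCU wait helper above: example_wait_idle() is hypothetical and simply waits, interruptibly and with a 100ms budget, for every fence attached to a reservation object.

/* Illustrative only: wait for a reservation object to go idle. */
static int example_wait_idle(struct reservation_object *obj)
{
	long ret;

	/* wait_all = true covers the exclusive and all shared fences */
	ret = reservation_object_wait_timeout_rcu(obj, true, true,
						  msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	return ret ? 0 : -ETIMEDOUT;	/* 0 jiffies left means timeout */
}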
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
index 71127f8f1626..f47112a64763 100644
--- a/drivers/dma-buf/seqno-fence.c
+++ b/drivers/dma-buf/seqno-fence.c
@@ -21,35 +21,35 @@
#include <linux/export.h>
#include <linux/seqno-fence.h>
-static const char *seqno_fence_get_driver_name(struct fence *fence)
+static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_driver_name(fence);
}
-static const char *seqno_fence_get_timeline_name(struct fence *fence)
+static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_timeline_name(fence);
}
-static bool seqno_enable_signaling(struct fence *fence)
+static bool seqno_enable_signaling(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->enable_signaling(fence);
}
-static bool seqno_signaled(struct fence *fence)
+static bool seqno_signaled(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}
-static void seqno_release(struct fence *fence)
+static void seqno_release(struct dma_fence *fence)
{
struct seqno_fence *f = to_seqno_fence(fence);
@@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence)
if (f->ops->release)
f->ops->release(fence);
else
- fence_free(&f->base);
+ dma_fence_free(&f->base);
}
-static signed long seqno_wait(struct fence *fence, bool intr,
- signed long timeout)
+static signed long seqno_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct seqno_fence *f = to_seqno_fence(fence);
return f->ops->wait(fence, intr, timeout);
}
-const struct fence_ops seqno_fence_ops = {
+const struct dma_fence_ops seqno_fence_ops = {
.get_driver_name = seqno_fence_get_driver_name,
.get_timeline_name = seqno_fence_get_timeline_name,
.enable_signaling = seqno_enable_signaling,
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 62e8e6dc7953..69c5ff36e2f9 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data {
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-static const struct fence_ops timeline_fence_ops;
+static const struct dma_fence_ops timeline_fence_ops;
-static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
{
if (fence->ops != &timeline_fence_ops)
return NULL;
@@ -84,7 +84,7 @@ static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
* Creates a new sync_timeline. Returns the sync_timeline object or NULL in
* case of error.
*/
-struct sync_timeline *sync_timeline_create(const char *name)
+static struct sync_timeline *sync_timeline_create(const char *name)
{
struct sync_timeline *obj;
@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name)
return NULL;
kref_init(&obj->kref);
- obj->context = fence_context_alloc(1);
+ obj->context = dma_fence_context_alloc(1);
strlcpy(obj->name, name, sizeof(obj->name));
INIT_LIST_HEAD(&obj->child_list_head);
@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
list_for_each_entry_safe(pt, next, &obj->active_list_head,
active_list) {
- if (fence_is_signaled_locked(&pt->base))
+ if (dma_fence_is_signaled_locked(&pt->base))
list_del_init(&pt->active_list);
}
@@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
spin_lock_irqsave(&obj->child_list_lock, flags);
sync_timeline_get(obj);
- fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
- obj->context, value);
+ dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+ obj->context, value);
list_add_tail(&pt->child_list, &obj->child_list_head);
INIT_LIST_HEAD(&pt->active_list);
spin_unlock_irqrestore(&obj->child_list_lock, flags);
return pt;
}
-static const char *timeline_fence_get_driver_name(struct fence *fence)
+static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
return "sw_sync";
}
-static const char *timeline_fence_get_timeline_name(struct fence *fence)
+static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
return parent->name;
}
-static void timeline_fence_release(struct fence *fence)
+static void timeline_fence_release(struct dma_fence *fence)
{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence)
spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
- fence_free(fence);
+ dma_fence_free(fence);
}
-static bool timeline_fence_signaled(struct fence *fence)
+static bool timeline_fence_signaled(struct dma_fence *fence)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
return (fence->seqno > parent->value) ? false : true;
}
-static bool timeline_fence_enable_signaling(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
if (timeline_fence_signaled(fence))
return false;
@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence)
return true;
}
-static void timeline_fence_value_str(struct fence *fence,
+static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
snprintf(str, size, "%d", fence->seqno);
}
-static void timeline_fence_timeline_value_str(struct fence *fence,
+static void timeline_fence_timeline_value_str(struct dma_fence *fence,
char *str, int size)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
snprintf(str, size, "%d", parent->value);
}
-static const struct fence_ops timeline_fence_ops = {
+static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
@@ -316,8 +316,8 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
}
sync_file = sync_file_create(&pt->base);
+ dma_fence_put(&pt->base);
if (!sync_file) {
- fence_put(&pt->base);
err = -ENOMEM;
goto err;
}
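The hunk above relies on sync_file_create() now grabbing its own reference (see the sync_file.c change further down), so the creation reference can be dropped unconditionally right after the call. A hedged sketch of the same pattern outside sw_sync, with hypothetical names:

/* Illustrative only: example_fence_to_fd() consumes the caller's fence
 * reference, success or failure, just like the sw_sync ioctl above.
 */
static int example_fence_to_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	dma_fence_put(fence);	/* the sync_file holds its own reference */
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);
	return fd;
}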
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 2dd4c3db6caa..48b20e34fb6d 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -71,12 +71,13 @@ static const char *sync_status_str(int status)
return "error";
}
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+static void sync_print_fence(struct seq_file *s,
+ struct dma_fence *fence, bool show)
{
int status = 1;
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
- if (fence_is_signaled_locked(fence))
+ if (dma_fence_is_signaled_locked(fence))
status = fence->status;
seq_printf(s, " %s%sfence %s",
@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
- sync_status_str(!fence_is_signaled(sync_file->fence)));
+ sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
- if (fence_is_array(sync_file->fence)) {
- struct fence_array *array = to_fence_array(sync_file->fence);
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
for (i = 0; i < array->num_fences; ++i)
sync_print_fence(s, array->fences[i], true);
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index d269aa6783aa..26fe8b9907b3 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -15,7 +15,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <uapi/linux/sync_file.h>
@@ -45,10 +45,9 @@ struct sync_timeline {
struct list_head sync_timeline_list;
};
-static inline struct sync_timeline *fence_parent(struct fence *fence)
+static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
- return container_of(fence->lock, struct sync_timeline,
- child_list_lock);
+ return container_of(fence->lock, struct sync_timeline, child_list_lock);
}
/**
@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
* @active_list: sync timeline active child's list
*/
struct sync_pt {
- struct fence base;
+ struct dma_fence base;
struct list_head child_list;
struct list_head active_list;
};
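A hedged illustration (not part of the patch) of what dma_fence_parent() buys sw_sync: every point created on a timeline shares that timeline's fence context, so the parent can be recovered purely from the embedded fence. example_sync_pt_context() is hypothetical.

/* Illustrative only: walk from a sync point back to its timeline. */
static u64 example_sync_pt_context(struct sync_pt *pt)
{
	struct sync_timeline *parent = dma_fence_parent(&pt->base);

	/* sync_pt_create() initialized the fence with parent->context */
	WARN_ON(pt->base.context != parent->context);
	return parent->context;
}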
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index b29a9e817320..6d802f2d2881 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -54,7 +54,7 @@ err:
return NULL;
}
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct sync_file *sync_file;
@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
- * takes ownership of @fence. The sync_file can be released with
+ * takes a new reference to @fence. The sync_file can be released with
* fput(sync_file->file). Returns the sync_file or NULL in case of error.
*/
-struct sync_file *sync_file_create(struct fence *fence)
+struct sync_file *sync_file_create(struct dma_fence *fence)
{
struct sync_file *sync_file;
@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
if (!sync_file)
return NULL;
- sync_file->fence = fence;
+ sync_file->fence = dma_fence_get(fence);
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
@@ -121,16 +121,16 @@ err:
* Ensures @fd references a valid sync_file and returns a fence that
* represents all fences in the sync_file. On error NULL is returned.
*/
-struct fence *sync_file_get_fence(int fd)
+struct dma_fence *sync_file_get_fence(int fd)
{
struct sync_file *sync_file;
- struct fence *fence;
+ struct dma_fence *fence;
sync_file = sync_file_fdget(fd);
if (!sync_file)
return NULL;
- fence = fence_get(sync_file->fence);
+ fence = dma_fence_get(sync_file->fence);
fput(sync_file->file);
return fence;
@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd)
EXPORT_SYMBOL(sync_file_get_fence);
static int sync_file_set_fence(struct sync_file *sync_file,
- struct fence **fences, int num_fences)
+ struct dma_fence **fences, int num_fences)
{
- struct fence_array *array;
+ struct dma_fence_array *array;
/*
* The references for the fences in the new sync_file are taken in
* add_fence() during the merge procedure, so for num_fences == 1 we
* already own a new reference to the fence. For num_fences > 1
- * we own the reference of the fence_array creation.
+ * we own the reference of the dma_fence_array creation.
*/
if (num_fences == 1) {
sync_file->fence = fences[0];
kfree(fences);
} else {
- array = fence_array_create(num_fences, fences,
- fence_context_alloc(1), 1, false);
+ array = dma_fence_array_create(num_fences, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
if (!array)
return -ENOMEM;
@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file,
return 0;
}
-static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+static struct dma_fence **get_fences(struct sync_file *sync_file,
+ int *num_fences)
{
- if (fence_is_array(sync_file->fence)) {
- struct fence_array *array = to_fence_array(sync_file->fence);
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
*num_fences = array->num_fences;
return array->fences;
@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
return &sync_file->fence;
}
-static void add_fence(struct fence **fences, int *i, struct fence *fence)
+static void add_fence(struct dma_fence **fences,
+ int *i, struct dma_fence *fence)
{
fences[*i] = fence;
- if (!fence_is_signaled(fence)) {
- fence_get(fence);
+ if (!dma_fence_is_signaled(fence)) {
+ dma_fence_get(fence);
(*i)++;
}
}
@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
- struct fence **fences, **nfences, **a_fences, **b_fences;
+ struct dma_fence **fences, **nfences, **a_fences, **b_fences;
int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* and sync_file_create, this is a reasonable assumption.
*/
for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
- struct fence *pt_a = a_fences[i_a];
- struct fence *pt_b = b_fences[i_b];
+ struct dma_fence *pt_a = a_fences[i_a];
+ struct dma_fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
add_fence(fences, &i, pt_a);
@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
add_fence(fences, &i, b_fences[i_b]);
if (i == 0)
- fences[i++] = fence_get(a_fences[0]);
+ fences[i++] = dma_fence_get(a_fences[0]);
if (num_fences > i) {
nfences = krealloc(fences, i * sizeof(*fences),
@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref)
kref);
if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
- fence_remove_callback(sync_file->fence, &sync_file->cb);
- fence_put(sync_file->fence);
+ dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
+ dma_fence_put(sync_file->fence);
kfree(sync_file);
}
@@ -305,14 +308,13 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
- if (!poll_does_not_wait(wait) &&
- !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
- if (fence_add_callback(sync_file->fence, &sync_file->cb,
- fence_check_cb_func) < 0)
+ if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
+ fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
}
- return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+ return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -370,14 +372,14 @@ err_put_fd:
return err;
}
-static void sync_fill_fence_info(struct fence *fence,
+static void sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
info->status = fence->status >= 0 ? 1 : fence->status;
else
info->status = 0;
@@ -389,7 +391,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
- struct fence **fences;
+ struct dma_fence **fences;
__u32 size;
int num_fences, ret, i;
@@ -429,7 +431,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
- info.status = fence_is_signaled(sync_file->fence);
+ info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
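For completeness, a hedged sketch (not part of the patch) of the consumer side of the renamed API: a hypothetical driver takes an in-fence from a user-supplied sync_file fd and bounds its wait.

/* Illustrative only: import and wait on a fence handed in via an fd. */
static int example_import_in_fence(int fd)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	long ret;

	if (!fence)
		return -EINVAL;	/* not a sync_file, or it has no fence */

	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(500));
	dma_fence_put(fence);
	if (ret < 0)
		return ret;
	return ret ? 0 : -ETIMEDOUT;
}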
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index af63a6bcf564..141aefbe37ec 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -306,6 +306,7 @@ config MMP_TDMA
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
select MMP_SRAM if ARCH_MMP
+ select GENERIC_ALLOCATOR
help
Support the MMP Two-Channel DMA engine.
This engine is used for MMP Audio DMA and pxa910 SQU.
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index bac5f023013b..d5ba43a87a68 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -317,6 +317,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
while (val) {
u32 desc, len;
+ int error;
+
+ error = pm_runtime_get(cdd->ddev.dev);
+ if (error < 0)
+ dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+ __func__, error);
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -338,7 +344,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
dma_cookie_complete(&c->txd);
dmaengine_desc_get_callback_invoke(&c->txd, NULL);
- /* Paired with cppi41_dma_issue_pending */
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
}
@@ -362,8 +367,13 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
int error;
error = pm_runtime_get_sync(cdd->ddev.dev);
- if (error < 0)
+ if (error < 0) {
+ dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+ __func__, error);
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
return error;
+ }
dma_cookie_init(chan);
dma_async_tx_descriptor_init(&c->txd, chan);
@@ -385,8 +395,11 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
int error;
error = pm_runtime_get_sync(cdd->ddev.dev);
- if (error < 0)
+ if (error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
return;
+ }
WARN_ON(!list_empty(&cdd->pending));
@@ -460,9 +473,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
struct cppi41_dd *cdd = c->cdd;
int error;
- /* PM runtime paired with dmaengine_desc_get_callback_invoke */
error = pm_runtime_get(cdd->ddev.dev);
if ((error != -EINPROGRESS) && error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
error);
@@ -473,6 +486,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
push_desc_queue(c);
else
pending_desc(c);
+
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
}
static u32 get_host_pd0(u32 length)
@@ -1059,8 +1075,8 @@ err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
pm_runtime_dont_use_autosuspend(dev);
- pm_runtime_put_sync(dev);
err_get_sync:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
@@ -1072,7 +1088,12 @@ err_get_sync:
static int cppi41_dma_remove(struct platform_device *pdev)
{
struct cppi41_dd *cdd = platform_get_drvdata(pdev);
+ int error;
+ error = pm_runtime_get_sync(&pdev->dev);
+ if (error < 0)
+ dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
+ __func__, error);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&cdd->ddev);
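The cppi41 changes above all push toward the same balanced pm_runtime discipline. A hedged sketch of that shape, with a hypothetical device, not taken from the patch:

/* Illustrative only: every successful (or failed) get is paired with a put
 * on the same path; failures drop the usage count with put_noidle().
 */
static void example_runtime_section(struct device *dev)
{
	int error = pm_runtime_get_sync(dev);

	if (error < 0) {
		dev_err(dev, "pm_runtime_get_sync failed: %i\n", error);
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return;
	}

	/* ... access the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}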
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e18a58068bca..77242b37ef87 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1628,6 +1628,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
if (echan->slot[0] < 0) {
dev_err(dev, "Entry slot allocation failed for channel %u\n",
EDMA_CHAN_SLOT(echan->ch_num));
+ ret = echan->slot[0];
goto err_slot;
}
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 83461994e418..a2358780ab2c 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -578,7 +578,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
burst = convert_burst(8);
width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
- v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+ v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_LINEAR_MODE |
DMA_CHAN_CFG_SRC_LINEAR_MODE |
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 309311b1faae..15475892af0c 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -73,13 +73,13 @@ struct rfc2734_header {
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int retval;
u16 ether_type;
+ if (len <= RFC2374_UNFRAG_HDR_SIZE)
+ return 0;
+
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
+
/* A datagram fragment has been received, now the fun begins. */
+
+ if (len <= RFC2374_FRAG_HDR_SIZE)
+ return 0;
+
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+ dg_size = fwnet_get_hdr_dg_size(&hdr);
+
+ if (fg_off + len > dg_size)
+ return 0;
spin_lock_irqsave(&dev->lock, flags);
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
fw_send_response(card, r, rcode);
}
+static int gasp_source_id(__be32 *p)
+{
+ return be32_to_cpu(p[0]) >> 16;
+}
+
+static u32 gasp_specifier_id(__be32 *p)
+{
+ return (be32_to_cpu(p[0]) & 0xffff) << 8 |
+ (be32_to_cpu(p[1]) & 0xff000000) >> 24;
+}
+
+static u32 gasp_version(__be32 *p)
+{
+ return be32_to_cpu(p[1]) & 0xffffff;
+}
+
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
__be32 *buf_ptr;
int retval;
u32 length;
- u16 source_node_id;
- u32 specifier_id;
- u32 ver;
unsigned long offset;
unsigned long flags;
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
spin_unlock_irqrestore(&dev->lock, flags);
- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
-
- if (specifier_id == IANA_SPECIFIER_ID &&
- (ver == RFC2734_SW_VERSION
+ if (length > IEEE1394_GASP_HDR_SIZE &&
+ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
+ (gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
- || ver == RFC3146_SW_VERSION
+ || gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
- )) {
- buf_ptr += 2;
- length -= IEEE1394_GASP_HDR_SIZE;
- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+ ))
+ fwnet_incoming_packet(dev, buf_ptr + 2,
+ length - IEEE1394_GASP_HDR_SIZE,
+ gasp_source_id(buf_ptr),
context->card->generation, true);
- }
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
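Since the dg_size macros above now encode the RFC 2734 "size minus one" convention on both sides, a hedged round-trip check (not part of the patch; example_dg_size_roundtrip() is hypothetical) shows how the pair stays symmetric:

/* Illustrative only: set stores dg_size - 1, get adds the 1 back. */
static void example_dg_size_roundtrip(void)
{
	struct rfc2734_header h = { 0 };
	unsigned int dg_size = 1500;	/* hypothetical datagram size */

	h.w0 = fwnet_set_hdr_dg_size(dg_size) | fwnet_set_hdr_fg_off(0);
	WARN_ON(fwnet_get_hdr_dg_size(&h) != dg_size);
}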
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d011cb89d25e..ed37e5908b91 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -22,10 +22,6 @@ menuconfig GPIOLIB
if GPIOLIB
-config GPIO_DEVRES
- def_bool y
- depends on HAS_IOMEM
-
config OF_GPIO
def_bool y
depends on OF
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ab28a2daeacc..d074c2299393 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,7 +2,7 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
-obj-$(CONFIG_GPIO_DEVRES) += devres.o
+obj-$(CONFIG_GPIOLIB) += devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index cd5dc27320a2..1ed6132b993c 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
- u32 mask = ~(1 << (d->irq - gc->irq_base));
+ u32 mask = d->mask;
irq_gc_lock(gc);
- writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
+ writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
irq_gc_unlock(gc);
}
@@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
@@ -319,8 +319,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
@@ -333,8 +332,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
@@ -347,8 +345,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
@@ -462,7 +459,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
- irq = mvchip->irqbase + i;
+ irq = irq_find_mapping(mvchip->domain, i);
if (!(cause & (1 << i)))
continue;
@@ -655,6 +652,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
struct irq_chip_type *ct;
struct clk *clk;
unsigned int ngpios;
+ bool have_irqs;
int soc_variant;
int i, cpu, id;
int err;
@@ -665,6 +663,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
else
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
+ /* Some gpio controllers do not provide irq support */
+ have_irqs = of_irq_count(np) != 0;
+
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
GFP_KERNEL);
if (!mvchip)
@@ -697,7 +698,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
mvchip->chip.set = mvebu_gpio_set;
- mvchip->chip.to_irq = mvebu_gpio_to_irq;
+ if (have_irqs)
+ mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = false;
@@ -758,34 +760,30 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
/* Some gpio controllers do not provide irq support */
- if (!of_irq_count(np))
+ if (!have_irqs)
return 0;
- /* Setup the interrupt handlers. Each chip can have up to 4
- * interrupt handlers, with each handler dealing with 8 GPIO
- * pins. */
- for (i = 0; i < 4; i++) {
- int irq = platform_get_irq(pdev, i);
-
- if (irq < 0)
- continue;
- irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
- mvchip);
- }
-
- mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
- if (mvchip->irqbase < 0) {
- dev_err(&pdev->dev, "no irqs\n");
- return mvchip->irqbase;
+ mvchip->domain =
+ irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+ if (!mvchip->domain) {
+ dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+ mvchip->chip.label);
+ return -ENODEV;
}
- gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
- mvchip->membase, handle_level_irq);
- if (!gc) {
- dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- return -ENOMEM;
+ err = irq_alloc_domain_generic_chips(
+ mvchip->domain, ngpios, 2, np->name, handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
+ if (err) {
+ dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
+ mvchip->chip.label);
+ goto err_domain;
}
+ /* NOTE: The common accessors cannot be used because of the percpu
+ * access to the mask registers
+ */
+ gc = irq_get_domain_generic_chip(mvchip->domain, 0);
gc->private = mvchip;
ct = &gc->chip_types[0];
ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
@@ -803,27 +801,23 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
ct->handler = handle_edge_irq;
ct->chip.name = mvchip->chip.label;
- irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
- IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+ /* Setup the interrupt handlers. Each chip can have up to 4
+ * interrupt handlers, with each handler dealing with 8 GPIO
+ * pins.
+ */
+ for (i = 0; i < 4; i++) {
+ int irq = platform_get_irq(pdev, i);
- /* Setup irq domain on top of the generic chip. */
- mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
- mvchip->irqbase,
- &irq_domain_simple_ops,
- mvchip);
- if (!mvchip->domain) {
- dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
- mvchip->chip.label);
- err = -ENODEV;
- goto err_generic_chip;
+ if (irq < 0)
+ continue;
+ irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+ mvchip);
}
return 0;
-err_generic_chip:
- irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
- IRQ_LEVEL | IRQ_NOPROBE);
- kfree(gc);
+err_domain:
+ irq_domain_remove(mvchip->domain);
return err;
}
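A hedged sketch (not part of the patch) of the irq_domain shape gpio-mvebu converts to above: a linear domain plus domain-allocated generic chips instead of irq_alloc_descs() and a hand-rolled generic chip. All names except the kernel APIs are hypothetical.

/* Illustrative only: the generic domain + generic-chip setup pattern. */
static int example_setup_irq_domain(struct platform_device *pdev,
				    struct device_node *np,
				    unsigned int ngpios)
{
	struct irq_domain *domain;
	int err;

	domain = irq_domain_add_linear(np, ngpios,
				       &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENODEV;

	err = irq_alloc_domain_generic_chips(domain, ngpios, 2, np->name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE |
					     IRQ_LEVEL, 0, 0);
	if (err) {
		irq_domain_remove(domain);
		return err;
	}

	/* per-chip setup then continues through
	 * irq_get_domain_generic_chip(domain, 0), as above
	 */
	return 0;
}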
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index e422568e14ad..fe731f094257 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
- memcpy(reg_val, chip->reg_output, NBANK(chip));
mutex_lock(&chip->i2c_lock);
+ memcpy(reg_val, chip->reg_output, NBANK(chip));
for (bank = 0; bank < NBANK(chip); bank++) {
bank_mask = mask[bank / sizeof(*mask)] >>
((bank % sizeof(*mask)) * 8);
if (bank_mask) {
bank_val = bits[bank / sizeof(*bits)] >>
((bank % sizeof(*bits)) * 8);
+ bank_val &= bank_mask;
reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
}
}
@@ -607,7 +608,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (client->irq && irq_base != -1
&& (chip->driver_data & PCA_INT)) {
-
ret = pca953x_read_regs(chip,
chip->regs->input, chip->irq_stat);
if (ret)
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 5a5a6cb00eea..d6e21f1a70a9 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -97,7 +97,7 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !!(ret & BIT(pos));
+ return !(ret & BIT(pos));
}
static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ecad3f0e3b77..193f15d50bba 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -26,14 +26,18 @@
#include "gpiolib.h"
-static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
{
- return chip->gpiodev->dev.of_node == data;
+ struct of_phandle_args *gpiospec = data;
+
+ return chip->gpiodev->dev.of_node == gpiospec->np &&
+ chip->of_xlate(chip, gpiospec, NULL) >= 0;
}
-static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+static struct gpio_chip *of_find_gpiochip_by_xlate(
+ struct of_phandle_args *gpiospec)
{
- return gpiochip_find(np, of_gpiochip_match_node);
+ return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
}
static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
@@ -79,7 +83,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
return ERR_PTR(ret);
}
- chip = of_find_gpiochip_by_node(gpiospec.np);
+ chip = of_find_gpiochip_by_xlate(&gpiospec);
if (!chip) {
desc = ERR_PTR(-EPROBE_DEFER);
goto out;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 20e09b7c2de3..868128a676ba 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/anon_inodes.h>
+#include <linux/file.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/timekeeping.h>
@@ -423,6 +424,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
+ struct file *file;
int fd, i, ret;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -499,26 +501,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
i--;
lh->numdescs = handlereq.lines;
- fd = anon_inode_getfd("gpio-linehandle",
- &linehandle_fileops,
- lh,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_descs;
}
+ file = anon_inode_getfile("gpio-linehandle",
+ &linehandle_fileops,
+ lh,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
handlereq.fd = fd;
if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
- ret = -EFAULT;
- goto out_free_descs;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lh->numdescs);
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_descs:
for (; i >= 0; i--)
gpiod_free(lh->descs[i]);
@@ -721,6 +738,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
struct gpioevent_request eventreq;
struct lineevent_state *le;
struct gpio_desc *desc;
+ struct file *file;
u32 offset;
u32 lflags;
u32 eflags;
@@ -815,23 +833,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_desc;
- fd = anon_inode_getfd("gpio-event",
- &lineevent_fileops,
- le,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_irq;
}
+ file = anon_inode_getfile("gpio-event",
+ &lineevent_fileops,
+ le,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
eventreq.fd = fd;
if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
- ret = -EFAULT;
- goto out_free_irq;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_irq:
free_irq(le->irq, le);
out_free_desc:
@@ -2704,8 +2737,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
if (IS_ERR(desc))
return PTR_ERR(desc);
- /* Flush direction if something changed behind our back */
- if (chip->get_direction) {
+ /*
+ * If it's fast: flush the direction setting if something changed
+ * behind our back
+ */
+ if (!chip->can_sleep && chip->get_direction) {
int dir = chip->get_direction(chip, offset);
if (dir)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 483059a22b1b..95fc0410e129 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -12,6 +12,7 @@ menuconfig DRM
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
+ select SYNC_FILE
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -33,6 +34,20 @@ config DRM_DP_AUX_CHARDEV
read and write values to arbitrary DPCD registers on the DP aux
channel.
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM=y
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -223,6 +238,8 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
+source "drivers/gpu/drm/zte/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25c720454017..883f3e75cfbc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,13 +9,14 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
- drm_info.o drm_debugfs.o drm_encoder_slave.o \
+ drm_info.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_encoder.o drm_mode_object.o drm_property.o \
- drm_plane.o drm_color_mgmt.o
+ drm_plane.o drm_color_mgmt.o drm_print.o \
+ drm_dumb_buffers.o drm_mode_config.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,6 +24,7 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
@@ -86,3 +88,4 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
+obj-$(CONFIG_DRM_ZTE) += zte/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 248a05d02917..41bd2bf28f4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
- amdgpu_gtt_mgr.o
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index b8d66670bb17..06192698bd96 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,7 +90,6 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
-#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -120,7 +119,6 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@@ -149,7 +147,6 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -411,10 +408,6 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c..121a034fe27d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -53,7 +53,11 @@
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
#include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu;
extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8
-/* max number of rings */
-#define AMDGPU_MAX_RINGS 16
-#define AMDGPU_MAX_GFX_RINGS 1
-#define AMDGPU_MAX_COMPUTE_RINGS 8
-#define AMDGPU_MAX_VCE_RINGS 3
-
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
struct amdgpu_device;
struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
bool amdgpu_is_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+#define AMDGPU_MAX_IP_NUM 16
+
+struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+ bool hw;
+ bool late_initialized;
+ bool hang;
+};
+
struct amdgpu_ip_block_version {
- enum amd_ip_block_type type;
- u32 major;
- u32 minor;
- u32 rev;
+ const enum amd_ip_block_type type;
+ const u32 major;
+ const u32 minor;
+ const u32 rev;
const struct amd_ip_funcs *funcs;
};
+struct amdgpu_ip_block {
+ struct amdgpu_ip_block_status status;
+ const struct amdgpu_ip_block_version *version;
+};
+
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor);
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
+
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
void (*set_rptr)(struct amdgpu_device *adev);
};
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
- /* ring read/write ptr handling */
- u32 (*get_rptr)(struct amdgpu_ring *ring);
- u32 (*get_wptr)(struct amdgpu_ring *ring);
- void (*set_wptr)(struct amdgpu_ring *ring);
- /* validating and patching of IBs */
- int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
- /* command emit functions */
- void (*emit_ib)(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
- void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
- uint64_t seq, unsigned flags);
- void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
- uint64_t pd_addr);
- void (*emit_hdp_flush)(struct amdgpu_ring *ring);
- void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
- void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
- /* testing functions */
- int (*test_ring)(struct amdgpu_ring *ring);
- int (*test_ib)(struct amdgpu_ring *ring, long timeout);
- /* insert NOP packets */
- void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
- /* pad the indirect buffer to the necessary number of dw */
- void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
- unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
- void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
- /* note usage for clock and power gating */
- void (*begin_use)(struct amdgpu_ring *ring);
- void (*end_use)(struct amdgpu_ring *ring);
- void (*emit_switch_buffer) (struct amdgpu_ring *ring);
- void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
- unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
/*
* BIOS.
*/
@@ -364,47 +337,6 @@ struct amdgpu_clock {
};
/*
- * Fences.
- */
-struct amdgpu_fence_driver {
- uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
- /* sync_seq is protected by ring emission lock */
- uint32_t sync_seq;
- atomic_t last_seq;
- bool initialized;
- struct amdgpu_irq_src *irq_src;
- unsigned irq_type;
- struct timer_list fallback_timer;
- unsigned num_fences_mask;
- spinlock_t lock;
- struct fence **fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
-#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
-/*
* BO.
*/
struct amdgpu_bo_list_entry {
@@ -427,7 +359,7 @@ struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- struct fence *last_pt_update;
+ struct dma_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex and spinlock */
@@ -459,12 +391,12 @@ struct amdgpu_bo {
u64 metadata_flags;
void *metadata;
u32 metadata_size;
+ unsigned prime_shared_count;
/* list of all virtual address to which this bo
* is associated to
*/
struct list_head va;
/* Constant after initialization */
- struct amdgpu_device *adev;
struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
@@ -543,7 +475,7 @@ struct amdgpu_sa_bo {
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
- struct fence *fence;
+ struct dma_fence *fence;
};
/*
@@ -561,27 +493,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
- DECLARE_HASHTABLE(fences, 4);
- struct fence *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct reservation_object *resv,
- void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
@@ -703,10 +614,10 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_abo;
- struct fence *excl;
+ struct dma_fence *excl;
unsigned shared_count;
- struct fence **shared;
- struct fence_cb cb;
+ struct dma_fence **shared;
+ struct dma_fence_cb cb;
bool async;
};
@@ -723,14 +634,6 @@ struct amdgpu_ib {
uint32_t flags;
};
-enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE
-};
-
extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -742,214 +645,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f);
-
-struct amdgpu_ring {
- struct amdgpu_device *adev;
- const struct amdgpu_ring_funcs *funcs;
- struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
-
- struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
- unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
- unsigned ring_size;
- unsigned max_dw;
- int count_dw;
- uint64_t gpu_addr;
- uint32_t align_mask;
- uint32_t ptr_mask;
- bool ready;
- u32 nop;
- u32 idx;
- u32 me;
- u32 pipe;
- u32 queue;
- struct amdgpu_bo *mqd_obj;
- u32 doorbell_index;
- bool use_doorbell;
- unsigned wptr_offs;
- unsigned fence_offs;
- uint64_t current_ctx;
- enum amdgpu_ring_type type;
- char name[16];
- unsigned cond_exe_offs;
- u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID (1 << 0)
-#define AMDGPU_PTE_SYSTEM (1 << 1)
-#define AMDGPU_PTE_SNOOPED (1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE (1 << 4)
-
-#define AMDGPU_PTE_READABLE (1 << 5)
-#define AMDGPU_PTE_WRITEABLE (1 << 6)
-
-#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER 0
-#define AMDGPU_VM_FAULT_STOP_FIRST 1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-
-struct amdgpu_vm_pt {
- struct amdgpu_bo_list_entry entry;
- uint64_t addr;
- uint64_t shadow_addr;
-};
-
-struct amdgpu_vm {
- /* tree of virtual addresses mapped */
- struct rb_root va;
-
- /* protecting invalidated */
- spinlock_t status_lock;
-
- /* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
-
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
-
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
- /* contains the page directory */
- struct amdgpu_bo *page_directory;
- unsigned max_pde_used;
- struct fence *page_directory_fence;
- uint64_t last_eviction_counter;
-
- /* array of page tables, one for each page directory entry */
- struct amdgpu_vm_pt *page_tables;
-
- /* for id and flush management per ring */
- struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
-
- /* protecting freed */
- spinlock_t freed_lock;
-
- /* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
-
- /* client id */
- u64 client_id;
-};
-
-struct amdgpu_vm_id {
- struct list_head list;
- struct fence *first;
- struct amdgpu_sync active;
- struct fence *last_flush;
- atomic64_t owner;
-
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct fence *flushed_updates;
-
- uint32_t current_gpu_reset_count;
-
- uint32_t gds_base;
- uint32_t gds_size;
- uint32_t gws_base;
- uint32_t gws_size;
- uint32_t oa_base;
- uint32_t oa_size;
-};
-
-struct amdgpu_vm_manager {
- /* Handling of VMIDs */
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
-
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
- uint32_t max_pfn;
- /* vram base address for page table entry */
- u64 vram_base_offset;
- /* is vm enabled? */
- bool enabled;
- /* vm pte handling */
- const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rings;
- atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
- struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr, uint64_t offset,
- uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va);
+ struct dma_fence **f);
/*
* context related structures
@@ -957,7 +653,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
- struct fence **fences;
+ struct dma_fence **fences;
struct amd_sched_entity entity;
};
@@ -966,7 +662,7 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
unsigned reset_counter;
spinlock_t ring_lock;
- struct fence **fences;
+ struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
};
@@ -982,8 +678,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -1093,6 +789,16 @@ struct amdgpu_scratch {
/*
* GFX configurations
*/
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+struct amdgpu_rb_config {
+ uint32_t rb_backend_disable;
+ uint32_t user_rb_backend_disable;
+ uint32_t raster_config;
+ uint32_t raster_config_1;
+};
+
struct amdgpu_gca_config {
unsigned max_shader_engines;
unsigned max_tile_pipes;
@@ -1121,6 +827,8 @@ struct amdgpu_gca_config {
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
+
+ struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
};
struct amdgpu_cu_info {
@@ -1133,6 +841,7 @@ struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+ void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
};
struct amdgpu_gfx {
@@ -1181,23 +890,13 @@ struct amdgpu_gfx {
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f);
+ struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ib, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f);
+ struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
/*
* CS.
@@ -1225,7 +924,7 @@ struct amdgpu_cs_parser {
struct amdgpu_bo_list *bo_list;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
- struct fence *fence;
+ struct dma_fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved;
struct amdgpu_bo_list_entry *evictable;
@@ -1245,7 +944,7 @@ struct amdgpu_job {
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
- struct fence *fence; /* the hw fence */
+ struct dma_fence *fence; /* the hw fence */
uint32_t preamble_status;
uint32_t num_ibs;
void *owner;
@@ -1294,354 +993,6 @@ struct amdgpu_wb {
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-
-
-enum amdgpu_int_thermal_type {
- THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
- THERMAL_TYPE_RV6XX,
- THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
- THERMAL_TYPE_EVERGREEN,
- THERMAL_TYPE_SUMO,
- THERMAL_TYPE_NI,
- THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
- AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum amdgpu_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* was last interrupt low to high or high to low */
- bool high_to_low;
- /* interrupt source */
- struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct amdgpu_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
- u32 count;
- struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct amdgpu_cac_leakage_table {
- u32 count;
- union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
- u32 count;
- struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
- struct amdgpu_clock_array valid_sclk_values;
- struct amdgpu_clock_array valid_mclk_values;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct amdgpu_cac_leakage_table cac_leakage_table;
- struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
- struct amdgpu_ppm_table *ppm_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
- AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
- AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
- AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
- AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
- /* vce clocks */
- u32 evclk;
- u32 ecclk;
- /* gpu clocks */
- u32 sclk;
- u32 mclk;
- u8 clk_idx;
- u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
- struct amdgpu_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct amdgpu_ps *current_ps;
- /* requested power state */
- struct amdgpu_ps *requested_ps;
- /* boot up power state */
- struct amdgpu_ps *boot_ps;
- /* default uvd power state */
- struct amdgpu_ps *uvd_ps;
- /* vce requirements */
- struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
- enum amdgpu_vce_level vce_level;
- enum amd_pm_state_type state;
- enum amd_pm_state_type user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- struct amdgpu_dpm_dynamic_state dyn_state;
- struct amdgpu_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- bool ac_power;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct amdgpu_dpm_thermal thermal;
- /* forced levels */
- enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
- struct mutex mutex;
- u32 current_sclk;
- u32 current_mclk;
- u32 default_sclk;
- u32 default_mclk;
- struct amdgpu_i2c_chan *i2c_bus;
- /* internal thermal controller on rv6xx+ */
- enum amdgpu_int_thermal_type int_thermal_type;
- struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct amdgpu_dpm dpm;
- const struct firmware *fw; /* SMC firmware */
- uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
- uint32_t pcie_gen_mask;
- uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
@@ -1862,6 +1213,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1939,14 +1292,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-struct amdgpu_ip_block_status {
- bool valid;
- bool sw;
- bool hw;
- bool late_initialized;
- bool hang;
-};
-
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -2102,9 +1447,8 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
- const struct amdgpu_ip_block_version *ip_blocks;
+ struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
- struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
@@ -2127,6 +1471,11 @@ struct amdgpu_device {
};
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+ return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev,
@@ -2278,8 +1627,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2301,108 +1648,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
- -EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
/* Common functions */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60fb225b..06879d1dcabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- const struct amdgpu_ip_block_version *ip_version =
+ const struct amdgpu_ip_block *ip_block =
amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
- if (!ip_version)
+ if (!ip_block)
return -EINVAL;
r = amd_acp_hw_init(adev->acp.cgs_device,
- ip_version->major, ip_version->minor);
+ ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV)
return 0;
@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle)
{
int i, ret;
struct device *dev;
-
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* return early if no ACP */
+ if (!adev->acp.acp_genpd)
+ return 0;
+
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
@@ -456,7 +459,7 @@ static int acp_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
.early_init = acp_early_init,
.late_init = NULL,
@@ -472,3 +475,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 8a396313c86f..a288ce25c176 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -37,6 +37,6 @@ struct amdgpu_acp {
struct acp_pm_domain *acp_genpd;
};
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
#endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf548d689..56a86dd5789e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
- GET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
- GET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock)
-{
- SET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
- args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock)
-{
- SET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock)
{
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev, volt_index = voltage_level;
-
- if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- /* 0xff01 is a flag rather then an actual voltage */
- if (voltage_level == 0xff01)
- return;
-
- switch (crev) {
- case 1:
- args.v1.ucVoltageType = voltage_type;
- args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
- args.v1.ucVoltageIndex = volt_index;
- break;
- case 2:
- args.v2.ucVoltageType = voltage_type;
- args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
- args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- case 3:
- args.v3.ucVoltageType = voltage_type;
- args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
- args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return;
- }
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
u16 *leakage_id)
{
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung)
+{
+ u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
/* Atom needs data in little endian format
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 17356151db38..70e9acef5d9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type);
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock);
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index dae35a96a694..6c343a933182 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -34,6 +34,7 @@ struct amdgpu_atpx {
static struct amdgpu_atpx_priv {
bool atpx_detected;
+ bool bridge_pm_usable;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
@@ -205,7 +206,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
- atpx->functions.power_cntl = false;
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
atpx->is_hybrid = true;
}
@@ -555,17 +560,25 @@ static bool amdgpu_atpx_detect(void)
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
+ bool d3_supported = false;
+ struct pci_dev *parent_pdev;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
if (has_atpx && vga_count == 2) {
@@ -573,6 +586,7 @@ static bool amdgpu_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
amdgpu_atpx_priv.atpx_detected = true;
+ amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
amdgpu_atpx_init();
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 345305235349..cc97eee93226 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int i, r;
start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
false);
if (r)
goto exit_do_move;
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
goto exit_do_move;
- fence_put(fence);
+ dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 651115dcce12..c02db01f6583 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
+ entry->tv.shared = !entry->robj->prime_shared_count;
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a8bfa34682f..7ded61e6dd81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
(void *)adev,
state);
break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(
(void *)adev,
state);
break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
- if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
- || adev->asic_type == CHIP_POLARIS10)
- result = AMDGPU_UCODE_ID_CP_MEC2;
- else
+ /* for VI, JT2 should be the same as JT1, because:
+ 1. MEC2 and MEC1 use exactly the same FW.
+ 2. JT2 is not patched, but JT1 is.
+ */
+ if (adev->asic_type >= CHIP_TOPAZ)
result = AMDGPU_UCODE_ID_CP_MEC1;
+ else
+ result = AMDGPU_UCODE_ID_CP_MEC2;
break;
case CGS_UCODE_ID_RLC_G:
result = AMDGPU_UCODE_ID_RLC_G;
break;
+ case CGS_UCODE_ID_STORAGE:
+ result = AMDGPU_UCODE_ID_STORAGE;
+ break;
default:
DRM_ERROR("Firmware type not supported\n");
}
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
(type == CGS_UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+ gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
data_size = le32_to_cpu(header->jt_size) << 2;
}
- info->mc_addr = gpu_addr;
+
+ info->kptr = ucode->kaddr;
info->image_size = data_size;
+ info->mc_addr = gpu_addr;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+
+ if (CGS_UCODE_ID_CP_MEC == type)
+ info->image_size = (header->jt_offset) << 2;
+
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
@@ -795,10 +809,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
case CHIP_TOPAZ:
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+ strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
+ ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+ strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
@@ -851,6 +874,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return 0;
}
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+ CGS_FUNC_ADEV;
+ return amdgpu_sriov_vf(adev);
+}
+
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
@@ -1204,6 +1233,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
+ amdgpu_cgs_is_virtualization_enabled
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index e3281d4e3e41..8d1cf2d3e663 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->ddc_bus->has_aux) {
+ if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = false;
}
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.force = amdgpu_connector_dvi_force,
};
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- struct drm_encoder *encoder;
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
- if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
- return encoder;
- }
-
- /* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
- return NULL;
-}
-
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
- struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-
- if (encoder) {
- amdgpu_connector_add_common_modes(encoder, connector);
- }
-
- return 0;
-}
-
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
- return 0;
-}
-
-static enum drm_connector_status
-
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
- return;
-}
-
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
- .get_modes = amdgpu_connector_virtual_get_modes,
- .mode_valid = amdgpu_connector_virtual_mode_valid,
- .best_encoder = amdgpu_connector_virtual_encoder,
-};
-
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
- .dpms = amdgpu_connector_virtual_dpms,
- .detect = amdgpu_connector_virtual_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = amdgpu_connector_virtual_set_property,
- .destroy = amdgpu_connector_destroy,
- .force = amdgpu_connector_virtual_force,
-};
-
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
- case DRM_MODE_CONNECTOR_VIRTUAL:
- amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
- if (!amdgpu_dig_connector)
- goto failed;
- amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
- subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
- break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b0f6e6957536..78da52f90099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t domain;
int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -387,9 +388,9 @@ retry:
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo_list_entry *lobj)
+ struct amdgpu_bo *validated)
{
- uint32_t domain = lobj->robj->allowed_domains;
+ uint32_t domain = validated->allowed_domains;
int r;
if (!p->evictable)
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t other;
/* If we reached our current BO we can forget it */
- if (candidate == lobj)
+ if (candidate->robj == validated)
break;
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
return false;
}
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+ struct amdgpu_cs_parser *p = param;
+ int r;
+
+ do {
+ r = amdgpu_cs_bo_validate(p, bo);
+ } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ if (r)
+ return r;
+
+ if (bo->shadow)
+ r = amdgpu_cs_bo_validate(p, bo);
+
+ return r;
+}
+
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (p->evictable == lobj)
p->evictable = NULL;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+ r = amdgpu_cs_validate(p, bo);
if (r)
return r;
- if (bo->shadow) {
- r = amdgpu_cs_bo_validate(p, bo);
- if (r)
- return r;
- }
-
if (binding_userptr) {
drm_free_large(lobj->user_pages);
lobj->user_pages = NULL;
@@ -519,7 +530,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
if (unlikely(r != 0)) {
- DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
goto error_free_pages;
}
@@ -593,14 +605,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
-
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
p->evictable = list_last_entry(&p->validated,
struct amdgpu_bo_list_entry,
tv.head);
+ r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+ amdgpu_cs_validate, p);
+ if (r) {
+ DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+ goto error_validate;
+ }
+
r = amdgpu_cs_list_validate(p, &duplicates);
if (r) {
DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
@@ -719,7 +736,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
- fence_put(parser->fence);
+ dma_fence_put(parser->fence);
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
@@ -756,7 +773,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
- struct fence *f;
+ struct dma_fence *f;
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
@@ -806,13 +823,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
- p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- } else {
+ }
+
+ if (p->job->vm) {
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
r = amdgpu_bo_vm_update_pte(p, vm);
@@ -901,7 +919,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
- r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
+ r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -916,9 +934,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
- ib->gpu_addr = chunk_ib->va_start;
}
+ ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
j++;
@@ -926,8 +944,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
- parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
return 0;
@@ -956,7 +974,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -978,7 +996,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
} else if (fence) {
r = amdgpu_sync_fence(adev, &p->job->sync,
fence);
- fence_put(fence);
+ dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
return r;
@@ -1008,7 +1026,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
- p->fence = fence_get(&job->base.s_fence->finished);
+ p->fence = dma_fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
@@ -1091,7 +1109,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1107,8 +1125,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
- r = fence_wait_timeout(fence, true, timeout);
- fence_put(fence);
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
} else
r = 1;
@@ -1123,6 +1141,180 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
}
/**
+ * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @user: drm_amdgpu_fence copied from user space
+ */
+static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_fence *user)
+{
+ struct amdgpu_ring *ring;
+ struct amdgpu_ctx *ctx;
+ struct dma_fence *fence;
+ int r;
+
+ r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
+ user->ring, &ring);
+ if (r)
+ return ERR_PTR(r);
+
+ ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
+ if (ctx == NULL)
+ return ERR_PTR(-EINVAL);
+
+ fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+ amdgpu_ctx_put(ctx);
+
+ return fence;
+}
+
+/**
+ * amdgpu_cs_wait_all_fences - wait on all fences to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_wait_fences *wait,
+ struct drm_amdgpu_fence *fences)
+{
+ uint32_t fence_count = wait->in.fence_count;
+ unsigned int i;
+ long r = 1;
+
+ for (i = 0; i < fence_count; i++) {
+ struct dma_fence *fence;
+ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+
+ fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
+
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+ }
+
+ memset(wait, 0, sizeof(*wait));
+ wait->out.status = (r > 0);
+
+ return 0;
+}
+
+/**
+ * amdgpu_cs_wait_any_fence - wait on any fence to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_wait_fences *wait,
+ struct drm_amdgpu_fence *fences)
+{
+ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+ uint32_t fence_count = wait->in.fence_count;
+ uint32_t first = ~0;
+ struct dma_fence **array;
+ unsigned int i;
+ long r;
+
+ /* Prepare the fence array */
+ array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
+
+ if (array == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < fence_count; i++) {
+ struct dma_fence *fence;
+
+ fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+ if (IS_ERR(fence)) {
+ r = PTR_ERR(fence);
+ goto err_free_fence_array;
+ } else if (fence) {
+ array[i] = fence;
+ } else { /* NULL, the fence has already been signaled */
+ r = 1;
+ goto out;
+ }
+ }
+
+ r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
+ &first);
+ if (r < 0)
+ goto err_free_fence_array;
+
+out:
+ memset(wait, 0, sizeof(*wait));
+ wait->out.status = (r > 0);
+ wait->out.first_signaled = first;
+ /* set return value 0 to indicate success */
+ r = 0;
+
+err_free_fence_array:
+ for (i = 0; i < fence_count; i++)
+ dma_fence_put(array[i]);
+ kfree(array);
+
+ return r;
+}
+
+/**
+ * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
+ *
+ * @dev: drm device
+ * @data: data from userspace
+ * @filp: file private
+ */
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ union drm_amdgpu_wait_fences *wait = data;
+ uint32_t fence_count = wait->in.fence_count;
+ struct drm_amdgpu_fence *fences_user;
+ struct drm_amdgpu_fence *fences;
+ int r;
+
+ /* Get the fences from userspace */
+ fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
+ GFP_KERNEL);
+ if (fences == NULL)
+ return -ENOMEM;
+
+ fences_user = (void __user *)(unsigned long)(wait->in.fences);
+ if (copy_from_user(fences, fences_user,
+ sizeof(struct drm_amdgpu_fence) * fence_count)) {
+ r = -EFAULT;
+ goto err_free_fences;
+ }
+
+ if (wait->in.wait_all)
+ r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
+ else
+ r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
+
+err_free_fences:
+ kfree(fences);
+
+ return r;
+}
+
+/**
* amdgpu_cs_find_bo_va - find bo_va for VM address
*
* @parser: command submission parser context
@@ -1195,6 +1387,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r))
return r;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ continue;
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r))
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a5e2fcbef0f0..400c66ba4c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
- sizeof(struct fence*), GFP_KERNEL);
+ sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
- break;
+ goto failed;
}
- if (i < adev->num_rings) {
- for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
- kfree(ctx->fences);
- ctx->fences = NULL;
- return r;
- }
return 0;
+
+failed:
+ for (j = 0; j < i; j++)
+ amd_sched_entity_fini(&adev->rings[j]->sched,
+ &ctx->rings[j].entity);
+ kfree(ctx->fences);
+ ctx->fences = NULL;
+ return r;
}
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
- fence_put(ctx->rings[i].fences[j]);
+ dma_fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
unsigned idx = 0;
- struct fence *other = NULL;
+ struct dma_fence *other = NULL;
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
- r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
- fence_get(fence);
+ dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
cring->fences[idx] = fence;
cring->sequence++;
spin_unlock(&ctx->ring_lock);
- fence_put(other);
+ dma_fence_put(other);
return seq;
}
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
- struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
- struct fence *fence;
+ struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return NULL;
}
- fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+ fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a9239069..deee2db36fce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vram_scratch.robj);
if (r) {
return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
- if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
- amdgpu_bo_kunmap(adev->wb.wb_obj);
- amdgpu_bo_unpin(adev->wb.wb_obj);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- }
- amdgpu_bo_unref(&adev->wb.wb_obj);
- adev->wb.wb = NULL;
+ amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+ &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
adev->wb.wb_obj = NULL;
}
}
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->wb.wb_obj);
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->wb.wb_obj, &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->wb.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
adev->wb.num_wb = AMDGPU_MAX_WB;
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
@@ -658,12 +636,10 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return false;
if (amdgpu_passthrough(adev)) {
- /* for FIJI: In whole GPU pass-through virtualization case
- * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH)
- * so amdgpu_card_posted return false and driver will incorrectly skip vPost.
- * but if we force vPost do in pass-through case, the driver reload will hang.
- * whether doing vPost depends on amdgpu_card_posted if smc version is above
- * 00160e00 for FIJI.
+ /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
+ * reboot some old SMC firmware still needs the driver to do vPost, otherwise
+ * the GPU hangs. SMC firmware versions above 22.15 do not have this flaw, so
+ * force vPost for SMC versions below 22.15
*/
if (adev->asic_type == CHIP_FIJI) {
int err;
@@ -674,22 +650,11 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
- if (fw_ver >= 0x00160e00)
- return !amdgpu_card_posted(adev);
+ if (fw_ver < 0x00160e00)
+ return true;
}
- } else {
- /* in bare-metal case, amdgpu_card_posted return false
- * after system reboot/boot, and return true if driver
- * reloaded.
- * we shouldn't do vPost after driver reload otherwise GPU
- * could hang.
- */
- if (amdgpu_card_posted(adev))
- return false;
}
-
- /* we assume vPost is neede for all other cases */
- return true;
+ return !amdgpu_card_posted(adev);
}
/**
@@ -1051,6 +1016,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_block_size);
amdgpu_vm_block_size = 9;
}
+
+ if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) ||
+ !amdgpu_check_pot_argument(amdgpu_vram_page_split)) {
+ dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+ amdgpu_vram_page_split);
+ amdgpu_vram_page_split = 1024;
+ }
}
/**
@@ -1125,11 +1097,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1145,11 +1117,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1164,10 +1136,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
if (r)
return r;
break;
@@ -1183,23 +1155,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type)
- return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
}
return true;
}
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type)
+ if (adev->ip_blocks[i].version->type == type)
return &adev->ip_blocks[i];
return NULL;
@@ -1220,38 +1191,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor)
{
- const struct amdgpu_ip_block_version *ip_block;
- ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
- if (ip_block && ((ip_block->major > major) ||
- ((ip_block->major == major) &&
- (ip_block->minor >= minor))))
+ if (ip_block && ((ip_block->version->major > major) ||
+ ((ip_block->version->major == major) &&
+ (ip_block->version->minor >= minor))))
return 0;
return 1;
}
-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
+{
+ if (!ip_block_version)
+ return -EINVAL;
+
+ adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+ return 0;
+}
+
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
struct drm_device *ddev = adev->ddev;
const char *pci_address_name = pci_name(ddev->pdev);
- char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+ char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
pciaddstr_tmp = pciaddstr;
- while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+ while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+ pciaddname = strsep(&pciaddname_tmp, ",");
if (!strcmp(pci_address_name, pciaddname)) {
+ long num_crtc;
+ int res = -1;
+
adev->enable_virtual_display = true;
+
+ if (pciaddname_tmp)
+ res = kstrtol(pciaddname_tmp, 10,
+ &num_crtc);
+
+ if (!res) {
+ if (num_crtc < 1)
+ num_crtc = 1;
+ if (num_crtc > 6)
+ num_crtc = 6;
+ adev->mode_info.num_crtc = num_crtc;
+ } else {
+ adev->mode_info.num_crtc = 1;
+ }
break;
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display);
+ DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
@@ -1261,7 +1269,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
- amdgpu_whether_enable_virtual_display(adev);
+ amdgpu_device_enable_virtual_display(adev);
switch (adev->asic_type) {
case CHIP_TOPAZ:
@@ -1313,33 +1321,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_status = kcalloc(adev->num_ip_blocks,
- sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
- if (adev->ip_block_status == NULL)
- return -ENOMEM;
-
- if (adev->ip_blocks == NULL) {
- DRM_ERROR("No IP blocks found!\n");
- return r;
- }
-
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else {
- if (adev->ip_blocks[i].funcs->early_init) {
- r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->early_init) {
+ r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
if (r == -ENOENT) {
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
}
}
@@ -1355,22 +1354,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].sw = true;
+ adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
@@ -1380,22 +1380,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu_wb_init failed %d\n", r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
/* gmc hw init is done early */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
continue;
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -1406,25 +1407,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->late_init) {
- r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].late_initialized = true;
+ adev->ip_blocks[i].status.late_initialized = true;
}
/* skip CG for VCE/UVD, it's handled specially */
- if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1439,68 +1441,71 @@ static int amdgpu_fini(struct amdgpu_device *adev)
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+ adev->ip_blocks[i].status.hw = false;
break;
}
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
amdgpu_vram_scratch_fini(adev);
}
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+ adev->ip_blocks[i].status.hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
- r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].sw = false;
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.sw = false;
+ adev->ip_blocks[i].status.valid = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].late_initialized)
+ if (!adev->ip_blocks[i].status.late_initialized)
continue;
- if (adev->ip_blocks[i].funcs->late_fini)
- adev->ip_blocks[i].funcs->late_fini((void *)adev);
- adev->ip_block_status[i].late_initialized = false;
+ if (adev->ip_blocks[i].version->funcs->late_fini)
+ adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].status.late_initialized = false;
}
return 0;
@@ -1518,21 +1523,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
if (i != AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
/* XXX handle errors */
- r = adev->ip_blocks[i].funcs->suspend(adev);
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
@@ -1544,11 +1551,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->resume(adev);
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1599,7 +1607,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
- adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -1859,8 +1867,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- kfree(adev->ip_block_status);
- adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
@@ -1956,9 +1962,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
r = amdgpu_suspend(adev);
- /* evict remaining vram memory */
+ /* evict remaining vram memory
+ * This second call to evict vram is to evict the gart page table
+ * using the CPU.
+ */
amdgpu_bo_evict_vram(adev);
+ amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2010,6 +2020,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
return r;
}
}
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
if (!amdgpu_card_posted(adev) || !resume) {
@@ -2096,13 +2107,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
bool asic_hang = false;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_block_status[i].hang =
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
- if (adev->ip_block_status[i].hang) {
- DRM_INFO("IP block:%d is hang!\n", i);
+ if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+ adev->ip_blocks[i].status.hang =
+ adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang) {
+ DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
@@ -2114,11 +2125,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
if (r)
return r;
}
@@ -2132,13 +2143,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
- if (adev->ip_block_status[i].hang) {
+ if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
}
@@ -2152,11 +2163,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->soft_reset) {
- r = adev->ip_blocks[i].funcs->soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
if (r)
return r;
}
@@ -2170,11 +2181,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->post_soft_reset)
- r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->post_soft_reset)
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
if (r)
return r;
}
@@ -2193,7 +2204,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
- struct fence **fence)
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2268,8 +2279,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
}
if (need_full_reset) {
- /* save scratch */
- amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_suspend(adev);
retry:
@@ -2279,8 +2288,9 @@ retry:
amdgpu_display_stop_mc_access(adev, &save);
amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
}
-
+ amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_asic_reset(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2288,8 +2298,6 @@ retry:
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_resume(adev);
}
- /* restore scratch */
- amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
amdgpu_irq_gpu_reset_resume_helper(adev);
@@ -2312,30 +2320,30 @@ retry:
if (need_full_reset && amdgpu_need_backup(adev)) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
- struct fence *fence = NULL, *next = NULL;
+ struct dma_fence *fence = NULL, *next = NULL;
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
WARN(r, "recovery from shadow isn't comleted\n");
break;
}
}
- fence_put(fence);
+ dma_fence_put(fence);
fence = next;
}
mutex_unlock(&adev->shadow_list_lock);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
WARN(r, "recovery from shadow isn't comleted\n");
}
- fence_put(fence);
+ dma_fence_put(fence);
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2531,6 +2539,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else {
use_bank = 0;
@@ -2539,8 +2554,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
*pos &= 0x3FFFF;
if (use_bank) {
- if (sh_bank >= adev->gfx.config.max_sh_per_se ||
- se_bank >= adev->gfx.config.max_shader_engines)
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
return -EINVAL;
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
@@ -2587,10 +2602,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ /* are we writing registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= 0x3FFFF;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
@@ -2609,6 +2659,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size -= 4;
}
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
return result;
}
@@ -2871,6 +2929,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
return !r ? 4 : r;
}
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+ ssize_t result=0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & 0x7F);
+ se = ((*pos >> 7) & 0xFF);
+ sh = ((*pos >> 15) & 0xFF);
+ cu = ((*pos >> 23) & 0xFF);
+ wave = ((*pos >> 31) & 0xFF);
+ simd = ((*pos >> 37) & 0xFF);
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -2908,6 +3016,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -2915,6 +3029,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
&amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2924,6 +3039,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_smc",
"amdgpu_gca_config",
"amdgpu_sensors",
+ "amdgpu_wave",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b429872..741144fcc7bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,29 +35,29 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
- fence_put(f);
+ dma_fence_put(f);
schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
- struct fence **f)
+ struct dma_fence **f)
{
- struct fence *fence= *f;
+ struct dma_fence *fence= *f;
if (fence == NULL)
return false;
*f = NULL;
- if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+ if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct amdgpu_flip_work *work =
container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
- struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
- struct drm_crtc *crtc = &amdgpuCrtc->base;
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
unsigned i;
int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
- if (amdgpuCrtc->enabled &&
+ if (amdgpu_crtc->enabled &&
(amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */
- amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
- amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
@@ -244,9 +244,9 @@ unreserve:
cleanup:
amdgpu_bo_unref(&work->old_abo);
- fence_put(work->excl);
+ dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
- fence_put(work->shared[i]);
+ dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 14f57d9915e3..6ca0333ca4c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
}
- for (i = 0; i < states->numEntries; i++) {
- if (i >= AMDGPU_MAX_VCE_LEVELS)
- break;
+ adev->pm.dpm.num_of_vce_states =
+ states->numEntries > AMD_MAX_VCE_LEVELS ?
+ AMD_MAX_VCE_LEVELS : states->numEntries;
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
return encoded_lanes[lanes];
}
+
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+ if (idx < adev->pm.dpm.num_of_vce_states)
+ return &adev->pm.dpm.vce_states[idx];
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 3738a96c2619..bd85e35998e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -23,6 +23,446 @@
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__
+enum amdgpu_int_thermal_type {
+ THERMAL_TYPE_NONE,
+ THERMAL_TYPE_EXTERNAL,
+ THERMAL_TYPE_EXTERNAL_GPIO,
+ THERMAL_TYPE_RV6XX,
+ THERMAL_TYPE_RV770,
+ THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+ THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
+ THERMAL_TYPE_SI,
+ THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+ THERMAL_TYPE_CI,
+ THERMAL_TYPE_KV,
+};
+
+enum amdgpu_dpm_auto_throttle_src {
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum amdgpu_dpm_event_src {
+ AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
+ AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
+ AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
+ AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
+struct amdgpu_ps {
+ u32 caps; /* vbios flags */
+ u32 class; /* vbios flags */
+ u32 class2; /* vbios flags */
+ /* UVD clocks */
+ u32 vclk;
+ u32 dclk;
+ /* VCE clocks */
+ u32 evclk;
+ u32 ecclk;
+ bool vce_active;
+ enum amd_vce_level vce_level;
+ /* asic priv */
+ void *ps_priv;
+};
+
+struct amdgpu_dpm_thermal {
+ /* thermal interrupt work */
+ struct work_struct work;
+ /* low temperature threshold */
+ int min_temp;
+ /* high temperature threshold */
+ int max_temp;
+ /* was last interrupt low to high or high to low */
+ bool high_to_low;
+ /* interrupt source */
+ struct amdgpu_irq_src irq;
+};
+
+enum amdgpu_clk_action
+{
+ AMDGPU_SCLK_UP = 1,
+ AMDGPU_SCLK_DOWN
+};
+
+struct amdgpu_blacklist_clocks
+{
+ u32 sclk;
+ u32 mclk;
+ enum amdgpu_clk_action action;
+};
+
+struct amdgpu_clock_and_voltage_limits {
+ u32 sclk;
+ u32 mclk;
+ u16 vddc;
+ u16 vddci;
+};
+
+struct amdgpu_clock_array {
+ u32 count;
+ u32 *values;
+};
+
+struct amdgpu_clock_voltage_dependency_entry {
+ u32 clk;
+ u16 v;
+};
+
+struct amdgpu_clock_voltage_dependency_table {
+ u32 count;
+ struct amdgpu_clock_voltage_dependency_entry *entries;
+};
+
+union amdgpu_cac_leakage_entry {
+ struct {
+ u16 vddc;
+ u32 leakage;
+ };
+ struct {
+ u16 vddc1;
+ u16 vddc2;
+ u16 vddc3;
+ };
+};
+
+struct amdgpu_cac_leakage_table {
+ u32 count;
+ union amdgpu_cac_leakage_entry *entries;
+};
+
+struct amdgpu_phase_shedding_limits_entry {
+ u16 voltage;
+ u32 sclk;
+ u32 mclk;
+};
+
+struct amdgpu_phase_shedding_limits_table {
+ u32 count;
+ struct amdgpu_phase_shedding_limits_entry *entries;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_entry {
+ u32 vclk;
+ u32 dclk;
+ u16 v;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_entry {
+ u32 ecclk;
+ u32 evclk;
+ u16 v;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_ppm_table {
+ u8 ppm_design;
+ u16 cpu_core_number;
+ u32 platform_tdp;
+ u32 small_ac_platform_tdp;
+ u32 platform_tdc;
+ u32 small_ac_platform_tdc;
+ u32 apu_tdp;
+ u32 dgpu_tdp;
+ u32 dgpu_ulv_power;
+ u32 tj_max;
+};
+
+struct amdgpu_cac_tdp_table {
+ u16 tdp;
+ u16 configurable_tdp;
+ u16 tdc;
+ u16 battery_power_limit;
+ u16 small_power_limit;
+ u16 low_cac_leakage;
+ u16 high_cac_leakage;
+ u16 maximum_power_delivery_limit;
+};
+
+struct amdgpu_dpm_dynamic_state {
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
+ struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+ struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
+ struct amdgpu_clock_array valid_sclk_values;
+ struct amdgpu_clock_array valid_mclk_values;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
+ u32 mclk_sclk_ratio;
+ u32 sclk_mclk_delta;
+ u16 vddc_vddci_delta;
+ u16 min_vddc_for_pcie_gen2;
+ struct amdgpu_cac_leakage_table cac_leakage_table;
+ struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
+ struct amdgpu_ppm_table *ppm_table;
+ struct amdgpu_cac_tdp_table *cac_tdp_table;
+};
+
+struct amdgpu_dpm_fan {
+ u16 t_min;
+ u16 t_med;
+ u16 t_high;
+ u16 pwm_min;
+ u16 pwm_med;
+ u16 pwm_high;
+ u8 t_hyst;
+ u32 cycle_delay;
+ u16 t_max;
+ u8 control_mode;
+ u16 default_max_fan_pwm;
+ u16 default_fan_output_sensitivity;
+ u16 fan_output_sensitivity;
+ bool ucode_fan_control;
+};
+
+enum amdgpu_pcie_gen {
+ AMDGPU_PCIE_GEN1 = 0,
+ AMDGPU_PCIE_GEN2 = 1,
+ AMDGPU_PCIE_GEN3 = 2,
+ AMDGPU_PCIE_GEN_INVALID = 0xffff
+};
+
+enum amdgpu_dpm_forced_level {
+ AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
+ AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
+ AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+ AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
+};
+
+struct amdgpu_dpm_funcs {
+ int (*get_temperature)(struct amdgpu_device *adev);
+ int (*pre_set_power_state)(struct amdgpu_device *adev);
+ int (*set_power_state)(struct amdgpu_device *adev);
+ void (*post_set_power_state)(struct amdgpu_device *adev);
+ void (*display_configuration_changed)(struct amdgpu_device *adev);
+ u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
+ u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
+ void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
+ void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
+ int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+ bool (*vblank_too_short)(struct amdgpu_device *adev);
+ void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+ void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
+ void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
+ void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
+ u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
+ int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
+ int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*check_state_equal)(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal);
+
+ struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
+};
+
+#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
+#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
+#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
+#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
+#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
+#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
+#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+ -EINVAL)
+
+#define amdgpu_dpm_get_temperature(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_temperature((adev)))
+
+#define amdgpu_dpm_set_fan_control_mode(adev, m) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
+ (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+
+#define amdgpu_dpm_get_fan_control_mode(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_fan_control_mode((adev)))
+
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_sclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_sclk((adev), (l)))
+
+#define amdgpu_dpm_get_mclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_mclk((adev), (l)))
+
+
+#define amdgpu_dpm_force_performance_level(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->force_performance_level((adev), (l)))
+
+#define amdgpu_dpm_powergate_uvd(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_uvd((adev), (g)))
+
+#define amdgpu_dpm_powergate_vce(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_vce((adev), (g)))
+
+#define amdgpu_dpm_get_current_power_state(adev) \
+ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_performance_level(adev) \
+ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+ (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+ (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+ (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+ (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
+#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
+ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+
+#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
+
+#define amdgpu_dpm_get_vce_clock_state(adev, i) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
+ (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+
+struct amdgpu_dpm {
+ struct amdgpu_ps *ps;
+ /* number of valid power states */
+ int num_ps;
+ /* current power state that is active */
+ struct amdgpu_ps *current_ps;
+ /* requested power state */
+ struct amdgpu_ps *requested_ps;
+ /* boot up power state */
+ struct amdgpu_ps *boot_ps;
+ /* default uvd power state */
+ struct amdgpu_ps *uvd_ps;
+ /* vce requirements */
+ u32 num_of_vce_states;
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
+ enum amd_vce_level vce_level;
+ enum amd_pm_state_type state;
+ enum amd_pm_state_type user_state;
+ enum amd_pm_state_type last_state;
+ enum amd_pm_state_type last_user_state;
+ u32 platform_caps;
+ u32 voltage_response_time;
+ u32 backbias_response_time;
+ void *priv;
+ u32 new_active_crtcs;
+ int new_active_crtc_count;
+ u32 current_active_crtcs;
+ int current_active_crtc_count;
+ struct amdgpu_dpm_dynamic_state dyn_state;
+ struct amdgpu_dpm_fan fan;
+ u32 tdp_limit;
+ u32 near_tdp_limit;
+ u32 near_tdp_limit_adjusted;
+ u32 sq_ramping_threshold;
+ u32 cac_leakage;
+ u16 tdp_od_limit;
+ u32 tdp_adjustment;
+ u16 load_line_slope;
+ bool power_control;
+ bool ac_power;
+ /* special states active */
+ bool thermal_active;
+ bool uvd_active;
+ bool vce_active;
+ /* thermal handling */
+ struct amdgpu_dpm_thermal thermal;
+ /* forced levels */
+ enum amdgpu_dpm_forced_level forced_level;
+};
+
+struct amdgpu_pm {
+ struct mutex mutex;
+ u32 current_sclk;
+ u32 current_mclk;
+ u32 default_sclk;
+ u32 default_mclk;
+ struct amdgpu_i2c_chan *i2c_bus;
+ /* internal thermal controller on rv6xx+ */
+ enum amdgpu_int_thermal_type int_thermal_type;
+ struct device *int_hwmon_dev;
+ /* fan control parameters */
+ bool no_fan;
+ u8 fan_pulses_per_revolution;
+ u8 fan_min_rpm;
+ u8 fan_max_rpm;
+ /* dpm */
+ bool dpm_enabled;
+ bool sysfs_initialized;
+ struct amdgpu_dpm dpm;
+ const struct firmware *fw; /* SMC firmware */
+ uint32_t fw_version;
+ const struct amdgpu_dpm_funcs *funcs;
+ uint32_t pcie_gen_mask;
+ uint32_t pcie_mlw_mask;
+ struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+};
+
#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8
@@ -82,4 +522,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u16 default_lanes);
u8 amdgpu_encode_pci_lane_width(u32 lanes);
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71ed27eb3dde..42da6163b893 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -58,9 +58,10 @@
* - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
* - 3.7.0 - Add support for VCE clock list packet
* - 3.8.0 - Add support raster config init in the kernel
+ * - 3.9.0 - Add support for memory query info about VRAM and GTT.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+ "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
static const struct pci_device_id pciidlist[] = {
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
/* fiji */
{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+ {0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
/* carrizo */
{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
@@ -735,8 +742,20 @@ static struct pci_driver amdgpu_kms_pci_driver = {
static int __init amdgpu_init(void)
{
- amdgpu_sync_init();
- amdgpu_fence_slab_init();
+ int r;
+
+ r = amdgpu_sync_init();
+ if (r)
+ goto error_sync;
+
+ r = amdgpu_fence_slab_init();
+ if (r)
+ goto error_fence;
+
+ r = amd_sched_fence_slab_init();
+ if (r)
+ goto error_sched;
+
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -748,6 +767,15 @@ static int __init amdgpu_init(void)
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
+
+error_sched:
+ amdgpu_fence_slab_fini();
+
+error_fence:
+ amdgpu_sync_fini();
+
+error_sync:
+ return r;
}
static void __exit amdgpu_exit(void)
@@ -756,6 +784,7 @@ static void __exit amdgpu_exit(void)
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
+ amd_sched_fence_slab_fini();
amdgpu_fence_slab_fini();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9fb8aa4d6bae..f1c9e59a7c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -75,27 +75,21 @@ amdgpufb_release(struct fb_info *info, int user)
static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = amdgpufb_open,
.fb_release = amdgpufb_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
+int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
int aligned = width;
int pitch_mask = 0;
- switch (bpp / 8) {
+ switch (cpp) {
case 1:
pitch_mask = 255;
break;
@@ -110,7 +104,7 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
- return aligned;
+ return aligned * cpp;
}
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,20 +133,21 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
+ u32 cpp;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
+ fb_tiled);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..97928d7281f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
*/
struct amdgpu_fence {
- struct fence base;
+ struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
@@ -68,13 +68,14 @@ int amdgpu_fence_slab_init(void)
void amdgpu_fence_slab_fini(void)
{
+ rcu_barrier();
kmem_cache_destroy(amdgpu_fence_slab);
}
/*
* Cast helper
*/
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
@@ -130,11 +131,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct fence *old, **ptr;
+ struct dma_fence *old, **ptr;
uint32_t seq;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +144,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
- fence_init(&fence->base, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
+ dma_fence_init(&fence->base, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx,
+ seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
@@ -155,12 +156,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
* emitting the fence would mess up the hardware ring buffer.
*/
old = rcu_dereference_protected(*ptr, 1);
- if (old && !fence_is_signaled(old)) {
+ if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
- fence_wait(old, false);
+ dma_fence_wait(old, false);
}
- rcu_assign_pointer(*ptr, fence_get(&fence->base));
+ rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
@@ -211,7 +212,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
seq &= drv->num_fences_mask;
do {
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -224,13 +225,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = fence_signal(fence);
+ r = dma_fence_signal(fence);
if (!r)
- FENCE_TRACE(fence, "signaled from irq context\n");
+ DMA_FENCE_TRACE(fence, "signaled from irq context\n");
else
BUG();
- fence_put(fence);
+ dma_fence_put(fence);
} while (last_seq != seq);
}
@@ -260,7 +261,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
int r;
if (!seq)
@@ -269,14 +270,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
rcu_read_lock();
fence = rcu_dereference(*ptr);
- if (!fence || !fence_get_rcu(fence)) {
+ if (!fence || !dma_fence_get_rcu(fence)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
- r = fence_wait(fence, false);
- fence_put(fence);
+ r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
@@ -452,7 +453,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- fence_put(ring->fence_drv.fences[j]);
+ dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
@@ -541,12 +542,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
* Common fence implementation
*/
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
return "amdgpu";
}
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
@@ -560,7 +561,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +569,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
@@ -582,7 +583,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
*/
static void amdgpu_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
@@ -595,16 +596,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_fence_free);
}
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 21a1242fc13b..964d2a946ed5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->gart.robj);
if (r) {
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a7ea9a3b454e..cd62f6ffde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = abo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@@ -407,10 +408,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- if (timeout == 0)
- ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
- else
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
+ ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+ timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
@@ -470,6 +469,16 @@ out:
return r;
}
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+ unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* if anything is swapped out don't swap it in here,
+ just abort and wait for the next CS */
+
+ return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
+
/**
* amdgpu_gem_va_update_vm -update the bo_va in its VM
*
@@ -480,7 +489,8 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va, uint32_t operation)
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry vm_pd;
@@ -503,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
@@ -511,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
- list_for_each_entry(entry, &duplicates, head) {
- domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (domain == AMDGPU_GEM_DOMAIN_CPU)
- goto error_unreserve;
- }
+ r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+ NULL);
+ if (r)
+ goto error_unreserve;
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
@@ -538,8 +544,6 @@ error_print:
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
-
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -549,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
- struct ttm_validate_buffer tv, tv_pd;
+ struct amdgpu_bo_list_entry vm_pd;
+ struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint32_t invalid_flags, va_flags = 0;
@@ -594,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
tv.shared = true;
list_add(&tv.head, &list);
- tv_pd.bo = &fpriv->vm.page_directory->tbo;
- tv_pd.shared = true;
- list_add(&tv_pd.head, &list);
+ amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
@@ -704,7 +707,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
- args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->pitch = amdgpu_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a074edd95c70..01a42b6a69a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_gfx.h"
/*
* GPU scratch registers helpers function.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 51321e154c09..e02044086445 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,6 +27,7 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+ unsigned max_sh);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f86c84427778..3c634f02a3d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -168,6 +168,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
return -ENOMEM;
node->start = AMDGPU_BO_INVALID_OFFSET;
+ node->size = mem->num_pages;
mem->mm_node = node;
if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6a6c86c9c169..216a9572d946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Free an IB (all asics).
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f)
+ struct dma_fence *f)
{
amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* to SI there was just a DE IB.
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f)
+ struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
- num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+ alloc_size = ring->funcs->emit_frame_size + num_ibs *
+ ring->funcs->emit_ib_size;
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
- if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+ if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 278708f5a744..9fa809876339 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
adev->irq.installed = false;
flush_work(&adev->hotplug_work);
+ cancel_work_sync(&adev->reset_work);
return r;
}
@@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
if (adev->irq.msi_enabled)
pci_disable_msi(adev->pdev);
flush_work(&adev->hotplug_work);
+ cancel_work_sync(&adev->reset_work);
}
for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8c5807994073..a0de6286c453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- struct fence *f;
+ struct dma_fence *f;
unsigned i;
/* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f)
+ struct dma_fence **f)
{
int r;
job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->owner = owner;
job->fence_ctx = entity->fence_context;
- *f = fence_get(&job->base.s_fence->finished);
+ *f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
}
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
- struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+ struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
return fence;
}
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
struct amdgpu_job *job;
int r;
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
/* if gpu reset, hw fence will be replaced here */
- fence_put(job->fence);
- job->fence = fence_get(fence);
+ dma_fence_put(job->fence);
+ job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb140338..d1cf9ac0dff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((amdgpu_runtime_pm != 0) &&
amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0))
flags |= AMD_IS_PX;
@@ -306,10 +308,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid) {
- ip.hw_ip_version_major = adev->ip_blocks[i].major;
- ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid) {
+ ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+ ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
ip.capabilities_flags = 0;
ip.available_rings = ring_mask;
ip.ib_start_alignment = ib_start_alignment;
@@ -345,8 +347,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid &&
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@@ -411,6 +413,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_MEMORY: {
+ struct drm_amdgpu_memory_info mem;
+
+ memset(&mem, 0, sizeof(mem));
+ mem.vram.total_heap_size = adev->mc.real_vram_size;
+ mem.vram.usable_heap_size =
+ adev->mc.real_vram_size - adev->vram_pin_size;
+ mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+ mem.cpu_accessible_vram.total_heap_size =
+ adev->mc.visible_vram_size;
+ mem.cpu_accessible_vram.usable_heap_size =
+ adev->mc.visible_vram_size -
+ (adev->vram_pin_size - adev->invisible_pin_size);
+ mem.cpu_accessible_vram.heap_usage =
+ atomic64_read(&adev->vram_vis_usage);
+ mem.cpu_accessible_vram.max_allocation =
+ mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+ mem.gtt.total_heap_size = adev->mc.gtt_size;
+ mem.gtt.usable_heap_size =
+ adev->mc.gtt_size - adev->gart_pin_size;
+ mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+ return copy_to_user(out, &mem,
+ min((size_t)size, sizeof(mem)))
+ ? -EFAULT : 0;
+ }
case AMDGPU_INFO_READ_MMR_REG: {
unsigned n, alloc_size;
uint32_t *regs;
@@ -459,10 +491,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
/* return all clocks in KHz */
dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
if (adev->pm.dpm_enabled) {
- dev_info.max_engine_clock =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
- dev_info.max_memory_clock =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
+ dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->pm.default_sclk * 10;
dev_info.max_memory_clock = adev->pm.default_mclk * 10;
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+ if (amdgpu_sriov_vf(adev))
+ dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -494,6 +526,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+ unsigned i;
+ struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+ struct amd_vce_state *vce_state;
+
+ for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+ vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+ if (vce_state) {
+ vce_clk_table.entries[i].sclk = vce_state->sclk;
+ vce_clk_table.entries[i].mclk = vce_state->mclk;
+ vce_clk_table.entries[i].eclk = vce_state->evclk;
+ vce_clk_table.num_valid_entries++;
+ }
+ }
+
+ return copy_to_user(out, &vce_clk_table,
+ min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -775,6 +825,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 32fa7b7913f7..7ea3cacf9f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -285,7 +285,7 @@ free_rmn:
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct amdgpu_mn_node *node = NULL;
struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct list_head *head;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 7b0eff7d060b..1e23334b07fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -341,8 +341,6 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
- struct hrtimer vblank_timer;
- enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -413,6 +411,9 @@ struct amdgpu_crtc {
u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
+ /* for virtual dce */
+ struct hrtimer vblank_timer;
+ enum amdgpu_interrupt_state vsync_timer_enabled;
};
struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f3efb1c5dae9..1479d09bd4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo;
bo = container_of(tbo, struct amdgpu_bo, tbo);
- amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+ amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
- mutex_lock(&bo->adev->shadow_list_lock);
+ mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
- mutex_unlock(&bo->adev->shadow_list_lock);
+ mutex_unlock(&adev->shadow_list_lock);
}
kfree(bo->metadata);
kfree(bo);
@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ unsigned lpfn = 0;
+
+ /* This forces a reallocation if the flag wasn't set before */
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
adev->mc.visible_vram_size < adev->mc.real_vram_size) {
places[c].fpfn = visible_pfn;
- places[c].lpfn = 0;
+ places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_TOPDOWN;
@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
}
places[c].fpfn = 0;
- places[c].lpfn = 0;
+ places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
- amdgpu_ttm_placement_init(abo->adev, &abo->placement,
- abo->placements, domain, abo->flags);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+ amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+ domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
int r;
r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
kfree(bo);
return r;
}
- bo->adev = adev;
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -383,7 +391,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
- struct fence *fence;
+ struct dma_fence *fence;
if (adev->mman.buffer_funcs_ring == NULL ||
!adev->mman.buffer_funcs_ring->ready) {
@@ -403,9 +411,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
amdgpu_bo_fence(bo, fence, false);
amdgpu_bo_unreserve(bo);
- fence_put(bo->tbo.moving);
- bo->tbo.moving = fence_get(fence);
- fence_put(fence);
+ dma_fence_put(bo->tbo.moving);
+ bo->tbo.moving = dma_fence_get(fence);
+ dma_fence_put(fence);
}
*bo_ptr = bo;
@@ -491,7 +499,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -523,7 +531,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset,
u64 *gpu_addr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
unsigned fpfn, lpfn;
@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
(!max_offset || max_offset >
- bo->adev->mc.visible_vram_size)) {
+ adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
- bo->adev->mc.visible_vram_size))
+ adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
- lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+ lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p pin failed\n", bo);
+ dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p bind failed\n", bo);
+ dev_err(adev->dev, "%p bind failed\n", bo);
goto error;
}
@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+ adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ adev->invisible_pin_size += amdgpu_bo_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+ adev->gart_pin_size += amdgpu_bo_size(bo);
}
error:
@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
if (!bo->pin_count) {
- dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+ dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
}
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
}
error:
@@ -854,6 +866,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -861,21 +874,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(abo->adev, abo);
+ amdgpu_vm_bo_invalidate(adev, abo);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+ amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
unsigned long offset, size, lpfn;
int i, r;
@@ -884,13 +897,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
- adev = abo->adev;
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->mc.visible_vram_size)
+ /* TODO: figure out how to map scattered VRAM to the CPU */
+ if ((offset + size) <= adev->mc.visible_vram_size &&
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -898,6 +912,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah the memory is not visible ! */
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
@@ -931,7 +946,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
* @shared: true if fence should be added shared
*
*/
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
@@ -959,6 +974,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return bo->tbo.offset;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034d73eb..5cbf59ec0f68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
*/
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
- dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+ dev_err(adev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@@ -156,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence, bool direct);
+ struct dma_fence **fence, bool direct);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct);
@@ -200,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
- struct fence *fence);
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index accc908bdc88..274f3309aec9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -986,10 +986,10 @@ restart_search:
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
- int i;
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
+ bool equal;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
- /* no need to reprogram if nothing changed unless we are on BTC+ */
- if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
- /* vce just modifies an existing state so force a change */
- if (ps->vce_active != adev->pm.dpm.vce_active)
- goto force;
- if (adev->flags & AMD_IS_APU) {
- /* for APUs if the num crtcs changed but state is the same,
- * all we need to do is update the display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- }
- return;
- } else {
- /* for BTC+ if the num crtcs hasn't changed and state is the same,
- * nothing to do, if the num crtcs is > 1 and state is the same,
- * update display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs ==
- adev->pm.dpm.current_active_crtcs) {
- return;
- } else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
- (adev->pm.dpm.new_active_crtc_count > 1)) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- return;
- }
- }
- }
-
-force:
if (amdgpu_dpm == 1) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
@@ -1059,31 +1019,21 @@ force:
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
+ amdgpu_dpm_display_configuration_changed(adev);
+
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
+ if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
+ equal = false;
- /* wait for the rings to drain */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ if (equal)
+ return;
- /* program the new power state */
amdgpu_dpm_set_power_state(adev);
-
- /* update current power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
-
amdgpu_dpm_post_set_power_state(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
mutex_lock(&adev->pm.mutex);
@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ int i = 0;
if (!adev->pm.dpm_enabled)
return;
- if (adev->pp_enabled) {
- int i = 0;
+ amdgpu_display_bandwidth_update(adev);
- amdgpu_display_bandwidth_update(adev);
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+ if (adev->pp_enabled) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 7532ff822aa7..fa6baf31a35d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
-const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
index da5cf47cfd99..c0c4bfdcdb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
@@ -23,11 +23,11 @@
*
*/
-#ifndef __AMDGPU_POPWERPLAY_H__
-#define __AMDGPU_POPWERPLAY_H__
+#ifndef __AMDGPU_POWERPLAY_H__
+#define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h"
-extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
+extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
-#endif /* __AMDSOC_DM_H__ */
+#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 7700dc22f243..3826d5aea0a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
+ bo->prime_shared_count = 1;
return &bo->gem_base;
}
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int ret = 0;
+ long ret = 0;
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;
+ /*
+ * Wait for all shared fences to complete before we switch to future
+ * use of exclusive fence on this prime shared bo.
+ */
+ ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(ret < 0)) {
+ DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
+ amdgpu_bo_unreserve(bo);
+ return ret;
+ }
+
/* pin buffer into GTT */
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
amdgpu_bo_unreserve(bo);
return ret;
}
@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
return;
amdgpu_bo_unpin(bo);
+ if (bo->prime_shared_count)
+ bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3cb5e903cd62..4c992826d2d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
/* Make sure we aren't trying to allocate more space
* than the maximum for one submission
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
int i;
for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->align_mask)
- ib->ptr[ib->length_dw++] = ring->nop;
+ while (ib->length_dw & ring->funcs->align_mask)
+ ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
uint32_t count;
/* We pad to match fetch size */
- count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
- count %= ring->align_mask + 1;
+ count = ring->funcs->align_mask + 1 -
+ (ring->wptr & ring->funcs->align_mask);
+ count %= ring->funcs->align_mask + 1;
ring->funcs->insert_nop(ring, count);
mb();
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type)
+ unsigned max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type)
{
int r;
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission);
- ring->align_mask = align_mask;
- ring->nop = nop;
- ring->type = ring_type;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
new file mode 100644
index 000000000000..f2ad49c8e85b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_RING_H__
+#define __AMDGPU_RING_H__
+
+#include "gpu_scheduler.h"
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS 16
+#define AMDGPU_MAX_GFX_RINGS 1
+#define AMDGPU_MAX_COMPUTE_RINGS 8
+#define AMDGPU_MAX_VCE_RINGS 3
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
+#define AMDGPU_FENCE_FLAG_INT (1 << 1)
+
+enum amdgpu_ring_type {
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA,
+ AMDGPU_RING_TYPE_UVD,
+ AMDGPU_RING_TYPE_VCE
+};
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
+ /* sync_seq is protected by ring emission lock */
+ uint32_t sync_seq;
+ atomic_t last_seq;
+ bool initialized;
+ struct amdgpu_irq_src *irq_src;
+ unsigned irq_type;
+ struct timer_list fallback_timer;
+ unsigned num_fences_mask;
+ spinlock_t lock;
+ struct dma_fence **fences;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+/*
+ * Rings.
+ */
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+ enum amdgpu_ring_type type;
+ uint32_t align_mask;
+ u32 nop;
+
+ /* ring read/write ptr handling */
+ u32 (*get_rptr)(struct amdgpu_ring *ring);
+ u32 (*get_wptr)(struct amdgpu_ring *ring);
+ void (*set_wptr)(struct amdgpu_ring *ring);
+ /* validating and patching of IBs */
+ int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ /* constants to calculate how many DW are needed for an emit */
+ unsigned emit_frame_size;
+ unsigned emit_ib_size;
+ /* command emit functions */
+ void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch);
+ void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+ uint64_t seq, unsigned flags);
+ void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ uint64_t pd_addr);
+ void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+ void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+ void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
+ /* testing functions */
+ int (*test_ring)(struct amdgpu_ring *ring);
+ int (*test_ib)(struct amdgpu_ring *ring, long timeout);
+ /* insert NOP packets */
+ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+ /* pad the indirect buffer to the necessary number of dw */
+ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+ unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+ void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ /* note usage for clock and power gating */
+ void (*begin_use)(struct amdgpu_ring *ring);
+ void (*end_use)(struct amdgpu_ring *ring);
+ void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+ void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+};
+
+struct amdgpu_ring {
+ struct amdgpu_device *adev;
+ const struct amdgpu_ring_funcs *funcs;
+ struct amdgpu_fence_driver fence_drv;
+ struct amd_gpu_scheduler sched;
+
+ struct amdgpu_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr_offs;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ unsigned max_dw;
+ int count_dw;
+ uint64_t gpu_addr;
+ uint32_t ptr_mask;
+ bool ready;
+ u32 idx;
+ u32 me;
+ u32 pipe;
+ u32 queue;
+ struct amdgpu_bo *mqd_obj;
+ u32 doorbell_index;
+ bool use_doorbell;
+ unsigned wptr_offs;
+ unsigned fence_offs;
+ uint64_t current_ctx;
+ char name[16];
+ unsigned cond_exe_offs;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
+};
+
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ unsigned ring_size, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+#endif
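The new amdgpu_ring.h above moves type, align_mask and nop into amdgpu_ring_funcs, and the amdgpu_ring.c hunks now read them through ring->funcs. The padding arithmetic those hunks touch is easy to check in isolation; a small stand-alone sketch (the constants are made up, only the two expressions mirror the driver):

    #include <stdio.h>

    /* align_mask is assumed to be 2^n - 1, e.g. 0x0f for a 16-dword fetch size. */
    static unsigned pad_to_fetch(unsigned wptr, unsigned align_mask)
    {
            /* NOP dwords needed so the next fetch starts aligned; mirrors the
             * count computed in amdgpu_ring_commit() from ring->funcs->align_mask. */
            return (align_mask + 1 - (wptr & align_mask)) % (align_mask + 1);
    }

    static unsigned round_up_dw(unsigned ndw, unsigned align_mask)
    {
            /* Request size rounded up, as amdgpu_ring_alloc() does. */
            return (ndw + align_mask) & ~align_mask;
    }

    int main(void)
    {
            printf("%u %u\n", pad_to_fetch(13, 0x0f), round_up_dw(13, 0x0f));
            /* prints "3 16": 3 NOPs take a wptr of 13 to 16; 13 dwords round to 16 */
            return 0;
    }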
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a845f4..34a795463988 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
- fence_put(sa_bo->fence);
+ dma_fence_put(sa_bo->fence);
kfree(sa_bo);
}
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL ||
- !fence_is_signaled(sa_bo->fence)) {
+ !dma_fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct fence **fences,
+ struct dma_fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
- if (!fence_is_signaled(sa_bo->fence)) {
+ if (!dma_fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+ struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
@@ -356,14 +356,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
- fences[count++] = fence_get(fences[i]);
+ fences[count++] = dma_fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
- t = fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT);
+ t = dma_fence_wait_any_timeout(fences, count, false,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
for (i = 0; i < count; ++i)
- fence_put(fences[i]);
+ dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
@@ -384,7 +385,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
@@ -394,10 +395,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
- if (fence && !fence_is_signaled(fence)) {
+ if (fence && !dma_fence_is_signaled(fence)) {
uint32_t idx;
- (*sa_bo)->fence = fence_get(fence);
+ (*sa_bo)->fence = dma_fence_get(fence);
idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d3022fb87..ed814e6d0207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
struct amdgpu_sync_entry {
struct hlist_node node;
- struct fence *fence;
+ struct dma_fence *fence;
};
static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
*
* Test if the fence was issued by us.
*/
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+ struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
*
* Extract who originally created the fence.
*/
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
*
* Either keep the existing fence or the new one, depending which one is later.
*/
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+ struct dma_fence *fence)
{
- if (*keep && fence_is_later(*keep, fence))
+ if (*keep && dma_fence_is_later(*keep, fence))
return;
- fence_put(*keep);
- *keep = fence_get(fence);
+ dma_fence_put(*keep);
+ *keep = dma_fence_get(fence);
}
/**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
*
*/
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f)
+ struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -ENOMEM;
hash_add(sync->fences, &e->node, f->context);
- e->fence = fence_get(f);
+ e->fence = dma_fence_get(f);
return 0;
}
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
void *owner)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
void *fence_owner;
unsigned i;
int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
* Returns the next fence not signaled yet without removing it from the sync
* object.
*/
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
+ struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* when they are scheduled.
*/
if (s_fence->sched == &ring->sched) {
- if (fence_is_signaled(&s_fence->scheduled))
+ if (dma_fence_is_signaled(&s_fence->scheduled))
continue;
return &s_fence->scheduled;
}
}
- if (fence_is_signaled(f)) {
+ if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
- fence_put(f);
+ dma_fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
*
* Get and removes the next fence from the sync object not signaled yet.
*/
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- struct fence *f;
+ struct dma_fence *f;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
- if (!fence_is_signaled(f))
+ if (!dma_fence_is_signaled(f))
return f;
- fence_put(f);
+ dma_fence_put(f);
}
return NULL;
}
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
- fence_put(e->fence);
+ dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
- fence_put(sync->last_vm_update);
+ dma_fence_put(sync->last_vm_update);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
new file mode 100644
index 000000000000..605be266e07f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_SYNC_H__
+#define __AMDGPU_SYNC_H__
+
+#include <linux/hashtable.h>
+
+struct dma_fence;
+struct reservation_object;
+struct amdgpu_device;
+struct amdgpu_ring;
+
+/*
+ * Container for fences used to sync command submissions.
+ */
+struct amdgpu_sync {
+ DECLARE_HASHTABLE(fences, 4);
+ struct dma_fence *last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_fence *f);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+ struct amdgpu_sync *sync,
+ struct reservation_object *resv,
+ void *owner);
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
+
+#endif
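The new amdgpu_sync.h above exposes a hash table of fences keyed by fence context, and amdgpu_sync_keep_later()/amdgpu_sync_add_later() in the .c hunks keep only the most recent fence per context. A toy, userspace-only sketch of that "latest fence per timeline" bookkeeping — toy_fence and the fixed-size table are stand-ins, not the dma_fence API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for a fence: only the fields the sync logic looks at. */
    struct toy_fence {
            uint64_t context;
            uint64_t seqno;
    };

    /* "Is later" on the same timeline, like dma_fence_is_later(). */
    static bool toy_fence_is_later(const struct toy_fence *a,
                                   const struct toy_fence *b)
    {
            return a->seqno > b->seqno;
    }

    #define MAX_ENTRIES 16

    struct toy_sync {
            struct toy_fence entries[MAX_ENTRIES];
            unsigned count;
    };

    /* Keep only the newest fence per context, mirroring amdgpu_sync_add_later(). */
    static void toy_sync_add(struct toy_sync *sync, struct toy_fence f)
    {
            unsigned i;

            for (i = 0; i < sync->count; ++i) {
                    if (sync->entries[i].context != f.context)
                            continue;
                    if (toy_fence_is_later(&f, &sync->entries[i]))
                            sync->entries[i] = f;     /* replace with the later one */
                    return;
            }
            if (sync->count < MAX_ENTRIES)
                    sync->entries[sync->count++] = f; /* first fence on this context */
    }

    int main(void)
    {
            struct toy_sync sync = { .count = 0 };

            toy_sync_add(&sync, (struct toy_fence){ .context = 1, .seqno = 5 });
            toy_sync_add(&sync, (struct toy_fence){ .context = 1, .seqno = 9 });
            toy_sync_add(&sync, (struct toy_fence){ .context = 2, .seqno = 3 });

            printf("%u entries, ctx1 seqno %llu\n", sync.count,
                   (unsigned long long)sync.entries[0].seqno); /* 2 entries, seqno 9 */
            return 0;
    }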
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b827c75e95de..e05a24325eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
@@ -216,7 +216,7 @@ out_lclean:
amdgpu_bo_unref(&gtt_obj[i]);
}
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 067e5e683bb3..bb964a8ff938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691f56b5..1821c05484d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -51,16 +51,6 @@
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
- struct amdgpu_mman *mman;
- struct amdgpu_device *adev;
-
- mman = container_of(bdev, struct amdgpu_mman, bdev);
- adev = container_of(mman, struct amdgpu_device, mman);
- return adev;
-}
-
/*
* Global memory.
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
{
struct amdgpu_device *adev;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
+ man->func = &amdgpu_vram_mgr_func;
man->gpu_offset = adev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
static struct ttm_place placements = {
.fpfn = 0,
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+ if (adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* allocating address space for the BO.
*/
abo->placements[i].lpfn =
- abo->adev->mc.gtt_size >> PAGE_SHIFT;
+ adev->mc.gtt_size >> PAGE_SHIFT;
}
}
break;
@@ -260,63 +251,115 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem,
+ uint64_t *addr)
{
- struct amdgpu_device *adev;
- struct amdgpu_ring *ring;
- uint64_t old_start, new_start;
- struct fence *fence;
int r;
- adev = amdgpu_get_adev(bo->bdev);
- ring = adev->mman.buffer_funcs_ring;
-
- switch (old_mem->mem_type) {
+ switch (mem->mem_type) {
case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, old_mem);
+ r = amdgpu_ttm_bind(bo, mem);
if (r)
return r;
case TTM_PL_VRAM:
- old_start = (u64)old_mem->start << PAGE_SHIFT;
- old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+ *addr = mm_node->start << PAGE_SHIFT;
+ *addr += bo->bdev->man[mem->mem_type].gpu_offset;
break;
default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ DRM_ERROR("Unknown placement %d\n", mem->mem_type);
return -EINVAL;
}
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, new_mem);
- if (r)
- return r;
- case TTM_PL_VRAM:
- new_start = (u64)new_mem->start << PAGE_SHIFT;
- new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- return -EINVAL;
- }
+ return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+ struct drm_mm_node *old_mm, *new_mm;
+ uint64_t old_start, old_size, new_start, new_size;
+ unsigned long num_pages;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+ old_mm = old_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+ if (r)
+ return r;
+ old_size = old_mm->size;
+
- r = amdgpu_copy_buffer(ring, old_start, new_start,
- new_mem->num_pages * PAGE_SIZE, /* bytes */
- bo->resv, &fence, false);
+ new_mm = new_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
if (r)
return r;
+ new_size = new_mm->size;
+
+ num_pages = new_mem->num_pages;
+ while (num_pages) {
+ unsigned long cur_pages = min(old_size, new_size);
+ struct dma_fence *next;
+
+ r = amdgpu_copy_buffer(ring, old_start, new_start,
+ cur_pages * PAGE_SIZE,
+ bo->resv, &next, false);
+ if (r)
+ goto error;
+
+ dma_fence_put(fence);
+ fence = next;
+
+ num_pages -= cur_pages;
+ if (!num_pages)
+ break;
+
+ old_size -= cur_pages;
+ if (!old_size) {
+ r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+ &old_start);
+ if (r)
+ goto error;
+ old_size = old_mm->size;
+ } else {
+ old_start += cur_pages * PAGE_SIZE;
+ }
+
+ new_size -= cur_pages;
+ if (!new_size) {
+ r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+ &new_start);
+ if (r)
+ goto error;
+
+ new_size = new_mm->size;
+ } else {
+ new_start += cur_pages * PAGE_SIZE;
+ }
+ }
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
- fence_put(fence);
+ dma_fence_put(fence);
+ return r;
+
+error:
+ if (fence)
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_place placements;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
/* remember the eviction */
if (evict)
@@ -475,7 +518,7 @@ memcpy:
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -607,7 +650,7 @@ release_pages:
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned nents;
int r;
@@ -639,7 +682,7 @@ release_sg:
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned i, j;
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned log2_size = min(ilog2(tbo->num_pages),
AMDGPU_TTM_LRU_SIZE - 1);
@@ -1060,12 +1103,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
return res;
}
+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
+ unsigned long num_pages = bo->mem.num_pages;
+ struct drm_mm_node *node = bo->mem.mm_node;
+
+ /* Check each drm MM node individually */
+ while (num_pages) {
+ if (place->fpfn < (node->start + node->size) &&
+ !(place->lpfn && place->lpfn <= node->start))
+ return true;
+
+ num_pages -= node->size;
+ ++node;
+ }
+
+ return false;
+ }
+
+ return ttm_bo_eviction_valuable(bo, place);
+}
+
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
.invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
+ .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
@@ -1119,7 +1187,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->stollen_vga_memory);
if (r) {
return r;
@@ -1247,7 +1316,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit)
+ struct dma_fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1294,7 +1363,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (direct_submit) {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
NULL, NULL, fence);
- job->fence = fence_get(*fence);
+ job->fence = dma_fence_get(*fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
amdgpu_job_free(job);
@@ -1315,9 +1384,9 @@ error_free:
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
- struct fence **fence)
+ struct dma_fence **fence)
{
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_job *job;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
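The amdgpu_ttm.c hunks above rewrite amdgpu_move_blit() to walk the old and new drm_mm node lists in parallel and issue one copy per min(remaining old node, remaining new node) chunk, since VRAM allocations may now be scattered. A self-contained sketch of that chunked walk over two extent lists, emulating both placements with a flat byte array (the extents and the 4-byte "page" are invented for the demo, and the extents are assumed to cover exactly num_pages):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* A scattered allocation: an array of (start, size) extents, in pages. */
    struct node { uint64_t start, size; };

    /* Copy num_pages from the src extents to the dst extents, one
     * min(remaining src extent, remaining dst extent) chunk at a time —
     * the same walk the patch adds over the old and new drm_mm nodes. */
    static void scattered_copy(uint8_t *mem, const struct node *src,
                               const struct node *dst, uint64_t num_pages,
                               uint64_t page_size)
    {
            uint64_t src_off = src->start * page_size, src_left = src->size;
            uint64_t dst_off = dst->start * page_size, dst_left = dst->size;

            while (num_pages) {
                    uint64_t cur = src_left < dst_left ? src_left : dst_left;

                    memcpy(mem + dst_off, mem + src_off, cur * page_size);
                    num_pages -= cur;
                    if (!num_pages)
                            break;

                    src_left -= cur;
                    if (!src_left) {                       /* next source extent */
                            ++src;
                            src_off = src->start * page_size;
                            src_left = src->size;
                    } else {
                            src_off += cur * page_size;
                    }

                    dst_left -= cur;
                    if (!dst_left) {                       /* next destination extent */
                            ++dst;
                            dst_off = dst->start * page_size;
                            dst_left = dst->size;
                    } else {
                            dst_off += cur * page_size;
                    }
            }
    }

    int main(void)
    {
            uint8_t mem[32];
            const struct node src[] = { { 0, 2 }, { 4, 2 } };
            const struct node dst[] = { { 2, 2 }, { 6, 2 } };

            memset(mem, 0, sizeof(mem));
            memcpy(mem + 0,  "AAAABBBB", 8);   /* source pages 0-1 */
            memcpy(mem + 16, "CCCCDDDD", 8);   /* source pages 4-5 */

            scattered_copy(mem, src, dst, 4, 4);
            printf("%.8s %.8s\n", (char *)(mem + 8), (char *)(mem + 24));
            /* prints "AAAABBBB CCCCDDDD" */
            return 0;
    }

In the driver the per-chunk copy returns a fence, and the loop keeps only the last one for ttm_bo_pipeline_move(), waiting on it in the error path.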
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9812c805326c..98ee384f0fca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -66,6 +66,7 @@ struct amdgpu_mman {
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
@@ -77,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit);
+ struct dma_fence **fence, bool direct_submit);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cb3d252f3c78..0f0b38191fac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
ucode->mc_addr = mc_addr;
ucode->kaddr = kptr;
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
+ return 0;
+
header = (const struct common_firmware_header *)ucode->fw->data;
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
le32_to_cpu(header->ucode_array_offset_bytes)),
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
return 0;
}
+static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+ uint64_t mc_addr, void *kptr)
+{
+ const struct gfx_firmware_header_v1_0 *header = NULL;
+ const struct common_firmware_header *comm_hdr = NULL;
+ uint8_t* src_addr = NULL;
+ uint8_t* dst_addr = NULL;
+
+ if (NULL == ucode->fw)
+ return 0;
+
+ comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
+ header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ dst_addr = ucode->kaddr +
+ ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
+ PAGE_SIZE);
+ src_addr = (uint8_t *)ucode->fw->data +
+ le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
+ (le32_to_cpu(header->jt_offset) * 4);
+ memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
+
+ return 0;
+}
+
+
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
goto failed_reserve;
}
- err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+ err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &fw_mc_addr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin;
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
+ if (i == AMDGPU_UCODE_ID_CP_MEC1) {
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
+ fw_buf_ptr + fw_offset);
+ fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+ }
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
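The amdgpu_ucode.c hunk above copies the CP MEC jump table to a page-aligned slot right after the main microcode and advances fw_offset past both. The offsets involved are plain ALIGN() arithmetic; a tiny sketch with made-up sizes (the byte and dword counts below are illustrative, not real firmware sizes):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
            uint64_t page_size = 4096;
            uint64_t ucode_size_bytes = 263616;  /* main microcode, hypothetical */
            uint64_t jt_size_dw = 1024;          /* jump table, in dwords, hypothetical */

            /* JT destination relative to this ucode's kaddr, as in
             * amdgpu_ucode_patch_jt(): page-aligned end of the main image. */
            uint64_t jt_dst = ALIGN(ucode_size_bytes, page_size);

            /* Offset of the next firmware slot after both increments of fw_offset. */
            uint64_t next_fw = jt_dst + ALIGN(jt_size_dw * 4, page_size);

            printf("jt at +%llu, next fw at +%llu\n",
                   (unsigned long long)jt_dst, (unsigned long long)next_fw);
            return 0;
    }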
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index e468be4e28fa..a8a4230729f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC1,
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_RLC_G,
+ AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586..fb270c7e7171 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
continue;
}
- fence_wait(fence, false);
- fence_put(fence);
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
adev->uvd.filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0);
@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
+ parser->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
ib->length_dw);
@@ -909,14 +912,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
int i, r;
@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
- if (!bo->adev->uvd.address_64_bit) {
+ if (!ring->adev->uvd.address_64_bit) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
}
@@ -960,7 +963,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err_free;
@@ -975,9 +978,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ttm_eu_fence_buffer_objects(&ticket, &head, f);
if (fence)
- *fence = fence_get(f);
+ *fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
- fence_put(f);
+ dma_fence_put(f);
return 0;
@@ -993,7 +996,7 @@ err:
crash the vcpu so just try to emmit a dummy create/destroy msg to
avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1042,7 +1046,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1128,7 +1133,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
*/
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1143,7 +1148,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -1154,7 +1159,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
- fence_put(fence);
+ dma_fence_put(fence);
error:
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index c850009602d1..6249ba1bde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd884f06..69b66b9e7f57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -395,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint64_t dummy;
int i, r;
@@ -450,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
amdgpu_job_free(job);
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -476,12 +477,12 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -513,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
@@ -526,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
}
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t *size = &tmp;
int i, r, idx = 0;
+ p->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;
@@ -788,6 +792,96 @@ out:
}
/**
+ * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode
+ *
+ * @p: parser context
+ *
+ */
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ int session_idx = -1;
+ uint32_t destroyed = 0;
+ uint32_t created = 0;
+ uint32_t allocated = 0;
+ uint32_t tmp, handle = 0;
+ int i, r = 0, idx = 0;
+
+ while (idx < ib->length_dw) {
+ uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+ uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+ if ((len < 8) || (len & 3)) {
+ DRM_ERROR("invalid VCE command length (%d)!\n", len);
+ r = -EINVAL;
+ goto out;
+ }
+
+ switch (cmd) {
+ case 0x00000001: /* session */
+ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ session_idx = amdgpu_vce_validate_handle(p, handle,
+ &allocated);
+ if (session_idx < 0) {
+ r = session_idx;
+ goto out;
+ }
+ break;
+
+ case 0x01000001: /* create */
+ created |= 1 << session_idx;
+ if (destroyed & (1 << session_idx)) {
+ destroyed &= ~(1 << session_idx);
+ allocated |= 1 << session_idx;
+
+ } else if (!(allocated & (1 << session_idx))) {
+ DRM_ERROR("Handle already in use!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ break;
+
+ case 0x02000001: /* destroy */
+ destroyed |= 1 << session_idx;
+ break;
+
+ default:
+ break;
+ }
+
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ idx += len / 4;
+ }
+
+ if (allocated & ~created) {
+ DRM_ERROR("New session without create command!\n");
+ r = -ENOENT;
+ }
+
+out:
+ if (!r) {
+ /* No error, free all destroyed handle slots */
+ tmp = destroyed;
+ amdgpu_ib_free(p->adev, ib, NULL);
+ } else {
+ /* Error during parsing, free all allocated handle slots */
+ tmp = allocated;
+ }
+
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ if (tmp & (1 << i))
+ atomic_set(&p->adev->vce.handles[i], 0);
+
+ return r;
+}
+
+/**
* amdgpu_vce_ring_emit_ib - execute indirect buffer
*
* @ring: engine to use
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*
@@ -883,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
*/
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -902,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -913,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
error:
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 12729d2852df..d98041f7508d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,11 +29,12 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06f24322e7c3..337c5b31d18d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
*
* @adev: amdgpu device pointer
* @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
*
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if necessary.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
uint64_t num_evictions;
unsigned i;
+ int r;
/* We only need to validate the page tables
* if they aren't already valid.
*/
num_evictions = atomic64_read(&adev->num_evictions);
if (num_evictions == vm->last_eviction_counter)
- return;
+ return 0;
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- list_add(&entry->tv.head, duplicates);
+ r = validate(param, bo);
+ if (r)
+ return r;
}
+ return 0;
}
/**
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_lock(&glob->lru_lock);
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
}
spin_unlock(&glob->lru_lock);
}
@@ -194,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
uint64_t fence_context = adev->fence_context + ring->idx;
- struct fence *updates = sync->last_vm_update;
+ struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id, *idle;
- struct fence **fences;
+ struct dma_fence **fences;
unsigned i;
int r = 0;
@@ -225,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (&idle->list == &adev->vm_manager.ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct fence_array *array;
+ struct dma_fence_array *array;
unsigned j;
for (j = 0; j < i; ++j)
- fence_get(fences[j]);
+ dma_fence_get(fences[j]);
- array = fence_array_create(i, fences, fence_context,
+ array = dma_fence_array_create(i, fences, fence_context,
seqno, true);
if (!array) {
for (j = 0; j < i; ++j)
- fence_put(fences[j]);
+ dma_fence_put(fences[j]);
kfree(fences);
r = -ENOMEM;
goto error;
@@ -243,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
r = amdgpu_sync_fence(ring->adev, sync, &array->base);
- fence_put(&array->base);
+ dma_fence_put(&array->base);
if (r)
goto error;
@@ -257,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check if we can use a VMID already assigned to this VM */
i = ring->idx;
do {
- struct fence *flushed;
+ struct dma_fence *flushed;
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
@@ -279,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
continue;
if (id->last_flush->context != fence_context &&
- !fence_is_signaled(id->last_flush))
+ !dma_fence_is_signaled(id->last_flush))
continue;
flushed = id->flushed_updates;
if (updates &&
- (!flushed || fence_is_later(updates, flushed)))
+ (!flushed || dma_fence_is_later(updates, flushed)))
continue;
/* Good we can use this VMID. Remember this submission as
@@ -315,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- fence_put(id->first);
- id->first = fence_get(fence);
+ dma_fence_put(id->first);
+ id->first = dma_fence_get(fence);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = NULL;
- fence_put(id->flushed_updates);
- id->flushed_updates = fence_get(updates);
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
id->pd_gpu_addr = job->vm_pd_addr;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@@ -341,9 +346,9 @@ error:
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- const struct amdgpu_ip_block_version *ip_block;
+ const struct amdgpu_ip_block *ip_block;
- if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
/* only compute rings */
return false;
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
if (!ip_block)
return false;
- if (ip_block->major <= 7) {
+ if (ip_block->version->major <= 7) {
/* gfx7 has no workaround */
return true;
- } else if (ip_block->major == 8) {
+ } else if (ip_block->version->major == 8) {
if (adev->gfx.mec_fw_version >= 673)
/* gfx8 is fixed in MEC firmware 673 */
return false;
@@ -393,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
amdgpu_vm_is_gpu_reset(adev, id))) {
- struct fence *fence;
+ struct dma_fence *fence;
trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -403,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
return r;
mutex_lock(&adev->vm_manager.lock);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
@@ -537,7 +542,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
{
struct amdgpu_ring *ring;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
unsigned entries;
@@ -578,7 +583,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
goto error_free;
amdgpu_bo_fence(bo, fence, true);
- fence_put(fence);
+ dma_fence_put(fence);
return 0;
error_free:
@@ -612,32 +617,35 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- bool shadow)
+/*
+ * amdgpu_vm_update_page_directory - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory,
+ * including the shadow page directory when one is present.
+ *
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
+ struct amdgpu_bo *shadow;
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
- vm->page_directory;
- uint64_t pd_addr;
+ uint64_t pd_addr, shadow_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
- uint64_t last_pde = ~0, last_pt = ~0;
+ uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int r;
- if (!pd)
- return 0;
-
- r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
- if (r)
- return r;
-
- pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ shadow = vm->page_directory->shadow;
/* padding, etc. */
ndw = 64;
@@ -645,6 +653,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* assume the worst case */
ndw += vm->max_pde_used * 6;
+ pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+ if (shadow) {
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ shadow_addr = amdgpu_bo_gpu_offset(shadow);
+ ndw *= 2;
+ } else {
+ shadow_addr = 0;
+ }
+
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
@@ -655,30 +674,26 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
- struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+ struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
if (bo->shadow) {
- struct amdgpu_bo *shadow = bo->shadow;
+ struct amdgpu_bo *pt_shadow = bo->shadow;
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ r = amdgpu_ttm_bind(&pt_shadow->tbo,
+ &pt_shadow->tbo.mem);
if (r)
return r;
}
pt = amdgpu_bo_gpu_offset(bo);
- if (!shadow) {
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
- } else {
- if (vm->page_tables[pt_idx].shadow_addr == pt)
- continue;
- vm->page_tables[pt_idx].shadow_addr = pt;
- }
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+
+ vm->page_tables[pt_idx].addr = pt;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -686,6 +701,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
+ if (shadow)
+ amdgpu_vm_do_set_ptes(&params,
+ last_shadow,
+ last_pt, count,
+ incr,
+ AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID);
@@ -693,34 +715,44 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
count = 1;
last_pde = pde;
+ last_shadow = shadow_addr + pt_idx * 8;
last_pt = pt;
} else {
++count;
}
}
- if (count)
+ if (count) {
+ if (vm->page_directory->shadow)
+ amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
+ }
- if (params.ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+ if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ return 0;
+ }
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
+ if (shadow)
+ amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
- amdgpu_bo_fence(pd, fence, true);
- fence_put(vm->page_directory_fence);
- vm->page_directory_fence = fence_get(fence);
- fence_put(fence);
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
- } else {
- amdgpu_job_free(job);
- }
+ amdgpu_bo_fence(vm->page_directory, fence, true);
+ dma_fence_put(vm->page_directory_fence);
+ vm->page_directory_fence = dma_fence_get(fence);
+ dma_fence_put(fence);
return 0;
@@ -729,29 +761,6 @@ error_free:
return r;
}
-/*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- int r;
-
- r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
- if (r)
- return r;
- return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
-}
-
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
@@ -781,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* initialize the variables */
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -804,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the page tables */
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
@@ -929,20 +938,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_ring *ring;
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
@@ -1045,10 +1054,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
- fence_put(*fence);
- *fence = fence_get(f);
+ dma_fence_put(*fence);
+ *fence = dma_fence_get(f);
}
- fence_put(f);
+ dma_fence_put(f);
return 0;
error_free:
@@ -1065,8 +1074,8 @@ error_free:
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
* @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@@ -1074,17 +1083,16 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
- uint32_t flags, uint64_t addr,
- struct fence **fence)
+ uint32_t flags,
+ struct drm_mm_node *nodes,
+ struct dma_fence **fence)
{
- const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
- uint64_t src = 0, start = mapping->it.start;
+ uint64_t pfn, src = 0, start = mapping->it.start;
int r;
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -1097,23 +1105,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
- if (pages_addr) {
- if (flags == gtt_flags)
- src = adev->gart.table_addr + (addr >> 12) * 8;
- addr = 0;
+ pfn = mapping->offset >> PAGE_SHIFT;
+ if (nodes) {
+ while (pfn >= nodes->size) {
+ pfn -= nodes->size;
+ ++nodes;
+ }
}
- addr += mapping->offset;
- if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
- start, mapping->it.last,
- flags, addr, fence);
+ do {
+ uint64_t max_entries;
+ uint64_t addr, last;
+
+ if (nodes) {
+ addr = nodes->start << PAGE_SHIFT;
+ max_entries = (nodes->size - pfn) *
+ (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ } else {
+ addr = 0;
+ max_entries = S64_MAX;
+ }
- while (start != mapping->it.last + 1) {
- uint64_t last;
+ if (pages_addr) {
+ if (flags == gtt_flags)
+ src = adev->gart.table_addr +
+ (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+ else
+ max_entries = min(max_entries, 16ull * 1024ull);
+ addr = 0;
+ } else if (flags & AMDGPU_PTE_VALID) {
+ addr += adev->vm_manager.vram_base_offset;
+ }
+ addr += pfn << PAGE_SHIFT;
- last = min((uint64_t)mapping->it.last, start + max_size - 1);
+ last = min((uint64_t)mapping->it.last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@@ -1121,9 +1146,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
+ pfn += last - start + 1;
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+ }
start = last + 1;
- addr += max_size * AMDGPU_GPU_PAGE_SIZE;
- }
+
+ } while (unlikely(start != mapping->it.last + 1));
return 0;
}
@@ -1147,40 +1177,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
struct ttm_mem_reg *mem;
- struct fence *exclusive;
- uint64_t addr;
+ struct drm_mm_node *nodes;
+ struct dma_fence *exclusive;
int r;
if (clear) {
mem = NULL;
- addr = 0;
+ nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
mem = &bo_va->bo->tbo.mem;
- addr = (u64)mem->start << PAGE_SHIFT;
- switch (mem->mem_type) {
- case TTM_PL_TT:
+ nodes = mem->mm_node;
+ if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo_va->bo->tbo.ttm, struct
ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
- break;
-
- case TTM_PL_VRAM:
- addr += adev->vm_manager.vram_base_offset;
- break;
-
- default:
- break;
}
-
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == bo_va->bo->adev) ? flags : 0;
+ adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1210,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive,
gtt_flags, pages_addr, vm,
- mapping, flags, addr,
+ mapping, flags, nodes,
&bo_va->last_pt_update);
if (r)
return r;
@@ -1405,18 +1425,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
- struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt;
- entry = &vm->page_tables[pt_idx].entry;
- if (entry->robj)
+ if (vm->page_tables[pt_idx].bo)
continue;
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1442,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
}
}
- entry->robj = pt;
- entry->priority = 0;
- entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
- entry->user_pages = NULL;
+ vm->page_tables[pt_idx].bo = pt;
vm->page_tables[pt_idx].addr = 0;
}
@@ -1547,7 +1562,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
kfree(mapping);
}
- fence_put(bo_va->last_pt_update);
+ dma_fence_put(bo_va->last_pt_update);
kfree(bo_va);
}
@@ -1626,7 +1641,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1697,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
- struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+ struct amdgpu_bo *pt = vm->page_tables[i].bo;
if (!pt)
continue;
@@ -1709,7 +1725,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
- fence_put(vm->page_directory_fence);
+ dma_fence_put(vm->page_directory_fence);
}
/**
@@ -1733,7 +1749,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
- adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->vm_manager.fence_context =
+ dma_fence_context_alloc(AMDGPU_MAX_RINGS);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
@@ -1755,8 +1772,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
- fence_put(adev->vm_manager.ids[i].first);
+ dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
- fence_put(id->flushed_updates);
+ dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
}
}
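The fence handling above is a mechanical rename: every fence_*() helper becomes the matching dma_fence_*() helper from <linux/dma-fence.h>, with unchanged semantics. A minimal sketch of the renamed API, using an illustrative example_ helper that is not part of this patch:

static long example_wait_and_drop(struct dma_fence *f, long timeout)
{
	long r = 0;

	if (f && !dma_fence_is_signaled(f))			/* was fence_is_signaled() */
		r = dma_fence_wait_timeout(f, false, timeout);	/* was fence_wait_timeout() */
	dma_fence_put(f);					/* was fence_put(); NULL-safe */
	return r;
}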
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
new file mode 100644
index 000000000000..adbc2f5e5c7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM 16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID (1 << 0)
+#define AMDGPU_PTE_SYSTEM (1 << 1)
+#define AMDGPU_PTE_SNOOPED (1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE (1 << 4)
+
+#define AMDGPU_PTE_READABLE (1 << 5)
+#define AMDGPU_PTE_WRITEABLE (1 << 6)
+
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
+
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER 0
+#define AMDGPU_VM_FAULT_STOP_FIRST 1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+
+struct amdgpu_vm_pt {
+ struct amdgpu_bo *bo;
+ uint64_t addr;
+};
+
+struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root va;
+
+ /* protecting invalidated */
+ spinlock_t status_lock;
+
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
+ /* BOs cleared in the PT because of a move */
+ struct list_head cleared;
+
+ /* BO mappings freed, but not yet updated in the PT */
+ struct list_head freed;
+
+ /* contains the page directory */
+ struct amdgpu_bo *page_directory;
+ unsigned max_pde_used;
+ struct dma_fence *page_directory_fence;
+ uint64_t last_eviction_counter;
+
+ /* array of page tables, one for each page directory entry */
+ struct amdgpu_vm_pt *page_tables;
+
+ /* for id and flush management per ring */
+ struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
+
+ /* protecting freed */
+ spinlock_t freed_lock;
+
+ /* Scheduler entity for page table updates */
+ struct amd_sched_entity entity;
+
+ /* client id */
+ u64 client_id;
+};
+
+struct amdgpu_vm_id {
+ struct list_head list;
+ struct dma_fence *first;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ atomic64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct dma_fence *flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+};
+
+struct amdgpu_vm_manager {
+ /* Handling of VMIDs */
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
+ uint32_t max_pfn;
+ /* vram base address for page table entry */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+ /* vm pte handling */
+ const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
+ struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_rings;
+ atomic_t vm_pte_next_ring;
+ /* client id counter */
+ atomic64_t client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+ struct list_head *validated,
+ struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*callback)(void *p, struct amdgpu_bo *bo),
+ void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr, uint64_t offset,
+ uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va);
+
+#endif
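The header above replaces the old duplicates-list helper with amdgpu_vm_validate_pt_bos(), which walks the page table BOs and hands each one to a caller-supplied callback. A minimal caller-side sketch, assuming a hypothetical callback; the example_ names are illustrative and not part of this patch:

static int example_validate_cb(void *param, struct amdgpu_bo *bo)
{
	/* a real callback would re-validate or move the evicted BO here */
	return 0;
}

static int example_validate_vm(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	/* the page directory itself is still handled via amdgpu_vm_get_pd_bo() */
	return amdgpu_vm_validate_pt_bos(adev, vm, example_validate_cb, adev);
}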
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
new file mode 100644
index 000000000000..180eed7c8bca
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_vram_mgr {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of VRAM
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct amdgpu_vram_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ drm_mm_init(&mgr->mm, 0, p_size);
+ spin_lock_init(&mgr->lock);
+ man->priv = mgr;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ if (!drm_mm_clean(&mgr->mm)) {
+ spin_unlock(&mgr->lock);
+ return -EBUSY;
+ }
+
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
+ kfree(mgr);
+ man->priv = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate VRAM for the given BO.
+ */
+static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm *mm = &mgr->mm;
+ struct drm_mm_node *nodes;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
+ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+ unsigned i;
+ int r;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
+ amdgpu_vram_page_split == -1) {
+ pages_per_node = ~0ul;
+ num_nodes = 1;
+ } else {
+ pages_per_node = max((uint32_t)amdgpu_vram_page_split,
+ mem->page_alignment);
+ num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ }
+
+ nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
+ aflags = DRM_MM_CREATE_TOP;
+ }
+
+ pages_left = mem->num_pages;
+
+ spin_lock(&mgr->lock);
+ for (i = 0; i < num_nodes; ++i) {
+ unsigned long pages = min(pages_left, pages_per_node);
+ uint32_t alignment = mem->page_alignment;
+
+ if (pages == pages_per_node)
+ alignment = pages_per_node;
+ else
+ sflags |= DRM_MM_SEARCH_BEST;
+
+ r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
+ alignment, 0,
+ place->fpfn, lpfn,
+ sflags, aflags);
+ if (unlikely(r))
+ goto error;
+
+ pages_left -= pages;
+ }
+ spin_unlock(&mgr->lock);
+
+ mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
+ mem->mm_node = nodes;
+
+ return 0;
+
+error:
+ while (i--)
+ drm_mm_remove_node(&nodes[i]);
+ spin_unlock(&mgr->lock);
+
+ kfree(nodes);
+ return r == -ENOSPC ? 0 : r;
+}
+
+/**
+ * amdgpu_vram_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object with the nodes to free
+ *
+ * Free the allocated VRAM again.
+ */
+static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+
+ if (!mem->mm_node)
+ return;
+
+ spin_lock(&mgr->lock);
+ while (pages) {
+ pages -= nodes->size;
+ drm_mm_remove_node(nodes);
+ ++nodes;
+ }
+ spin_unlock(&mgr->lock);
+
+ kfree(mem->mm_node);
+ mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_vram_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ drm_mm_debug_table(&mgr->mm, prefix);
+ spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
+ amdgpu_vram_mgr_init,
+ amdgpu_vram_mgr_fini,
+ amdgpu_vram_mgr_new,
+ amdgpu_vram_mgr_del,
+ amdgpu_vram_mgr_debug
+};
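For context on how this manager is consumed: it is hooked into TTM by pointing the VRAM memory type at amdgpu_vram_mgr_func, after which a non-contiguous BO may be backed by several drm_mm nodes that callers walk through mem->mm_node. A sketch under those assumptions; the example_ helpers are illustrative, not part of this patch:

static void example_setup_vram_type(struct ttm_mem_type_manager *man)
{
	man->func = &amdgpu_vram_mgr_func;	/* instead of the generic range manager */
}

static void example_walk_vram_nodes(struct ttm_mem_reg *mem)
{
	struct drm_mm_node *node = mem->mm_node;
	unsigned long pages = mem->num_pages;

	/* each node covers node->size pages starting at node->start */
	while (pages) {
		pages -= node->size;
		++node;
	}
}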
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f95e74..8c9bc75a9c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -31,6 +31,7 @@
#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
+#include "atombios_crtc.h"
#include "amdgpu_atombios.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1d8c375a3561..1caff75ab9fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
- if (pi->uvd_power_gated == gate)
- return;
-
pi->uvd_power_gated = gate;
ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
sclk = ps->performance_levels[0].sclk;
}
+ if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+ sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+ if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+ mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
if (rps->vce_active) {
if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
struct ci_power_info *pi = ci_get_pi(adev);
int i, ret;
+ if (amdgpu_ci_is_smc_running(adev)) {
+ DRM_INFO("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
break;
@@ -4075,7 +4083,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
}
} else {
- if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ if (pi->uvd_enabled) {
pi->uvd_enabled = false;
pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
amdgpu_ci_send_msg_to_smc_with_parameter(adev,
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp;
+ int ret = 0;
if (!gate) {
+ /* turn the clocks on when decoding */
+ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
if (pi->caps_uvd_dpm ||
(adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
WREG32_SMC(ixDPM_TABLE_475, tmp);
+ ret = ci_enable_uvd_dpm(adev, true);
+ } else {
+ ret = ci_enable_uvd_dpm(adev, false);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
}
- return ci_enable_uvd_dpm(adev, !gate);
+ return ret;
}
static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
ret = ci_enable_vce_dpm(adev, true);
} else {
+ ret = ci_enable_vce_dpm(adev, false);
+ if (ret)
+ return ret;
/* turn the clocks off when not encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
- if (ret)
- return ret;
-
- ret = ci_enable_vce_dpm(adev, false);
}
}
return ret;
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
pi->current_rps = *rps;
pi->current_ps = *new_ps;
pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
pi->requested_rps = *rps;
pi->requested_ps = *new_ps;
pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
int ret;
- if (amdgpu_ci_is_smc_running(adev))
- return -EINVAL;
if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
ci_enable_voltage_control(adev);
ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
amdgpu_dpm_print_ps_status(adev, rps);
}
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+ const struct ci_pl *ci_cpl2)
+{
+ return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+ (ci_cpl1->sclk == ci_cpl2->sclk) &&
+ (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+ (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct ci_ps *ci_cps;
+ struct ci_ps *ci_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ ci_cps = ci_get_ps(cps);
+ ci_rps = ci_get_ps(rps);
+
+ if (ci_cps == NULL) {
+ *equal = false;
+ return 0;
+ }
+
+ if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < ci_cps->performance_level_count; i++) {
+ if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+ &(ci_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
struct ci_power_info *pi = ci_get_pi(adev);
@@ -6236,6 +6308,8 @@ static int ci_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
@@ -6287,12 +6361,19 @@ static int ci_dpm_suspend(void *handle)
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- ci_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+ adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+ adev->pm.dpm.last_state = adev->pm.dpm.state;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
mutex_unlock(&adev->pm.mutex);
+ amdgpu_pm_compute_clocks(adev);
+
}
+
return 0;
}
@@ -6310,6 +6391,8 @@ static int ci_dpm_resume(void *handle)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
+ adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+ adev->pm.dpm.state = adev->pm.dpm.last_state;
mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
amdgpu_pm_compute_clocks(adev);
@@ -6644,6 +6727,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.set_sclk_od = ci_dpm_set_sclk_od,
.get_mclk_od = ci_dpm_get_mclk_od,
.set_mclk_od = ci_dpm_set_mclk_od,
+ .check_state_equal = ci_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6662,3 +6747,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a845b6a93b79..302df85893ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* cik_asic_reset - soft reset GPU
*
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
- cik_set_bios_scratch_engine_hung(adev, true);
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = cik_gpu_pci_config_reset(adev);
- cik_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}
-static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-int cik_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_common_ip_funcs = {
+static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
.late_init = NULL,
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};
+
+static const struct amdgpu_ip_block_version cik_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_HAWAII:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KAVERI:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 5ebd2d7a0327..c4989f51ecef 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,8 +24,6 @@
#ifndef __CIK_H__
#define __CIK_H__
-extern const struct amd_ip_funcs cik_common_ip_funcs;
-
void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index be3d6f79a864..319b32cdea84 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_ih_ip_funcs = {
+static const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
.late_init = NULL,
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
if (adev->irq.ih_funcs == NULL)
adev->irq.ih_funcs = &cik_ih_funcs;
}
+
+const struct amdgpu_ip_block_version cik_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
index 6b0f375ec244..1d9ddee2868e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
@@ -24,6 +24,6 @@
#ifndef __CIK_IH_H__
#define __CIK_IH_H__
-extern const struct amd_ip_funcs cik_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952acc7133..4c34dbc7a254 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_NOP_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* cik_sdma_ring_emit_hdp_flush */
- 3 + /* cik_sdma_ring_emit_hdp_invalidate */
- 6 + /* cik_sdma_ring_emit_pipeline_sync */
- 12 + /* cik_sdma_ring_emit_vm_flush */
- 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_sdma_ip_funcs = {
+static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
.late_init = NULL,
@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
.get_rptr = cik_sdma_ring_get_rptr,
.get_wptr = cik_sdma_ring_get_wptr,
.set_wptr = cik_sdma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* cik_sdma_ring_emit_hdp_flush */
+ 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 6 + /* cik_sdma_ring_emit_pipeline_sync */
+ 12 + /* cik_sdma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
- .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
- .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version cik_sdma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
index 027727c677b8..a4a8fe01410b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
@@ -24,6 +24,6 @@
#ifndef __CIK_SDMA_H__
#define __CIK_SDMA_H__
-extern const struct amd_ip_funcs cik_sdma_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_sdma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 8659852aea9e..6cbd913fd12e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -43,6 +43,14 @@
#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c)
#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c)
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 3c082e143730..352b5fad5a06 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev,
pi->current_ps = *ps;
pi->current_rps = *rps;
- pi->current_rps.ps_priv = ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev,
pi->requested_ps = *ps;
pi->requested_rps = *rps;
- pi->requested_rps.ps_priv = ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
@@ -2257,6 +2259,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
}
+static int cz_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs cz_dpm_ip_funcs = {
.name = "cz_dpm",
.early_init = cz_dpm_early_init,
@@ -2289,6 +2303,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
.vblank_too_short = NULL,
.powergate_uvd = cz_dpm_powergate_uvd,
.powergate_vce = cz_dpm_powergate_vce,
+ .check_state_equal = cz_check_state_equal,
};
static void cz_dpm_set_funcs(struct amdgpu_device *adev)
@@ -2296,3 +2311,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev)
if (NULL == adev->pm.funcs)
adev->pm.funcs = &cz_dpm_funcs;
}
+
+const struct amdgpu_ip_block_version cz_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 3d23a70b6432..fe7cbb24da7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cz_ih_ip_funcs = {
+static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
.late_init = NULL,
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &cz_ih_funcs;
}
+const struct amdgpu_ip_block_version cz_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
index fc4057a2ecb9..14be7753221b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
@@ -24,6 +24,6 @@
#ifndef __CZ_IH_H__
#define __CZ_IH_H__
-extern const struct amd_ip_funcs cz_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cz_ih_ip_block;
#endif /* __CZ_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4108c686aa7c..65a954cb69ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v10_0.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v10_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq,
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
@@ -2115,7 +2032,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2227,9 +2144,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -3151,10 +3067,6 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v10_0_hw_fini(handle);
}
@@ -3165,8 +3077,6 @@ static int dce_v10_0_resume(void *handle)
ret = dce_v10_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3554,7 +3464,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
.late_init = NULL,
@@ -3885,3 +3795,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v10_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v10_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index e3dc04d293e4..7a0747789f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -24,7 +24,9 @@
#ifndef __DCE_V10_0_H__
#define __DCE_V10_0_H__
-extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+
+extern const struct amdgpu_ip_block_version dce_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v10_1_ip_block;
void dce_v10_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f264b8f17ad1..d807e876366b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v11_0.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v11_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -2096,7 +2013,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2208,9 +2125,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -3215,10 +3131,6 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v11_0_hw_fini(handle);
}
@@ -3229,8 +3141,6 @@ static int dce_v11_0_resume(void *handle)
ret = dce_v11_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3611,7 +3521,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
.late_init = NULL,
@@ -3941,3 +3851,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v11_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v11_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 1f58a65ba2ef..0d878ca3acba 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -24,7 +24,8 @@
#ifndef __DCE_V11_0_H__
#define __DCE_V11_0_H__
-extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
void dce_v11_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b948d6cb1399..bc9f2f423270 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -46,6 +46,16 @@ static const u32 crtc_offsets[6] =
SI_CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS,
+};
+
static const uint32_t dig_offsets[] = {
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
@@ -94,15 +104,6 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- DC_HPD1_INT_CONTROL,
- DC_HPD2_INT_CONTROL,
- DC_HPD3_INT_CONTROL,
- DC_HPD4_INT_CONTROL,
- DC_HPD5_INT_CONTROL,
- DC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -257,34 +258,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE)
+ connected = true;
return connected;
}
@@ -303,58 +281,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v6_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(DC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(DC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(DC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(DC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(DC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -369,34 +304,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
- DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPDx_EN;
+ WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -405,34 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -454,32 +347,18 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPDx_EN;
+ WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
@@ -611,12 +490,55 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
- if (!render)
+ if (!render)
WREG32(R_000300_VGA_RENDER_CONTROL,
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
+static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ num_crtc = 6;
+ break;
+ case CHIP_OLAND:
+ num_crtc = 2;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev)
+{
+ /*Disable VGA render and enabled crtc, if has DCE engine*/
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v6_0_set_vga_render_state(adev, false);
+
+ /*Disable crtc*/
+ for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
+ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) &
+ EVERGREEN_CRTC_MASTER_EN;
+ if (crtc_enabled) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
@@ -1534,6 +1456,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1637,7 +1560,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -2117,13 +2040,13 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v6_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled)
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
@@ -2338,21 +2261,20 @@ static int dce_v6_0_early_init(void *handle)
dce_v6_0_set_display_funcs(adev);
dce_v6_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_OLAND:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 2;
adev->mode_info.num_dig = 2;
break;
default:
- /* FIXME: not supported yet */
return -EINVAL;
}
@@ -2482,10 +2404,6 @@ static int dce_v6_0_hw_fini(void *handle)
static int dce_v6_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v6_0_hw_fini(handle);
}
@@ -2496,8 +2414,6 @@ static int dce_v6_0_resume(void *handle)
ret = dce_v6_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -2588,42 +2504,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl |= DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -2796,7 +2693,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -2807,12 +2704,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_INFO("IH: HPD%d\n", hpd + 1);
}
@@ -2833,7 +2729,7 @@ static int dce_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
.late_init = NULL,
@@ -3174,3 +3070,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v6_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index 6a5528105bb6..7b546b596de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -24,6 +24,9 @@
#ifndef __DCE_V6_0_H__
#define __DCE_V6_0_H__
-extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v6_4_ip_block;
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5966166ec94c..4ae59914bc32 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v8_0.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] =
CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ HPD0_REGISTER_OFFSET,
+ HPD1_REGISTER_OFFSET,
+ HPD2_REGISTER_OFFSET,
+ HPD3_REGISTER_OFFSET,
+ HPD4_REGISTER_OFFSET,
+ HPD5_REGISTER_OFFSET
+};
+
static const uint32_t dig_offsets[] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- mmDC_HPD1_INT_CONTROL,
- mmDC_HPD2_INT_CONTROL,
- mmDC_HPD3_INT_CONTROL,
- mmDC_HPD4_INT_CONTROL,
- mmDC_HPD5_INT_CONTROL,
- mmDC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ connected = true;
return connected;
}
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v8_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(mmDC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(mmDC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- WREG32(mmDC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(mmDC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- WREG32(mmDC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(mmDC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- WREG32(mmDC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(mmDC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- WREG32(mmDC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(mmDC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- WREG32(mmDC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
- (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
- DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
@@ -2030,7 +1910,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2135,9 +2015,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -3033,10 +2912,6 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v8_0_hw_fini(handle);
}
@@ -3047,8 +2922,6 @@ static int dce_v8_0_resume(void *handle)
ret = dce_v8_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3204,42 +3077,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -3412,7 +3266,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3423,12 +3277,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
@@ -3449,7 +3302,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
.late_init = NULL,
@@ -3779,3 +3632,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7d0770c3a49b..13b802dd946a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -24,7 +24,11 @@
#ifndef __DCE_V8_0_H__
#define __DCE_V8_0_H__
-extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_2_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_3_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_5_ip_block;
void dce_v8_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f045532..81cbf0b05dff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -27,6 +27,9 @@
#include "atom.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
@@ -34,11 +37,13 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry);
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index);
/**
* dce_virtual_vblank_wait - vblank wait asic callback.
@@ -99,6 +104,14 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ dce_v6_0_disable_dce(adev);
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -119,6 +132,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
/* no DCE */
return;
default:
@@ -195,16 +211,15 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_crtc->enabled = true;
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ /* Make sure VBLANK interrupts are still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
amdgpu_crtc->enabled = false;
break;
}
@@ -264,24 +279,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
-
return true;
}
@@ -341,6 +338,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
return 0;
@@ -350,48 +348,128 @@ static int dce_virtual_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
dce_virtual_set_display_funcs(adev);
dce_virtual_set_irq_funcs(adev);
- adev->mode_info.num_crtc = 1;
adev->mode_info.num_hpd = 1;
adev->mode_info.num_dig = 1;
return 0;
}
-static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+static struct drm_encoder *
+dce_virtual_encoder(struct drm_connector *connector)
{
- struct amdgpu_i2c_bus_rec ddc_bus;
- struct amdgpu_router router;
- struct amdgpu_hpd hpd;
+ int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
- /* look up gpio for ddc, hpd */
- ddc_bus.valid = false;
- hpd.hpd = AMDGPU_HPD_NONE;
- /* needed for aux chan transactions */
- ddc_bus.hpd = hpd.hpd;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
- memset(&router, 0, sizeof(router));
- router.ddc_valid = false;
- router.cd_valid = false;
- amdgpu_display_add_connector(adev,
- 0,
- ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
- CONNECTOR_OBJECT_ID_VIRTUAL,
- &hpd,
- &router);
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
+ continue;
- amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
- ATOM_DEVICE_CRT1_SUPPORT,
- 0);
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ return encoder;
+ }
- amdgpu_link_encoder_connector(adev->ddev);
+ /* pick the first one */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+ return NULL;
+}
+
+static int dce_virtual_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ unsigned i;
+ static const struct mode_size {
+ int w;
+ int h;
+ } common_modes[17] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+ };
+
+ for (i = 0; i < 17; i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ drm_mode_probed_add(connector, mode);
+ }
- return true;
+ return 0;
+}
+
+static int dce_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+dce_virtual_dpms(struct drm_connector *connector, int mode)
+{
+ return 0;
}
+static enum drm_connector_status
+dce_virtual_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static int
+dce_virtual_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void dce_virtual_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static void dce_virtual_force(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
+ .get_modes = dce_virtual_get_modes,
+ .mode_valid = dce_virtual_mode_valid,
+ .best_encoder = dce_virtual_encoder,
+};
+
+static const struct drm_connector_funcs dce_virtual_connector_funcs = {
+ .dpms = dce_virtual_dpms,
+ .detect = dce_virtual_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = dce_virtual_set_property,
+ .destroy = dce_virtual_destroy,
+ .force = dce_virtual_force,
+};
+
static int dce_virtual_sw_init(void *handle)
{
int r, i;
@@ -420,16 +498,16 @@ static int dce_virtual_sw_init(void *handle)
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
- /* allocate crtcs */
+ /* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_virtual_crtc_init(adev, i);
if (r)
return r;
+ r = dce_virtual_connector_encoder_init(adev, i);
+ if (r)
+ return r;
}
- dce_virtual_get_connector_info(adev);
- amdgpu_print_display_setup(adev->ddev);
-
drm_kms_helper_poll_init(adev->ddev);
adev->mode_info.mode_config_initialized = true;
@@ -496,7 +574,7 @@ static int dce_virtual_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_virtual_ip_funcs = {
+static const struct amd_ip_funcs dce_virtual_ip_funcs = {
.name = "dce_virtual",
.early_init = dce_virtual_early_init,
.late_init = NULL,
@@ -526,8 +604,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
static void
dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return;
}
@@ -547,10 +625,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
- /* set the active encoder to connector routing */
- amdgpu_encoder_set_active_device(encoder);
-
return true;
}
@@ -576,45 +650,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
.destroy = dce_virtual_encoder_destroy,
};
-static void dce_virtual_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index)
{
- struct drm_device *dev = adev->ddev;
struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
+ struct drm_connector *connector;
+
+ /* add a new encoder */
+ encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
+ encoder->possible_crtcs = 1 << index;
+ drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+ connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+ if (!connector) {
+ kfree(encoder);
+ return -ENOMEM;
}
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
+ /* add a new connector */
+ drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_connector_register(connector);
- encoder = &amdgpu_encoder->base;
- encoder->possible_crtcs = 0x1;
- amdgpu_encoder->enc_priv = NULL;
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
- DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+ /* link them */
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
}
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
@@ -630,8 +699,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
.page_flip = &dce_virtual_page_flip,
.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
- .add_encoder = &dce_virtual_encoder_add,
- .add_connector = &amdgpu_connector_add,
+ .add_encoder = NULL,
+ .add_connector = NULL,
.stop_mc_access = &dce_virtual_stop_mc_access,
.resume_mc_access = &dce_virtual_resume_mc_access,
};
@@ -642,107 +711,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
adev->mode_info.funcs = &dce_virtual_display_funcs;
}
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
-{
- struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
- struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
- unsigned crtc = 0;
- drm_handle_vblank(adev->ddev, crtc);
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- return HRTIMER_NORESTART;
-}
-
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- if (state && !adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Enable software vsync timer\n");
- hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
- adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
- hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- } else if (!state && adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Disable software vsync timer\n");
- hrtimer_cancel(&adev->mode_info.vblank_timer);
- }
-
- adev->mode_info.vsync_timer_enabled = state;
- DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
-}
-
-
-static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-}
-
-static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = 0;
- unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
-
- dce_virtual_crtc_vblank_int_ack(adev, crtc);
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
- return 0;
-}
-
-static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
- DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
-
- return 0;
-}
-
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+ unsigned crtc_id)
{
unsigned long flags;
- unsigned crtc_id = 0;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
- crtc_id = 0;
amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
if (crtc_id >= adev->mode_info.num_crtc) {
@@ -781,22 +756,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
return 0;
}
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+ struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
+ struct amdgpu_crtc, vblank_timer);
+ struct drm_device *ddev = amdgpu_crtc->base.dev;
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+ dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+ HRTIMER_MODE_REL);
+
+ return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Enable software vsync timer\n");
+ hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ adev->mode_info.crtcs[crtc]->vblank_timer.function =
+ dce_virtual_vblank_timer_handle;
+ hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Disable software vsync timer\n");
+ hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
+ }
+
+ adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
+ DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ if (type > AMDGPU_CRTC_IRQ_VBLANK6)
+ return -EINVAL;
+
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);
+
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
.set = dce_virtual_set_crtc_irq_state,
- .process = dce_virtual_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
- .set = dce_virtual_set_pageflip_irq_state,
- .process = dce_virtual_pageflip_irq,
+ .process = NULL,
};
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
- adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
}
+const struct amdgpu_ip_block_version dce_virtual_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index e239243f6ebc..ed422012c8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -24,8 +24,7 @@
#ifndef __DCE_VIRTUAL_H__
#define __DCE_VIRTUAL_H__
-extern const struct amd_ip_funcs dce_virtual_ip_funcs;
-#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+extern const struct amdgpu_ip_block_version dce_virtual_ip_block;
#endif
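
The dce_virtual hunks above replace the single device-wide software vsync timer with one hrtimer per CRTC, stored on the amdgpu_crtc itself, so every virtual display gets its own ~60 Hz (16666666 ns) vblank tick and the separate pageflip IRQ source can be dropped: the timer handler drives both the vblank event and the flip completion. The following is only a minimal sketch of that per-CRTC timer pattern, written against the hrtimer calls that appear in the patch; the fake_crtc type and function names are invented stand-ins and the fragment is not part of the patch nor compile-tested outside a kernel-module context.

/*
 * Per-CRTC software vblank timer, in the shape used by dce_virtual above.
 * Only the hrtimer API usage mirrors the patch; the types are simplified.
 */
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define VBLANK_PERIOD_NS 16666666	/* ~60 Hz, cf. DCE_VIRTUAL_VBLANK_PERIOD */

struct fake_crtc {
	int id;
	bool timer_enabled;
	struct hrtimer vblank_timer;	/* one timer per CRTC, not per device */
};

static enum hrtimer_restart fake_vblank_handle(struct hrtimer *t)
{
	/* container_of() recovers the owning CRTC, as the patch does */
	struct fake_crtc *crtc = container_of(t, struct fake_crtc, vblank_timer);

	/* ... signal vblank / complete any pending flip for crtc->id ... */
	hrtimer_start(t, ktime_set(0, VBLANK_PERIOD_NS), HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}

static void fake_crtc_set_vblank(struct fake_crtc *crtc, bool enable)
{
	if (enable && !crtc->timer_enabled) {
		hrtimer_init(&crtc->vblank_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		crtc->vblank_timer.function = fake_vblank_handle;
		hrtimer_start(&crtc->vblank_timer,
			      ktime_set(0, VBLANK_PERIOD_NS), HRTIMER_MODE_REL);
	} else if (!enable && crtc->timer_enabled) {
		hrtimer_cancel(&crtc->vblank_timer);
	}
	crtc->timer_enabled = enable;
}

This mirrors how dce_virtual_set_crtc_vblank_interrupt_state() arms and disarms adev->mode_info.crtcs[crtc]->vblank_timer for the CRTC index passed in as the irq "type".
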
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 40abb6b81c09..21c086e02e7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1940,7 +1940,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -1966,7 +1966,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
/* write new base address */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* gfx_v6_0_ring_emit_ib */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
- 3; /* gfx_v6_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v6_0_ring_emit_vm_flush */
- 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
@@ -2896,9 +2869,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2920,9 +2891,7 @@ static int gfx_v6_0_sw_init(void *handle)
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -3237,7 +3206,7 @@ static int gfx_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
.late_init = NULL,
@@ -3255,10 +3224,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+ 3, /* gfx_v6_ring_emit_cntxcntl */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3269,15 +3248,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v6_0_ring_emit_vm_flush */
+ 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3287,8 +3273,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.test_ring = gfx_v6_0_ring_test_ring,
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -3360,3 +3344,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index b9657e72b248..ced6fc42f688 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GFX_V6_0_H__
#define __GFX_V6_0_H__
-extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block;
#endif
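
The gfx_v6_0 hunks above also introduce the amdgpu_ring_funcs rework that runs through the rest of this patch: ring type, alignment mask, nop packet, emit_frame_size and emit_ib_size become constant fields of the funcs table instead of per-ring parameters to amdgpu_ring_init() or get_emit_ib_size()/get_dma_frame_size() callbacks. Below is a reduced, standalone C illustration of that idea; the field names follow the diff, but struct ring_funcs, struct ring and frame_dwords() are invented for illustration only.

/* Reduced illustration of moving per-ring constants into the funcs table. */
#include <stdint.h>
#include <stdio.h>

struct ring_funcs {
	int type;			/* e.g. GFX vs COMPUTE */
	uint32_t align_mask;		/* formerly a parameter of ring init */
	uint32_t nop;			/* filler packet, also a former parameter */
	unsigned emit_frame_size;	/* replaces get_dma_frame_size() callback */
	unsigned emit_ib_size;		/* replaces get_emit_ib_size() callback */
};

struct ring {
	const struct ring_funcs *funcs;
	const char *name;
};

/* Before: callers invoked callbacks; after: they just read the constants. */
static unsigned frame_dwords(const struct ring *ring, unsigned num_ibs)
{
	return ring->funcs->emit_frame_size +
	       num_ibs * ring->funcs->emit_ib_size;
}

int main(void)
{
	static const struct ring_funcs gfx_funcs = {
		.type = 0,
		.align_mask = 0xff,
		.nop = 0x80000000,
		.emit_frame_size = 5 + 5 + 14 + 14 + 14 + 7 + 4 + 17 + 6 + 3,
		.emit_ib_size = 6,
	};
	struct ring gfx = { .funcs = &gfx_funcs, .name = "gfx" };

	printf("%s frame needs %u dwords for 2 IBs\n",
	       gfx.name, frame_dwords(&gfx, 2));
	return 0;
}

Since the values are compile-time constants per ring type rather than per-ring state, the driver no longer has to pass them to amdgpu_ring_init() or query them through function pointers.
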
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782..5b631fd1a879 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask;
- int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+ int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
*/
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.save_restore_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.save_restore_obj);
if (r) {
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- return
- 4; /* gfx_v7_0_ring_emit_ib_gfx */
+ WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+ return RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
- 3; /* gfx_v7_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v7_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v7_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
+ .read_wave_data = &gfx_v7_0_read_wave_data,
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle)
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+ 3, /* gfx_v7_ring_emit_cntxcntl */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v7_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index 94e3ea147c26..2f5164cc0e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -24,6 +24,9 @@
#ifndef __GFX_V7_0_H__
#define __GFX_V7_0_H__
-extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee6a48a09214..23f1bc94ad3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -640,7 +640,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
- mmATC_MISC_CG, 0xffffffff, 0x000c0200,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -798,7 +797,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -824,7 +823,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -844,7 +843,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1058,6 +1057,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ /* we need account JT in */
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+ if (amdgpu_sriov_vf(adev)) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+ info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+ info->fw = adev->gfx.mec_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
+ }
+
if (adev->gfx.mec2_fw) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
@@ -1127,34 +1139,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_TONGA:
- case CHIP_POLARIS10:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x0000002A);
- break;
- case CHIP_POLARIS11:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_FIJI:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- case CHIP_TOPAZ:
- case CHIP_CARRIZO:
- buffer[count++] = cpu_to_le32(0x00000002);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_STONEY:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -1273,7 +1259,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -1315,7 +1302,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -1575,7 +1563,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r, i;
u32 tmp;
unsigned total_size, vgpr_offset, sgpr_offset;
@@ -1708,7 +1696,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* wait for the GPU to finish processing the IB */
- r = fence_wait(f, false);
+ r = dma_fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto fail;
@@ -1729,7 +1717,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
fail:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
return r;
}
@@ -2045,10 +2033,8 @@ static int gfx_v8_0_sw_init(void *handle)
ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
}
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2072,10 +2058,8 @@ static int gfx_v8_0_sw_init(void *handle)
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ irq_type);
if (r)
return r;
}
@@ -3679,6 +3663,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
num_rb_pipes);
}
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -4331,7 +4330,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
- u64 rb_addr, rptr_addr;
+ u64 rb_addr, rptr_addr, wptr_gpu_addr;
int r;
/* Set the write pointer delay */
@@ -4362,6 +4361,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
mdelay(1);
WREG32(mmCP_RB0_CNTL, tmp);
@@ -5438,9 +5440,41 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+ WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+ return RREG32(mmSQ_IND_DATA);
+}
+
+static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+{
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
+
static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v8_0_select_se_sh,
+ .read_wave_data = &gfx_v8_0_read_wave_data,
};
static int gfx_v8_0_early_init(void *handle)
@@ -6120,7 +6154,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6222,7 +6256,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -6240,11 +6274,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-
- /* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
- if (usepfp)
- amdgpu_ring_insert_nop(ring, 128);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6360,42 +6390,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
- 2 + /* gfx_v8_ring_emit_sb */
- 3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v8_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -6541,7 +6535,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
.late_init = gfx_v8_0_late_init,
@@ -6562,10 +6556,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+ 2 + /* gfx_v8_ring_emit_sb */
+ 3, /* gfx_v8_ring_emit_cntxcntl */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6579,15 +6585,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v8_ring_emit_sb,
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6599,8 +6613,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6753,3 +6765,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index ebed1f829297..788cc3ab584b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -24,6 +24,7 @@
#ifndef __GFX_V8_0_H__
#define __GFX_V8_0_H__
-extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b13c8aaec078..1940d36bc304 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1030,7 +1030,7 @@ static int gmc_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.name = "gmc_v6_0",
.early_init = gmc_v6_0_early_init,
.late_init = gmc_v6_0_late_init,
@@ -1069,3 +1069,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index 42c4fc676cd4..ed2f64dec47a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GMC_V6_0_H__
#define __GMC_V6_0_H__
-extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index aa0c4b964621..3a25f72980c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1235,7 +1235,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
@@ -1273,3 +1273,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 0b386b5d2f7a..ebce2966c1c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -24,6 +24,7 @@
#ifndef __GMC_V7_0_H__
#define __GMC_V7_0_H__
-extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c22ef140a542..f7372d32b8e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] =
static const u32 stoney_mgcg_cgcg_init[] =
{
+ mmATC_MISC_CG, 0xffffffff, 0x000c0200,
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
@@ -1436,7 +1437,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
@@ -1477,3 +1478,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index fc5001a8119d..19b8a8aed204 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -24,6 +24,8 @@
#ifndef __GMC_V8_0_H__
#define __GMC_V8_0_H__
-extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block;
#endif
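
The header changes above all follow one pattern: the per-IP amd_ip_funcs table goes file-static, and each supported hardware revision instead exports a small amdgpu_ip_block_version descriptor pointing back at the shared table. A minimal sketch of adding one more revision under that pattern (gmc_v8_7 is purely illustrative, not a block in this series):

    /* Sketch only: a further GMC revision reusing the shared funcs table. */
    const struct amdgpu_ip_block_version gmc_v8_7_ip_block =
    {
        .type  = AMD_IP_BLOCK_TYPE_GMC,
        .major = 8,
        .minor = 7,                   /* the revision lives in the descriptor... */
        .rev   = 0,
        .funcs = &gmc_v8_0_ip_funcs,  /* ...not in a duplicated funcs table */
    };
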
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 3b8906ce3511..ac21bb7bc0f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs iceland_ih_ip_funcs = {
+static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
.late_init = NULL,
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &iceland_ih_funcs;
}
+const struct amdgpu_ip_block_version iceland_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &iceland_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
index 57558cddfbcb..3235f4277548 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
@@ -24,6 +24,6 @@
#ifndef __ICELAND_IH_H__
#define __ICELAND_IH_H__
-extern const struct amd_ip_funcs iceland_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version iceland_ih_ip_block;
#endif /* __ICELAND_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f8618a3881a8..61172d4a0657 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -3063,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
kv_dpm_fini(adev);
@@ -3243,6 +3245,18 @@ static int kv_dpm_set_powergating_state(void *handle,
return 0;
}
+static int kv_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3273,6 +3287,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
+ .check_state_equal = kv_check_state_equal,
};
static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -3291,3 +3307,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version kv_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 565dab3c7218..e81aa4682760 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v2_4_ring_emit_hdp_flush */
- 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
- 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
- 12 + /* sdma_v2_4_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.late_init = NULL,
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v2_4_ring_emit_hdp_flush */
+ 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+ 12 + /* sdma_v2_4_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
- .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v2_4_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
index 07349f5ee10f..28b433729216 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
@@ -24,6 +24,6 @@
#ifndef __SDMA_V2_4_H__
#define __SDMA_V2_4_H__
-extern const struct amd_ip_funcs sdma_v2_4_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block;
#endif
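
The SDMA hunks also fold the per-ring constants into amdgpu_ring_funcs: type, align_mask and nop are no longer passed to amdgpu_ring_init(), and the get_emit_ib_size()/get_dma_frame_size() callbacks become the constant emit_ib_size/emit_frame_size fields, which is why the insert_nop paths now read ring->funcs->nop. A minimal sketch of sizing a submission from the new fields (my_frame_dw() is a hypothetical helper, not part of the patch):

    /* Sketch: worst-case dword count for one submission, taken from the
     * constant fields rather than per-ring callbacks. */
    static unsigned my_frame_dw(struct amdgpu_ring *ring, unsigned num_ibs)
    {
        return ring->funcs->emit_frame_size +
               num_ibs * ring->funcs->emit_ib_size;
    }
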
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a9d10941fb53..77f146587c60 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v3_0_ring_emit_hdp_flush */
- 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
- 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
- 12 + /* sdma_v3_0_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
.late_init = NULL,
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v3_0_ring_emit_hdp_flush */
+ 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+ 12 + /* sdma_v3_0_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
- .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
index 0cb9698a3054..7aa223d35f1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
@@ -24,6 +24,7 @@
#ifndef __SDMA_V3_0_H__
#define __SDMA_V3_0_H__
-extern const struct amd_ip_funcs sdma_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index dc9511c5ecb8..3ed8ad8725b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -39,6 +39,7 @@
#include "si_dma.h"
#include "dce_v6_0.h"
#include "si.h"
+#include "dce_virtual.h"
static const u32 tahiti_golden_registers[] =
{
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
-u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
return r;
}
-void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_common_ip_funcs = {
+static const struct amd_ip_funcs si_common_ip_funcs = {
.name = "si_common",
.early_init = si_common_early_init,
.late_init = NULL,
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = {
.set_powergating_state = si_common_set_powergating_state,
};
-static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+static const struct amdgpu_ip_block_version si_common_ip_block =
{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
-/* {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- */
-};
-
-
-static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
-{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
};
int si_set_ip_blocks(struct amdgpu_device *adev)
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ break;
case CHIP_OLAND:
- adev->ip_blocks = verde_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
- adev->ip_blocks = hainan_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 959d7b63e0e5..589225080c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,8 +24,6 @@
#ifndef __SI_H__
#define __SI_H__
-extern const struct amd_ip_funcs si_common_ip_funcs;
-
void si_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int si_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index de358193a8f9..3dd552ae0b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 3 + /* si_dma_ring_emit_hdp_flush */
- 3 + /* si_dma_ring_emit_hdp_invalidate */
- 6 + /* si_dma_ring_emit_pipeline_sync */
- 12 + /* si_dma_ring_emit_vm_flush */
- 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int si_dma_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_dma_ip_funcs = {
+static const struct amd_ip_funcs si_dma_ip_funcs = {
.name = "si_dma",
.early_init = si_dma_early_init,
.late_init = NULL,
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
.get_rptr = si_dma_ring_get_rptr,
.get_wptr = si_dma_ring_get_wptr,
.set_wptr = si_dma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 3 + /* si_dma_ring_emit_hdp_flush */
+ 3 + /* si_dma_ring_emit_hdp_invalidate */
+ 6 + /* si_dma_ring_emit_pipeline_sync */
+ 12 + /* si_dma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
.emit_ib = si_dma_ring_emit_ib,
.emit_fence = si_dma_ring_emit_fence,
.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
.test_ib = si_dma_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = si_dma_ring_pad_ib,
- .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
- .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
};
static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version si_dma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
index 3a3e0c78a54b..5ac1b8452fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -24,6 +24,6 @@
#ifndef __SI_DMA_H__
#define __SI_DMA_H__
-extern const struct amd_ip_funcs si_dma_ip_funcs;
+extern const struct amdgpu_ip_block_version si_dma_ip_block;
#endif
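
The fence changes in these files are part of the tree-wide rename of struct fence to struct dma_fence; the calls keep their semantics. A short sketch of the contract the IB tests above rely on (dma_fence_wait_timeout() returns a negative error, 0 on timeout, or the remaining jiffies on success; dma_fence_put() releases the reference):

    long r = dma_fence_wait_timeout(f, false, timeout);
    if (r == 0)
        r = -ETIMEDOUT;      /* wait expired without the fence signaling */
    else if (r > 0)
        r = 0;               /* signaled in time */
    dma_fence_put(f);        /* release the fence reference */
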
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3de7bca5854b..f0f2f6c9718e 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+ adev->pm.dpm.current_ps = &eg_pi->current_rps;
}
static void ni_update_requested_ps(struct amdgpu_device *adev,
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev,
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+ adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
}
static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
@@ -3477,6 +3479,49 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
+ /* limit all SI kickers */
+ if (adev->asic_type == CHIP_PITCAIRN) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811) ||
+ (adev->pdev->device == 0x6816) ||
+ (adev->pdev->device == 0x6817) ||
+ (adev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (adev->asic_type == CHIP_VERDE) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
+ (adev->pdev->device == 0x6820) ||
+ (adev->pdev->device == 0x6821) ||
+ (adev->pdev->device == 0x6822) ||
+ (adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682A) ||
+ (adev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (adev->asic_type == CHIP_OLAND) {
+ if ((adev->pdev->revision == 0xC7) ||
+ (adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (adev->asic_type == CHIP_HAINAN) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6664) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (adev->pdev->vendor == p->chip_vendor &&
@@ -3489,22 +3534,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
++p;
}
- /* limit mclk on all R7 370 parts for stability */
- if (adev->pdev->device == 0x6811 &&
- adev->pdev->revision == 0x81)
- max_mclk = 120000;
- /* limit sclk/mclk on Jet parts for stability */
- if (adev->pdev->device == 0x6665 &&
- adev->pdev->revision == 0xc3) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
- /* Limit clocks for some HD8600 parts */
- if (adev->pdev->device == 0x6660 &&
- adev->pdev->revision == 0x83) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
@@ -7320,7 +7349,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -7777,6 +7806,8 @@ static int si_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
si_dpm_fini(adev);
@@ -7957,6 +7988,57 @@ static int si_dpm_early_init(void *handle)
return 0;
}
+static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
+ const struct rv7xx_pl *si_cpl2)
+{
+ return ((si_cpl1->mclk == si_cpl2->mclk) &&
+ (si_cpl1->sclk == si_cpl2->sclk) &&
+ (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
+ (si_cpl1->vddc == si_cpl2->vddc) &&
+ (si_cpl1->vddci == si_cpl2->vddci));
+}
+
+static int si_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct si_ps *si_cps;
+ struct si_ps *si_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ si_cps = si_get_ps(cps);
+ si_rps = si_get_ps(rps);
+
+ if (si_cps == NULL) {
+ printk("si_cps is NULL\n");
+ *equal = false;
+ return 0;
+ }
+
+ if (si_cps->performance_level_count != si_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < si_cps->performance_level_count; i++) {
+ if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
+ &(si_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
@@ -7991,6 +8073,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+ .check_state_equal = &si_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -8010,3 +8094,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}
+const struct amdgpu_ip_block_version si_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dpm_ip_funcs,
+};
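
Both DPM blocks gain a check_state_equal callback so the core can skip redundant power-state reprograms: kv always reports states as unequal, while si compares every performance level and uses the UVD/VCE clocks as a tie-breaker. A minimal sketch of how the result is meant to be used (calling the si helper directly here for illustration; the core dispatches through the amdgpu_dpm_funcs table the hunk above fills in):

    /* Sketch (hypothetical caller): only touch the hardware when the
     * requested state differs from the current one. */
    bool equal = false;

    si_check_state_equal(adev, cps, rps, &equal);
    if (equal)
        return 0;            /* nothing to reprogram */
    /* ...otherwise fall through to the full state-programming path... */
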
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 8fae3d4a2360..db0f36846661 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_ih_ip_funcs = {
+static const struct amd_ip_funcs si_ih_ip_funcs = {
.name = "si_ih",
.early_init = si_ih_early_init,
.late_init = NULL,
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &si_ih_funcs;
}
+const struct amdgpu_ip_block_version si_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
index f3e3a954369c..42e64a53e24f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -24,6 +24,6 @@
#ifndef __SI_IH_H__
#define __SI_IH_H__
-extern const struct amd_ip_funcs si_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version si_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b4ea229bb449..52b71ee58793 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs tonga_ih_ip_funcs = {
+static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init,
.late_init = NULL,
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &tonga_ih_funcs;
}
+const struct amdgpu_ip_block_version tonga_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
index 7392d70fa4a7..499027eee5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
@@ -24,6 +24,6 @@
#ifndef __TONGA_IH_H__
#define __TONGA_IH_H__
-extern const struct amd_ip_funcs tonga_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version tonga_ih_ip_block;
-#endif /* __CZ_IH_H__ */
+#endif /* __TONGA_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f6c941550b8f..8f9c7d55ddda 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -36,6 +36,9 @@
#include "bif/bif_4_1_d.h"
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -116,8 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -526,20 +528,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v4_2_ring_emit_hdp_flush */
- 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
- 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
-}
-
/**
* uvd_v4_2_mc_resume - memory controller programming
*
@@ -698,18 +686,34 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ else
+ tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
if (state == AMD_CG_STATE_GATE)
gate = true;
+ uvd_v5_0_set_bypass_mode(adev, gate);
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+ return 0;
+
uvd_v4_2_enable_mgcg(adev, gate);
return 0;
@@ -738,7 +742,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
.late_init = NULL,
@@ -756,10 +760,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v4_2_ring_emit_hdp_flush */
+ 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+ 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +782,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -789,3 +799,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
index 0a615dd50840..8a0444bb8b95 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V4_2_H__
#define __UVD_V4_2_H__
-extern const struct amd_ip_funcs uvd_v4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 400c16fe579e..95303e2d5f92 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -33,6 +33,8 @@
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -112,8 +114,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -577,20 +578,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v5_0_ring_emit_hdp_flush */
- 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
- 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
-}
-
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -737,6 +724,20 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
+static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ else
+ tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -744,6 +745,8 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;
+ uvd_v5_0_set_bypass_mode(adev, enable);
+
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
@@ -789,7 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
.late_init = NULL,
@@ -807,10 +810,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v5_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+ 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +832,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -840,3 +849,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
index e3b3c49fa5de..2eaaea793ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V5_0_H__
#define __UVD_V5_0_H__
-extern const struct amd_ip_funcs uvd_v5_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d75656..a339b5ccb296 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -725,31 +724,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 20 + /* uvd_v6_0_ring_emit_vm_flush */
- 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -961,7 +935,7 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
-static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
@@ -979,15 +953,14 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- if (adev->asic_type == CHIP_FIJI ||
- adev->asic_type == CHIP_POLARIS10)
- uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
+ uvd_v6_0_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (state == AMD_CG_STATE_GATE) {
+ if (enable) {
/* disable HW gating and enable Sw gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
@@ -1027,7 +1000,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
.late_init = NULL,
@@ -1048,10 +1021,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,15 +1044,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1083,8 +1072,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1108,3 +1095,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
index 6b92a2352986..d3d48c6428cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
@@ -24,6 +24,8 @@
#ifndef __UVD_V6_0_H__
#define __UVD_V6_0_H__
-extern const struct amd_ip_funcs uvd_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 76e64ad04a53..38ed903dd6f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->vce.irq, 0);
if (r)
return r;
}
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
return vce_v2_0_start(adev);
}
-const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
.late_init = NULL,
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v2_0_ring_get_rptr,
.get_wptr = vce_v2_0_ring_get_wptr,
.set_wptr = vce_v2_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
- .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v2_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
index 0d2ae8a01acd..4d15167654a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
@@ -24,6 +24,6 @@
#ifndef __VCE_V2_0_H__
#define __VCE_V2_0_H__
-extern const struct amd_ip_funcs vce_v2_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v2_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8533269ec160..39f03f137a56 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -52,6 +52,8 @@
#define VCE_V3_0_STACK_SIZE (64 * 1024)
#define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
+#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
+
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -382,6 +384,10 @@ static int vce_v3_0_sw_init(void *handle)
if (r)
return r;
+ /* 52.8.3 required for 3 ring support */
+ if (adev->vce.fw_version < FW_52_8_3)
+ adev->vce.num_rings = 2;
+
r = amdgpu_vce_resume(adev);
if (r)
return r;
@@ -389,8 +395,7 @@ static int vce_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
return r;
}
@@ -808,28 +813,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, seq);
}
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 6 + /* vce_v3_0_emit_vm_flush */
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
-const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
.late_init = NULL,
@@ -850,10 +834,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size =
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -862,15 +853,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .emit_frame_size =
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -881,8 +878,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -910,3 +905,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
index b45af65da81f..08b908c7de0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
@@ -24,6 +24,8 @@
#ifndef __VCE_V3_0_H__
#define __VCE_V3_0_H__
-extern const struct amd_ip_funcs vce_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_1_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_4_ip_block;
#endif
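
The new FW_52_8_3 gate in vce_v3_0_sw_init() packs major/minor/revision into the byte positions the comparison assumes for adev->vce.fw_version (major in bits 31..24, minor in 23..16, revision in 15..8). Worked out for the threshold used above:

    /* (52 << 24) | (8 << 16) | (3 << 8)
     *   = 0x34000000 | 0x00080000 | 0x00000300
     *   = 0x34080300
     * A device reporting e.g. firmware 52.4.0 (0x34040000) compares below
     * this, so the driver drops back to adev->vce.num_rings = 2.
     */
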
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index c0d9aad7126f..0b21e7beda91 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -80,7 +80,9 @@
#include "dce_virtual.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
@@ -121,8 +123,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- r = RREG32(mmSMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ r = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -132,8 +134,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- WREG32(mmSMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ WREG32(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -437,12 +439,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
- WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
- WREG32(mmSMC_IND_DATA_0, 0);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
+ WREG32(mmSMC_IND_DATA_11, 0);
/* set index to data for continuous read */
- WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
+ dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
@@ -556,21 +558,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =
{mmPA_SC_RASTER_CONFIG_1, false, true},
};
-static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 reg_offset)
-{
- uint32_t val;
+static uint32_t vi_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
+{
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -605,10 +686,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
if (reg_offset != asic_register_entry->reg_offset)
continue;
if (!asic_register_entry->untouched)
- *value = asic_register_entry->grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ asic_register_entry->grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
}
@@ -618,10 +698,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
continue;
if (!vi_allowed_read_registers[i].untouched)
- *value = vi_allowed_read_registers[i].grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ vi_allowed_read_registers[i].grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
return -EINVAL;
@@ -652,18 +731,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
-static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* vi_asic_reset - soft reset GPU
*
@@ -677,11 +744,11 @@ static int vi_asic_reset(struct amdgpu_device *adev)
{
int r;
- vi_set_bios_scratch_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = vi_gpu_pci_config_reset(adev);
- vi_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -781,734 +848,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
-/* topaz has no DCE, UVD, VCE */
-static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-int vi_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
- break;
-
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
@@ -1593,7 +932,7 @@ static int vi_common_early_init(void *handle)
break;
case CHIP_TONGA:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = AMD_PG_SUPPORT_UVD;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_POLARIS11:
@@ -1651,7 +990,7 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCE_MGCG;
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_UVD |
@@ -1908,7 +1247,7 @@ static int vi_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs vi_common_ip_funcs = {
+static const struct amd_ip_funcs vi_common_ip_funcs = {
.name = "vi_common",
.early_init = vi_common_early_init,
.late_init = NULL,
@@ -1925,3 +1264,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
.set_powergating_state = vi_common_set_powergating_state,
};
+static const struct amdgpu_ip_block_version vi_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+};
+
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ /* topaz has no DCE, UVD, VCE */
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+ break;
+ case CHIP_FIJI:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_TONGA:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ break;
+ case CHIP_CARRIZO:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ case CHIP_STONEY:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
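The hunks above drop the per-ASIC static arrays of amdgpu_ip_block_version and instead register blocks one at a time through amdgpu_ip_block_add(), branching on adev->enable_virtual_display only for the DCE entry. The sketch below is a minimal, self-contained illustration of that registration-plus-walk pattern; the structures, the fixed-size list and the function names are simplified stand-ins, not the real amdgpu definitions.

/* Simplified stand-ins for the amdgpu structures (illustrative only). */
#include <stdio.h>

#define MAX_IP_BLOCKS 16

struct ip_funcs {
	const char *name;
	int (*early_init)(void *handle);
};

struct ip_block_version {
	int type, major, minor, rev;
	const struct ip_funcs *funcs;
};

struct device_ctx {
	int num_ip_blocks;
	const struct ip_block_version *ip_blocks[MAX_IP_BLOCKS];
};

/* Append one IP block, mirroring the add-one-at-a-time style of the patch. */
static int ip_block_add(struct device_ctx *dev,
			const struct ip_block_version *v)
{
	if (dev->num_ip_blocks >= MAX_IP_BLOCKS)
		return -1;
	dev->ip_blocks[dev->num_ip_blocks++] = v;
	return 0;
}

static int common_early_init(void *handle) { (void)handle; return 0; }

static const struct ip_funcs common_funcs = {
	.name = "common", .early_init = common_early_init,
};

static const struct ip_block_version common_block = {
	.type = 0, .major = 1, .minor = 0, .rev = 0, .funcs = &common_funcs,
};

int main(void)
{
	struct device_ctx dev = { 0 };
	int i;

	/* Per-ASIC setup becomes a sequence of add calls instead of a table. */
	ip_block_add(&dev, &common_block);

	for (i = 0; i < dev.num_ip_blocks; i++)
		printf("block %d: %s v%d.%d\n", i,
		       dev.ip_blocks[i]->funcs->name,
		       dev.ip_blocks[i]->major, dev.ip_blocks[i]->minor);
	return 0;
}

One practical consequence of this style is that conditional blocks (virtual display, ACP behind CONFIG_DRM_AMD_ACP) no longer need a duplicated "_vd" array per ASIC; the condition is expressed once at the add call.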
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 502094042462..575d7aed5d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,8 +24,6 @@
#ifndef __VI_H__
#define __VI_H__
-extern const struct amd_ip_funcs vi_common_ip_funcs;
-
void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bec8125bceb0..d1986276dbbd 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -84,6 +84,29 @@ enum amd_powergating_state {
AMD_PG_STATE_UNGATE,
};
+struct amd_vce_state {
+ /* vce clocks */
+ u32 evclk;
+ u32 ecclk;
+ /* gpu clocks */
+ u32 sclk;
+ u32 mclk;
+ u8 clk_idx;
+ u8 pstate;
+};
+
+
+#define AMD_MAX_VCE_LEVELS 6
+
+enum amd_vce_level {
+ AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
index 3014d4a58c43..a9ef1562f43b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
@@ -176,6 +176,8 @@
#define mmSMU1_SMU_SMC_IND_DATA 0x83
#define mmSMU2_SMU_SMC_IND_DATA 0x85
#define mmSMU3_SMU_SMC_IND_DATA 0x87
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define ixRCU_UC_EVENTS 0xc0000004
#define ixRCU_MISC_CTRL 0xc0000010
#define ixCC_RCU_FUSES 0xc00c0000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index 933917479985..22dd4c2b7290 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -87,6 +87,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index 44b1855cb8df..eca2b851f25f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -90,6 +90,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index df7c18b6a02a..e4a1697ec1d3 100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -106,6 +106,7 @@ enum cgs_ucode_id {
CGS_UCODE_ID_CP_MEC_JT2,
CGS_UCODE_ID_GMCON_RENG,
CGS_UCODE_ID_RLC_G,
+ CGS_UCODE_ID_STORAGE,
CGS_UCODE_ID_MAXIMUM,
};
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info);
+typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_gpu_mem_info_t gpu_mem_info;
@@ -670,6 +673,7 @@ struct cgs_ops {
cgs_call_acpi_method call_acpi_method;
/* get system info */
cgs_query_system_info query_system_info;
+ cgs_is_virtualization_enabled_t is_virtualization_enabled;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -773,4 +777,6 @@ struct cgs_device
CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
resource_base)
+#define cgs_is_virtualization_enabled(cgs_device) \
+ CGS_CALL(is_virtualization_enabled, cgs_device)
#endif /* _CGS_COMMON_H */
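The new is_virtualization_enabled callback is wired through the same ops-table plus macro dispatch used by the other CGS calls. Below is a minimal sketch of that dispatch style with invented names; it only illustrates the pattern and is not the CGS implementation. Like the kernel's CGS_CALL, it relies on the GNU ## variadic extension.

#include <stdio.h>

/* Hypothetical, trimmed-down version of the ops-table dispatch pattern. */
struct demo_ops {
	int (*is_virtualization_enabled)(void *dev);
};

struct demo_device {
	const struct demo_ops *ops;
};

/* Dispatch through the ops table; ## swallows the comma when no extra args. */
#define DEMO_CALL(func, dev, ...) \
	(((struct demo_device *)(dev))->ops->func(dev, ##__VA_ARGS__))

#define demo_is_virtualization_enabled(dev) \
	DEMO_CALL(is_virtualization_enabled, dev)

static int demo_virt_enabled(void *dev)
{
	(void)dev;
	return 0;		/* report bare metal in this example */
}

static const struct demo_ops ops = {
	.is_virtualization_enabled = demo_virt_enabled,
};

int main(void)
{
	struct demo_device dev = { .ops = &ops };

	printf("virtualization: %s\n",
	       demo_is_virtualization_enabled(&dev) ? "on" : "off");
	return 0;
}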
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7174f7a68266..0b1f2205c2f1 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -436,7 +436,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
}
}
-int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+ void *input, void *output)
{
int ret = 0;
struct pp_instance *pp_handle;
@@ -475,7 +476,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
return ret;
}
-enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_power_state *state;
@@ -820,6 +821,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
}
+static struct amd_vce_state*
+pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (handle) {
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr && idx < hwmgr->num_vce_state_tables)
+ return &hwmgr->vce_states[idx];
+ }
+
+ return NULL;
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -846,6 +862,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
.read_sensor = pp_dpm_read_sensor,
+ .get_vce_clock_state = pp_dpm_get_vce_clock_state,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
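pp_dpm_get_vce_clock_state() validates the handle and the index before handing back a table entry, returning NULL otherwise. A short, self-contained sketch of the same guarded-lookup idiom follows; the type and field names are made up for the example and the clock values are arbitrary.

#include <stddef.h>
#include <stdio.h>

/* Made-up stand-ins for the powerplay structures. */
struct vce_state { unsigned evclk, ecclk, sclk, mclk; };

struct hw_mgr {
	unsigned num_vce_state_tables;
	struct vce_state vce_states[6];
};

struct pp_instance { struct hw_mgr *hwmgr; };

/* Return NULL unless the handle, manager and index are all valid. */
static struct vce_state *get_vce_clock_state(void *handle, unsigned idx)
{
	struct hw_mgr *hwmgr;

	if (!handle)
		return NULL;
	hwmgr = ((struct pp_instance *)handle)->hwmgr;
	if (hwmgr && idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

int main(void)
{
	struct hw_mgr mgr = {
		.num_vce_state_tables = 1,
		.vce_states = { { 400000, 400000, 600000, 800000 } },
	};
	struct pp_instance inst = { .hwmgr = &mgr };
	struct vce_state *s = get_vce_clock_state(&inst, 0);

	if (s)
		printf("evclk=%u sclk=%u\n", s->evclk, s->sclk);
	return 0;
}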
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 960424913496..4b14f259a147 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState(
return (struct cz_power_state *)hw_ps;
}
-uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
+static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
void *output, void *storage, int result)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
return result;
}
-int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
{
return sizeof(struct cz_power_state);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 14f8c1f4da3d..0723758ed065 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw
PHM_FUNC_CHECK(hwmgr);
if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
- return -EINVAL;
+ return false;
return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 1167205057b3..e03dcb6ea9c1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -710,13 +710,15 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t vol;
int ret = 0;
- if (hwmgr->chip_id < CHIP_POLARIS10) {
- atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+ if (hwmgr->chip_id < CHIP_TONGA) {
+ ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+ } else if (hwmgr->chip_id < CHIP_POLARIS10) {
+ ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
if (*voltage >= 2000 || *voltage == 0)
*voltage = 1150;
} else {
ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
- *voltage = (uint16_t)vol/100;
+ *voltage = (uint16_t)(vol/100);
}
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 1944d289f846..f5e8fda964f7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -25,6 +25,7 @@
#include "linux/delay.h"
#include "hwmgr.h"
#include "amd_acpi.h"
+#include "pp_acpi.h"
bool acpi_atcs_functions_supported(void *device, uint32_t index)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 1126bd4f74dc..0894527d932f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1320,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
- *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
+ *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
+ (&get_voltage_info_param_space))->ulVoltageLevel);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 7de701d8a450..c45bd2560468 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
/**
* Private Function to get the PowerPlay Table Address.
*/
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
{
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables(
return 0;
}
-int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
{
int result = 0;
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
return result;
}
-int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1201,17 +1201,20 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
{
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
- const ATOM_Tonga_VCE_State_Table *vce_state_table =
- (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
+ const ATOM_Tonga_VCE_State_Table *vce_state_table;
- if (vce_state_table == NULL)
+
+ if (pp_table == NULL)
return 0;
+ vce_state_table = (void *)pp_table +
+ le16_to_cpu(pp_table->usVCEStateTableOffset);
+
return vce_state_table->ucNumEntries;
}
static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
- struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+ struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag)
{
const ATOM_Tonga_VCE_State_Record *vce_state_record;
ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
@@ -1315,7 +1318,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
- if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+ if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) {
for (j = 0; j < i; j++)
ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index ccf7ebeaf892..a4e9cf429e62 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
return 0;
}
-int get_number_of_vce_state_table_entries(
+static int get_number_of_vce_state_table_entries(
struct pp_hwmgr *hwmgr)
{
const ATOM_PPLIB_POWERPLAYTABLE *table =
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries(
return 0;
}
-int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6eb6db199250..cf2ee93d8475 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -149,15 +149,21 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
smu7_update_uvd_dpm(hwmgr, true);
smu7_powerdown_uvd(hwmgr);
} else {
smu7_powerup_uvd(hwmgr);
- smu7_update_uvd_dpm(hwmgr, false);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
+ smu7_update_uvd_dpm(hwmgr, false);
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 609996c84ad5..85621a77335d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC {
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
-struct smu7_power_state *cast_phw_smu7_power_state(
+static struct smu7_power_state *cast_phw_smu7_power_state(
struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state(
return (struct smu7_power_state *)hw_ps;
}
-const struct smu7_power_state *cast_const_phw_smu7_power_state(
+static const struct smu7_power_state *cast_const_phw_smu7_power_state(
const struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state(
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
-int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
return 0;
}
-uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
uint32_t speedCntl = 0;
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}
-int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
uint32_t link_width;
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
* @param pHwMgr the address of the powerplay hardware manager.
* @return always PP_Result_OK
*/
-int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1153,7 +1153,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
-int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->pcie_performance_request = true;
@@ -1161,15 +1161,15 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
int result = 0;
tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is already running right now, no need to enable DPM!",
- return 0);
+ "DPM is already running",
+ );
if (smu7_voltage_control(hwmgr)) {
tmp_result = smu7_enable_voltage_control(hwmgr);
@@ -1352,6 +1352,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct cgs_system_info sys_info = {0};
+ int result;
data->dll_default_on = false;
data->mclk_dpm0_activity_target = 0xa;
@@ -1439,6 +1441,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->pcie_lane_performance.min = 16;
data->pcie_lane_power_saving.max = 0;
data->pcie_lane_power_saving.min = 16;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (!result) {
+ if (sys_info.value & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (sys_info.value & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ }
}
/**
@@ -1460,19 +1474,17 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
- if (table_info == NULL)
- return -EINVAL;
-
- sclk_table = table_info->vdd_dep_on_sclk;
-
for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
- if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+ if ((hwmgr->pp_table_version == PP_TABLE_V1)
+ && !phm_get_sclk_for_voltage_evv(hwmgr,
table_info->vddgfx_lookup_table, vv_id, &sclk)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher)) {
+ sclk_table = table_info->vdd_dep_on_sclk;
+
for (j = 1; j < sclk_table->count; j++) {
if (sclk_table->entries[j].clk == sclk &&
sclk_table->entries[j].cks_enable == 0) {
@@ -1498,12 +1510,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
}
}
} else {
-
if ((hwmgr->pp_table_version == PP_TABLE_V0)
|| !phm_get_sclk_for_voltage_evv(hwmgr,
table_info->vddc_lookup_table, vv_id, &sclk)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher)) {
+ if (table_info == NULL)
+ return -EINVAL;
+ sclk_table = table_info->vdd_dep_on_sclk;
+
for (j = 1; j < sclk_table->count; j++) {
if (sclk_table->entries[j].clk == sclk &&
sclk_table->entries[j].cks_enable == 0) {
@@ -1864,7 +1879,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2127,15 +2142,20 @@ static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
}
static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
- struct phm_clock_and_voltage_limits *tab)
+ struct phm_clock_and_voltage_limits *tab)
{
+ uint32_t vddc, vddci;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (tab) {
- smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
- &data->vddc_leakage);
- smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
- &data->vddci_leakage);
+ vddc = tab->vddc;
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
+ &data->vddc_leakage);
+ tab->vddc = vddc;
+ vddci = tab->vddci;
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
+ &data->vddci_leakage);
+ tab->vddci = vddci;
}
return 0;
@@ -2253,7 +2273,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
int result;
@@ -2978,19 +2998,19 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
data->highest_mclk = memory_clock;
- performance_level = &(ps->performance_levels
- [ps->performance_level_count++]);
-
PP_ASSERT_WITH_CODE(
(ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
PP_ASSERT_WITH_CODE(
- (ps->performance_level_count <=
+ (ps->performance_level_count <
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
- "Performance levels exceeds Driver limit!",
- return -EINVAL);
+ "Performance levels exceeds Driver limit, Skip!",
+ return 0);
+
+ performance_level = &(ps->performance_levels
+ [ps->performance_level_count++]);
/* Performance levels are arranged from low to high. */
performance_level->memory_clock = memory_clock;
@@ -3672,14 +3692,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
-int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
}
-int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
uint32_t num_active_displays = 0;
struct cgs_display_info info = {0};
@@ -3701,7 +3723,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return always OK
*/
-int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t num_active_displays = 0;
@@ -3751,7 +3773,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
return smu7_program_display_gap(hwmgr);
}
@@ -3775,13 +3797,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
-int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
const void *thermal_interrupt_info)
{
return 0;
}
-bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
@@ -3810,7 +3833,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
(pl1->pcie_lane == pl2->pcie_lane));
}
-int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
+ const struct pp_hw_power_state *pstate1,
+ const struct pp_hw_power_state *pstate2, bool *equal)
{
const struct smu7_power_state *psa;
const struct smu7_power_state *psb;
@@ -3843,7 +3868,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
return 0;
}
-int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -3972,7 +3997,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -4225,18 +4250,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
+ struct phm_clock_voltage_dependency_table *sclk_table;
int i;
- if (table_info == NULL)
- return -EINVAL;
-
- dep_sclk_table = table_info->vdd_dep_on_sclk;
-
- for (i = 0; i < dep_sclk_table->count; i++) {
- clocks->clock[i] = dep_sclk_table->entries[i].clk;
- clocks->count++;
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
+ return -EINVAL;
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < dep_sclk_table->count; i++) {
+ clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->count++;
+ }
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ for (i = 0; i < sclk_table->count; i++) {
+ clocks->clock[i] = sclk_table->entries[i].clk;
+ clocks->count++;
+ }
}
+
return 0;
}
@@ -4258,17 +4291,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
(struct phm_ppt_v1_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
int i;
+ struct phm_clock_voltage_dependency_table *mclk_table;
- if (table_info == NULL)
- return -EINVAL;
-
- dep_mclk_table = table_info->vdd_dep_on_mclk;
-
- for (i = 0; i < dep_mclk_table->count; i++) {
- clocks->clock[i] = dep_mclk_table->entries[i].clk;
- clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ if (table_info == NULL)
+ return -EINVAL;
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+ clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->latency[i] = smu7_get_mem_latency(hwmgr,
dep_mclk_table->entries[i].clk);
- clocks->count++;
+ clocks->count++;
+ }
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+ for (i = 0; i < mclk_table->count; i++) {
+ clocks->clock[i] = mclk_table->entries[i].clk;
+ clocks->count++;
+ }
}
return 0;
}
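The smu7_get_sclks()/smu7_get_mclks() changes select the clock source by pptable version: the v1 dependency table when pp_table_version is PP_TABLE_V1, the legacy dyn_state table for PP_TABLE_V0. The sketch below captures only that branching shape with invented table layouts; it is not the driver code and omits the latency handling of the mclk path.

#include <stdio.h>

/* Simplified mock of the two dependency-table layouts (names invented). */
enum table_version { TABLE_V0, TABLE_V1 };

struct clk_entry { unsigned clk; };

struct dep_table { unsigned count; struct clk_entry entries[8]; };

struct clocks { unsigned count; unsigned clock[8]; };

/* Pick the source table based on the pptable version, as the patch does. */
static int get_sclks(enum table_version ver,
		     const struct dep_table *v1_tab,
		     const struct dep_table *v0_tab,
		     struct clocks *out)
{
	const struct dep_table *src;
	unsigned i;

	if (ver == TABLE_V1) {
		if (!v1_tab)
			return -1;
		src = v1_tab;
	} else {
		src = v0_tab;
	}

	for (i = 0; i < src->count; i++)
		out->clock[out->count++] = src->entries[i].clk;
	return 0;
}

int main(void)
{
	struct dep_table v0 = { .count = 2, .entries = { { 300 }, { 600 } } };
	struct clocks out = { 0 };

	if (!get_sclks(TABLE_V0, NULL, &v0, &out))
		printf("%u clocks, first %u MHz\n", out.count, out.clock[0]);
	return 0;
}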
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index fb6c6f6106d5..29d0319b22e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ return -ENODEV;
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
@@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ return -ENODEV;
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0))
- return 0;
+ return -ENODEV;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 3fb5e57a378b..eb3e83d7af31 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -359,6 +359,7 @@ struct amd_powerplay_funcs {
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, int32_t *value);
+ struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx);
};
struct amd_powerplay {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 4f0fedd1e9d3..e38b999e3235 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -367,7 +367,7 @@ struct pp_table_func {
int (*pptable_get_vce_state_table_entry)(
struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag);
};
@@ -586,18 +586,6 @@ struct phm_microcode_version_info {
uint32_t NB;
};
-#define PP_MAX_VCE_LEVELS 6
-
-enum PP_VCE_LEVEL {
- PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-
enum PP_TABLE_VERSION {
PP_TABLE_V0 = 0,
PP_TABLE_V1,
@@ -620,7 +608,7 @@ struct pp_hwmgr {
void *hardcode_pp_table;
bool need_pp_table_upload;
- struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
uint32_t num_vce_state_tables;
enum amd_dpm_forced_level dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 9ceaed9ac52a..827860fffe78 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -156,15 +156,6 @@ struct pp_power_state {
struct pp_hw_power_state hardware;
};
-
-/*Structure to hold a VCE state entry*/
-struct pp_vce_state {
- uint32_t evclk;
- uint32_t ecclk;
- uint32_t sclk;
- uint32_t mclk;
-};
-
enum PP_MMProfilingState {
PP_MMProfilingState_NA = 0,
PP_MMProfilingState_Started,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3df5de2cdab0..8fe8ba9434ff 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -21,9 +21,6 @@
*
*/
-extern bool acpi_atcs_functions_supported(void *device,
- uint32_t index);
-extern int acpi_pcie_perf_request(void *device,
- uint8_t perf_req,
- bool advertise);
-extern bool acpi_atcs_notify_pcie_device_ready(void *device);
+bool acpi_atcs_functions_supported(void *device, uint32_t index);
+int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
+bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 76310ac7ef0d..34523fe6ed6f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -2049,7 +2049,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
-int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2125,7 +2125,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2150,7 +2150,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
return SMU73_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 02fe1df855a9..b86e48fb40d1 100755
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
int i, result = -1;
uint32_t reg, data;
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
+static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
{
int result = 0;
uint32_t table_start;
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
int32_t vr_config;
uint32_t table_start;
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_restore_vft_table(struct pp_smumgr *smumgr)
+static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_save_vft_table(struct pp_smumgr *smumgr)
+static int fiji_save_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr)
return -EINVAL;
}
-int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index 8c889caba420..b579f0c175e6 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2140,7 +2140,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2163,7 +2163,7 @@ uint32_t iceland_get_mac_definition(uint32_t value)
return SMU71_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 4ccc0b72324d..8db8e209d915 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -2174,7 +2174,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2201,7 +2201,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
@@ -2214,6 +2214,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t tmp;
int result;
bool error = false;
@@ -2233,8 +2234,10 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
offsetof(SMU74_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
- if (!result)
+ if (!result) {
+ data->soft_regs_start = tmp;
smu_data->smu7_data.soft_regs_start = tmp;
+ }
error |= (0 != result);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5c3598ab7dae..f38a68747df0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
}
-int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+static int
+polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 6af744f42ec9..6df0d6edfdd1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
+ case UCODE_ID_MEC_STORAGE:
+ result = CGS_UCODE_ID_STORAGE;
+ break;
default:
break;
}
@@ -452,6 +455,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 76352f2423ae..919be435b49c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -28,8 +28,6 @@
#include <pp_endian.h>
#define SMC_RAM_END 0x40000
-#define mmSMC_IND_INDEX_11 0x01AC
-#define mmSMC_IND_DATA_11 0x01AD
struct smu7_buffer_entry {
uint32_t data_size;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index de2a24d85f48..d08f6f19b454 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -2651,7 +2651,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x\n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2675,7 +2675,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
case SMU_MAX_LEVELS_MVDD:
return SMU72_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac value %x\n", value);
+ printk(KERN_WARNING "can't get the mac value %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c6caf3..dbd4fd3a810b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
__field(struct amd_sched_job *, sched_job)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
TP_PROTO(struct amd_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
),
TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d46a93..1bf83ed113b3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,10 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
-
-struct kmem_cache *sched_fence_slab;
-atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
@@ -141,7 +138,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(2);
+ entity->fence_context = dma_fence_context_alloc(2);
return 0;
}
@@ -221,32 +218,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
kfifo_free(&entity->job_queue);
}
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
amd_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
}
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
- struct fence * fence = entity->dependency;
+ struct dma_fence * fence = entity->dependency;
struct amd_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourself */
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -257,23 +254,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
* Fence is from the same scheduler, only need to wait for
* it to be scheduled
*/
- fence = fence_get(&s_fence->scheduled);
- fence_put(entity->dependency);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ if (!dma_fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
- if (!fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+ amd_sched_entity_wakeup))
return true;
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -354,7 +351,8 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
finish_cb);
@@ -388,8 +386,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
- if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
- fence_put(s_job->s_fence->parent);
+ if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
}
}
@@ -410,21 +408,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct amd_sched_fence *s_fence = s_job->s_fence;
- struct fence *fence;
+ struct dma_fence *fence;
spin_unlock(&sched->job_list_lock);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
@@ -446,8 +444,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
- fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
- amd_sched_job_finish_cb);
+ dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@@ -511,7 +509,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
@@ -521,7 +519,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->finished);
+ dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -547,7 +545,7 @@ static int amd_sched_main(void *param)
struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
- struct fence *fence;
+ struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
(!amd_sched_blocked(sched) &&
@@ -569,15 +567,15 @@ static int amd_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
@@ -618,13 +616,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
INIT_LIST_HEAD(&sched->ring_mirror_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
- if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
- sched_fence_slab = kmem_cache_create(
- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!sched_fence_slab)
- return -ENOMEM;
- }
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -645,6 +636,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
- if (atomic_dec_and_test(&sched_fence_slab_ref))
- kmem_cache_destroy(sched_fence_slab);
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..d8dc681bcda6 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,14 +25,11 @@
#define _GPU_SCHEDULER_H_
#include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
struct amd_gpu_scheduler;
struct amd_sched_rq;
-extern struct kmem_cache *sched_fence_slab;
-extern atomic_t sched_fence_slab_ref;
-
/**
* A scheduler entity is a wrapper around a job queue or a group
* of other entities. Entities take turns emitting jobs from their
@@ -50,8 +47,8 @@ struct amd_sched_entity {
atomic_t fence_seq;
uint64_t fence_context;
- struct fence *dependency;
- struct fence_cb cb;
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
};
/**
@@ -66,10 +63,10 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence scheduled;
- struct fence finished;
- struct fence_cb cb;
- struct fence *parent;
+ struct dma_fence scheduled;
+ struct dma_fence finished;
+ struct dma_fence_cb cb;
+ struct dma_fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
@@ -79,15 +76,15 @@ struct amd_sched_job {
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- struct fence_cb finish_cb;
+ struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
if (f->ops == &amd_sched_fence_ops_scheduled)
return container_of(f, struct amd_sched_fence, scheduled);
@@ -103,8 +100,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
- struct fence *(*dependency)(struct amd_sched_job *sched_job);
- struct fence *(*run_job)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
void (*timedout_job)(struct amd_sched_job *sched_job);
void (*free_job)(struct amd_sched_job *sched_job);
};
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+int amd_sched_fence_slab_init(void);
+void amd_sched_fence_slab_fini(void);
+
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63beaf7574..33f54d0a5c4f 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,6 +27,25 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
+static struct kmem_cache *sched_fence_slab;
+
+int amd_sched_fence_slab_init(void)
+{
+ sched_fence_slab = kmem_cache_create(
+ "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!sched_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amd_sched_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(sched_fence_slab);
+}
+
struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
void *owner)
{
@@ -42,46 +61,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
- fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
+ dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->scheduled);
+ int ret = dma_fence_signal(&fence->scheduled);
if (!ret)
- FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "was already signaled\n");
}
void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->finished);
+ int ret = dma_fence_signal(&fence->finished);
if (!ret)
- FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->finished, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
{
return "amd_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -95,22 +118,22 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
*/
static void amd_sched_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(fence->parent);
+ dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
/**
- * amd_sched_fence_release - callback that fence can be freed
+ * amd_sched_fence_release_scheduled - callback that fence can be freed
*
* @fence: fence
*
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
@@ -118,33 +141,33 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
}
/**
- * amd_sched_fence_release_scheduled - drop extra reference
+ * amd_sched_fence_release_finished - drop extra reference
*
* @f: fence
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(&fence->scheduled);
+ dma_fence_put(&fence->scheduled);
}
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_scheduled,
};
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_finished,
};
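
The scheduler and fence hunks above are essentially a mechanical rename from the old struct fence interface to struct dma_fence from <linux/dma-fence.h>: fence_init/fence_get/fence_put/fence_signal, struct fence_ops, struct fence_cb and FENCE_TRACE become their dma_fence_* counterparts, and the fence slab moves behind amd_sched_fence_slab_init()/amd_sched_fence_slab_fini() instead of the atomic refcount that used to live in amd_sched_init(). Below is a minimal sketch of the renamed API using only calls that appear in the hunks above; the my_fence_* names are invented for illustration and are not part of the patch.

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_fence_lock);

static const char *my_fence_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *my_fence_timeline_name(struct dma_fence *f)
{
	return "example-timeline";
}

static bool my_fence_enable_signaling(struct dma_fence *f)
{
	return true;	/* same trivial implementation as amd_sched above */
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name   = my_fence_driver_name,
	.get_timeline_name = my_fence_timeline_name,
	.enable_signaling  = my_fence_enable_signaling,
	.wait              = dma_fence_default_wait,
	/* no .release: the default dma_fence_free() kfree_rcu()s the fence */
};

static int my_fence_demo(void)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
	u64 ctx = dma_fence_context_alloc(1);	/* was fence_context_alloc() */

	if (!f)
		return -ENOMEM;

	dma_fence_init(f, &my_fence_ops, &my_fence_lock, ctx, 1);
	dma_fence_signal(f);	/* was fence_signal() */
	dma_fence_put(f);	/* was fence_put(); drops the last reference */
	return 0;
}
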
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 28e6471257d0..0b6eaa49a1db 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -65,9 +65,7 @@ static const struct file_operations arcpgu_drm_ops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index b7a8b2ac4055..b69c66b4897e 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -14,170 +14,45 @@
*
*/
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_encoder_slave.h>
-#include <drm/drm_atomic_helper.h>
#include "arcpgu.h"
-struct arcpgu_drm_connector {
- struct drm_connector connector;
- struct drm_encoder_slave *encoder_slave;
-};
-
-static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
-{
- const struct drm_encoder_slave_funcs *sfuncs;
- struct drm_encoder_slave *slave;
- struct arcpgu_drm_connector *con =
- container_of(connector, struct arcpgu_drm_connector, connector);
-
- slave = con->encoder_slave;
- if (slave == NULL) {
- dev_err(connector->dev->dev,
- "connector_get_modes: cannot find slave encoder for connector\n");
- return 0;
- }
-
- sfuncs = slave->slave_funcs;
- if (sfuncs->get_modes == NULL)
- return 0;
-
- return sfuncs->get_modes(&slave->base, connector);
-}
-
-static enum drm_connector_status
-arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- enum drm_connector_status status = connector_status_unknown;
- const struct drm_encoder_slave_funcs *sfuncs;
- struct drm_encoder_slave *slave;
-
- struct arcpgu_drm_connector *con =
- container_of(connector, struct arcpgu_drm_connector, connector);
-
- slave = con->encoder_slave;
- if (slave == NULL) {
- dev_err(connector->dev->dev,
- "connector_detect: cannot find slave encoder for connector\n");
- return status;
- }
-
- sfuncs = slave->slave_funcs;
- if (sfuncs && sfuncs->detect)
- return sfuncs->detect(&slave->base, connector);
-
- dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n");
- return status;
-}
-
-static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_helper_funcs
-arcpgu_drm_connector_helper_funcs = {
- .get_modes = arcpgu_drm_connector_get_modes,
-};
-
-static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .detect = arcpgu_drm_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = arcpgu_drm_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = {
- .dpms = drm_i2c_encoder_dpms,
- .mode_fixup = drm_i2c_encoder_mode_fixup,
- .mode_set = drm_i2c_encoder_mode_set,
- .prepare = drm_i2c_encoder_prepare,
- .commit = drm_i2c_encoder_commit,
- .detect = drm_i2c_encoder_detect,
-};
-
static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
{
- struct arcpgu_drm_connector *arcpgu_connector;
- struct drm_i2c_encoder_driver *driver;
- struct drm_encoder_slave *encoder;
- struct drm_connector *connector;
- struct i2c_client *i2c_slave;
- int ret;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ int ret = 0;
encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
if (encoder == NULL)
return -ENOMEM;
- i2c_slave = of_find_i2c_device_by_node(np);
- if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
- dev_err(drm->dev, "failed to find i2c slave encoder\n");
- return -EPROBE_DEFER;
- }
-
- if (i2c_slave->dev.driver == NULL) {
- dev_err(drm->dev, "failed to find i2c slave driver\n");
+ /* Locate drm bridge from the hdmi encoder DT node */
+ bridge = of_drm_find_bridge(np);
+ if (!bridge)
return -EPROBE_DEFER;
- }
- driver =
- to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver));
- ret = driver->encoder_init(i2c_slave, drm, encoder);
- if (ret) {
- dev_err(drm->dev, "failed to initialize i2c encoder slave\n");
- return ret;
- }
-
- encoder->base.possible_crtcs = 1;
- encoder->base.possible_clones = 0;
- ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+ encoder->possible_crtcs = 1;
+ encoder->possible_clones = 0;
+ ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
return ret;
- drm_encoder_helper_add(&encoder->base,
- &arcpgu_drm_encoder_helper_funcs);
-
- arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
- GFP_KERNEL);
- if (!arcpgu_connector) {
- ret = -ENOMEM;
- goto error_encoder_cleanup;
- }
-
- connector = &arcpgu_connector->connector;
- drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
- ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0) {
- dev_err(drm->dev, "failed to initialize drm connector\n");
- goto error_encoder_cleanup;
- }
+ /* Link drm_bridge to encoder */
+ bridge->encoder = encoder;
+ encoder->bridge = bridge;
- ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
- if (ret < 0) {
- dev_err(drm->dev, "could not attach connector to encoder\n");
- drm_connector_unregister(connector);
- goto error_connector_cleanup;
- }
-
- arcpgu_connector->encoder_slave = encoder;
-
- return 0;
-
-error_connector_cleanup:
- drm_connector_cleanup(connector);
+ ret = drm_bridge_attach(drm, bridge);
+ if (ret)
+ drm_encoder_cleanup(encoder);
-error_encoder_cleanup:
- drm_encoder_cleanup(&encoder->base);
return ret;
}
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 48019ae22ddb..7d4e5aa77195 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -150,15 +150,14 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
clk_prepare_enable(hdlcd->clk);
hdlcd_crtc_mode_set_nofb(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
+ drm_crtc_vblank_on(crtc);
}
static void hdlcd_crtc_disable(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- if (!crtc->state->active)
- return;
-
+ drm_crtc_vblank_off(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
clk_disable_unprepare(hdlcd->clk);
}
@@ -223,14 +222,12 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
{
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
- unsigned int depth, bpp;
u32 src_w, src_h, dest_w, dest_h;
dma_addr_t scanout_start;
if (!plane->state->fb)
return;
- drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
dest_w = plane->state->crtc_w;
@@ -238,7 +235,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
scanout_start = gem->paddr + plane->state->fb->offsets[0] +
plane->state->crtc_y * plane->state->fb->pitches[0] +
- plane->state->crtc_x * bpp / 8;
+ plane->state->crtc_x *
+ drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
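
The hdlcd plane hunk above drops drm_fb_get_bpp_depth() and derives the scanout address from the per-format bytes-per-pixel instead: start = paddr + offsets[0] + crtc_y * pitches[0] + crtc_x * drm_format_plane_cpp(format, 0). A small standalone sketch of that address arithmetic, with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Byte address of pixel (x, y) in plane 0, same arithmetic as the hunk above. */
static uint64_t scanout_start(uint64_t paddr, uint32_t offset,
			      uint32_t pitch, uint32_t cpp, int x, int y)
{
	return paddr + offset + (uint64_t)y * pitch + (uint64_t)x * cpp;
}

int main(void)
{
	/* Hypothetical XRGB8888 buffer: 4 bytes per pixel, 1920-pixel-wide pitch. */
	uint64_t addr = scanout_start(0x20000000, 0, 1920 * 4, 4, 10, 5);

	printf("0x%llx\n", (unsigned long long)addr);	/* prints 0x20009628 */
	return 0;
}
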
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index fb6a418ce6be..e5f4f4a6546d 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -268,9 +268,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
@@ -337,14 +335,10 @@ static int hdlcd_drm_bind(struct device *dev)
if (ret)
goto err_free;
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_unload;
-
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
- goto err_unregister;
+ goto err_unload;
}
ret = pm_runtime_set_active(dev);
@@ -371,22 +365,29 @@ static int hdlcd_drm_bind(struct device *dev)
goto err_fbdev;
}
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_register;
+
return 0;
+err_register:
+ if (hdlcd->fbdev) {
+ drm_fbdev_cma_fini(hdlcd->fbdev);
+ hdlcd->fbdev = NULL;
+ }
err_fbdev:
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
drm_vblank_cleanup(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
component_unbind_all(dev, drm);
-err_unregister:
- drm_dev_unregister(drm);
err_unload:
drm_irq_uninstall(drm);
of_reserved_mem_device_release(drm->dev);
err_free:
+ drm_mode_config_cleanup(drm);
dev_set_drvdata(dev, NULL);
drm_dev_unref(drm);
@@ -398,6 +399,7 @@ static void hdlcd_drm_unbind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ drm_dev_unregister(drm);
if (hdlcd->fbdev) {
drm_fbdev_cma_fini(hdlcd->fbdev);
hdlcd->fbdev = NULL;
@@ -411,7 +413,6 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
drm_mode_config_cleanup(drm);
- drm_dev_unregister(drm);
drm_dev_unref(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
@@ -453,7 +454,8 @@ static int hdlcd_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
match);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 9280358b8f15..32f746e31379 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -42,6 +42,7 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
+ atomic_set(&malidp->config_valid, 0);
hwdev->set_config_valid(hwdev);
/* don't wait for config_valid flag if we are in config mode */
if (hwdev->in_config_mode(hwdev))
@@ -91,8 +92,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_planes(drm, state, 0);
malidp_atomic_commit_hw_done(state);
@@ -155,6 +155,12 @@ static int malidp_init(struct drm_device *drm)
return 0;
}
+static void malidp_fini(struct drm_device *drm)
+{
+ malidp_de_planes_destroy(drm);
+ drm_mode_config_cleanup(drm);
+}
+
static int malidp_irq_init(struct platform_device *pdev)
{
int irq_de, irq_se, ret = 0;
@@ -197,9 +203,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
@@ -355,10 +359,6 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto init_fail;
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto register_fail;
-
/* Set the CRTC's port so that the encoder component can find it */
ep = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!ep) {
@@ -377,6 +377,8 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto irq_init_fail;
+ drm->irq_enabled = true;
+
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
@@ -395,23 +397,31 @@ static int malidp_bind(struct device *dev)
}
drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto register_fail;
+
return 0;
+register_fail:
+ if (malidp->fbdev) {
+ drm_fbdev_cma_fini(malidp->fbdev);
+ malidp->fbdev = NULL;
+ }
fbdev_fail:
drm_vblank_cleanup(drm);
vblank_fail:
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
+ drm->irq_enabled = false;
irq_init_fail:
component_unbind_all(dev, drm);
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
port_fail:
- drm_dev_unregister(drm);
-register_fail:
- malidp_de_planes_destroy(drm);
- drm_mode_config_cleanup(drm);
+ malidp_fini(drm);
init_fail:
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
@@ -432,6 +442,7 @@ static void malidp_unbind(struct device *dev)
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev = malidp->dev;
+ drm_dev_unregister(drm);
if (malidp->fbdev) {
drm_fbdev_cma_fini(malidp->fbdev);
malidp->fbdev = NULL;
@@ -443,9 +454,7 @@ static void malidp_unbind(struct device *dev)
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
- drm_dev_unregister(drm);
- malidp_de_planes_destroy(drm);
- drm_mode_config_cleanup(drm);
+ malidp_fini(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
clk_disable_unprepare(hwdev->mclk);
@@ -493,7 +502,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
+ port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
match);
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 271d2fb9711c..9fc8a2e405e4 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -39,6 +39,9 @@ struct malidp_plane_state {
/* size of the required rotation memory if plane is rotated */
u32 rotmem_size;
+ /* internal format ID */
+ u8 format;
+ u8 n_planes;
};
#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index a6132f1d58c1..4bdf531f7844 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -125,6 +125,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
@@ -198,9 +199,6 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
{
- unsigned int depth;
- int bpp;
-
/* RGB888 or BGR888 can't be rotated */
if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
return -EINVAL;
@@ -210,9 +208,7 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
- drm_fb_get_bpp_depth(fmt, &depth, &bpp);
-
- return w * bpp;
+ return w * drm_format_plane_cpp(fmt, 0) * 8;
}
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
@@ -271,6 +267,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
@@ -441,6 +438,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp500_de_formats,
.n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+ .bus_align_bytes = 8,
},
.query_hw = malidp500_query_hw,
.enter_config_mode = malidp500_enter_config_mode,
@@ -473,6 +471,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp550_de_formats,
.n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .bus_align_bytes = 8,
},
.query_hw = malidp550_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
@@ -506,6 +505,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp550_de_formats,
.n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .bus_align_bytes = 16,
},
.query_hw = malidp650_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
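
The rotation-memory hunk above expresses the comment's formula, size = rotated_width * (bpp / 8) * 8, as w * drm_format_plane_cpp(fmt, 0) * 8 (the factor of 8 being the number of buffered lines, per the comment). As a worked example with assumed numbers: a 1920-pixel-wide XRGB8888 plane at 4 bytes per pixel needs 1920 * 4 * 8 = 61440 bytes of rotation memory, while RGB888/BGR888 are rejected outright because they cannot be rotated.
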
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 141743e9f3a6..087e1202db3d 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -88,6 +88,9 @@ struct malidp_hw_regmap {
/* list of supported input formats for each layer */
const struct malidp_input_format *input_formats;
const u8 n_input_formats;
+
+ /* pitch alignment requirement in bytes */
+ const u8 bus_align_bytes;
};
struct malidp_hw_device {
@@ -229,6 +232,12 @@ void malidp_se_irq_fini(struct drm_device *drm);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
+static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
+ unsigned int pitch)
+{
+ return !(pitch & (hwdev->map.bus_align_bytes - 1));
+}
+
/*
* background color components are defined as 12bits values,
* they will be shifted right when stored on hardware that
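
The malidp_hw_pitch_valid() helper added above assumes bus_align_bytes is a power of two (8 for the DP500/DP550 entries and 16 for the DP650 entry in the tables above), so pitch & (align - 1) is the remainder of pitch modulo align and the pitch is valid when that remainder is zero. A tiny standalone illustration with made-up pitches:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the inline helper above; align must be a power of two. */
static bool pitch_valid(unsigned int pitch, unsigned int align)
{
	return !(pitch & (align - 1));
}

int main(void)
{
	printf("%d %d %d\n",
	       pitch_valid(7680, 8),	/* 1: multiple of 8 */
	       pitch_valid(7684, 8),	/* 0: remainder 4 */
	       pitch_valid(7680, 16));	/* 1: also a multiple of 16 */
	return 0;
}
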
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 82c193e5e0d6..63eec8f37cfc 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -27,6 +27,10 @@
#define LAYER_H_FLIP (1 << 10)
#define LAYER_V_FLIP (1 << 11)
#define LAYER_ROT_MASK (0xf << 8)
+#define LAYER_COMP_MASK (0x3 << 12)
+#define LAYER_COMP_PIXEL (0x3 << 12)
+#define LAYER_COMP_PLANE (0x2 << 12)
+#define MALIDP_LAYER_COMPOSE 0x008
#define MALIDP_LAYER_SIZE 0x00c
#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
@@ -34,6 +38,14 @@
#define MALIDP_LAYER_OFFSET 0x014
#define MALIDP_LAYER_STRIDE 0x018
+/*
+ * This 4-entry look-up-table is used to determine the full 8-bit alpha value
+ * for formats with 1- or 2-bit alpha channels.
+ * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
+ * opacity for 2-bit formats.
+ */
+#define MALIDP_ALPHA_LUT 0xffaa5500
+
static void malidp_de_plane_destroy(struct drm_plane *plane)
{
struct malidp_plane *mp = to_malidp_plane(plane);
@@ -46,7 +58,8 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
devm_kfree(plane->dev->dev, mp);
}
-struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
+static struct
+drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
struct malidp_plane_state *state, *m_state;
@@ -58,13 +71,15 @@ struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
m_state = to_malidp_plane_state(plane->state);
__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
state->rotmem_size = m_state->rotmem_size;
+ state->format = m_state->format;
+ state->n_planes = m_state->n_planes;
}
return &state->base;
}
-void malidp_destroy_plane_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+static void malidp_destroy_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct malidp_plane_state *m_state = to_malidp_plane_state(state);
@@ -75,6 +90,7 @@ void malidp_destroy_plane_state(struct drm_plane *plane,
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
.destroy = malidp_de_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
@@ -86,17 +102,29 @@ static int malidp_de_plane_check(struct drm_plane *plane,
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
- u8 format_id;
+ struct drm_framebuffer *fb;
+ int i;
u32 src_w, src_h;
if (!state->crtc || !state->fb)
return 0;
- format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
- state->fb->pixel_format);
- if (format_id == MALIDP_INVALID_FORMAT_ID)
+ fb = state->fb;
+
+ ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
+ fb->pixel_format);
+ if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
+ ms->n_planes = drm_format_num_planes(fb->pixel_format);
+ for (i = 0; i < ms->n_planes; i++) {
+ if (!malidp_hw_pitch_valid(mp->hwdev, fb->pitches[i])) {
+ DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
+ fb->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
@@ -135,17 +163,13 @@ static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_gem_cma_object *obj;
struct malidp_plane *mp;
const struct malidp_hw_regmap *map;
- u8 format_id;
+ struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u16 ptr;
- u32 format, src_w, src_h, dest_w, dest_h, val = 0;
- int num_planes, i;
+ u32 src_w, src_h, dest_w, dest_h, val;
+ int i;
mp = to_malidp_plane(plane);
-
map = &mp->hwdev->map;
- format = plane->state->fb->pixel_format;
- format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
- num_planes = drm_format_num_planes(format);
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
@@ -158,9 +182,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
dest_h = plane->state->crtc_h;
}
- malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
+ malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
- for (i = 0; i < num_planes; i++) {
+ for (i = 0; i < ms->n_planes; i++) {
/* calculate the offset for the layer's plane registers */
ptr = mp->layer->ptr + (i << 4);
@@ -181,9 +205,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
LAYER_V_VAL(plane->state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
- /* first clear the rotation bits in the register */
- malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
- mp->layer->base + MALIDP_LAYER_CONTROL);
+ /* first clear the rotation bits */
+ val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
+ val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
if (plane->state->rotation & DRM_ROTATE_MASK)
@@ -193,11 +217,18 @@ static void malidp_de_plane_update(struct drm_plane *plane,
if (plane->state->rotation & DRM_REFLECT_Y)
val |= LAYER_H_FLIP;
+ /*
+ * always enable pixel alpha blending until we have a way to change
+ * blend modes
+ */
+ val &= ~LAYER_COMP_MASK;
+ val |= LAYER_COMP_PIXEL;
+
/* set the 'enable layer' bit */
val |= LAYER_ENABLE;
- malidp_hw_setbits(mp->hwdev, val,
- mp->layer->base + MALIDP_LAYER_CONTROL);
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
}
static void malidp_de_plane_disable(struct drm_plane *plane,
@@ -222,6 +253,8 @@ int malidp_de_planes_init(struct drm_device *drm)
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+ unsigned long flags = DRM_ROTATE_0 | DRM_ROTATE_90 | DRM_ROTATE_180 |
+ DRM_ROTATE_270 | DRM_REFLECT_X | DRM_REFLECT_Y;
u32 *formats;
int ret, i, j, n;
@@ -254,26 +287,18 @@ int malidp_de_planes_init(struct drm_device *drm)
if (ret < 0)
goto cleanup;
- if (!drm->mode_config.rotation_property) {
- unsigned long flags = DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270 |
- DRM_REFLECT_X |
- DRM_REFLECT_Y;
- drm->mode_config.rotation_property =
- drm_mode_create_rotation_property(drm, flags);
- }
- /* SMART layer can't be rotated */
- if (drm->mode_config.rotation_property && (id != DE_SMART))
- drm_object_attach_property(&plane->base.base,
- drm->mode_config.rotation_property,
- DRM_ROTATE_0);
-
drm_plane_helper_add(&plane->base,
&malidp_de_plane_helper_funcs);
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
+
+ /* Skip the features which the SMART layer doesn't have */
+ if (id == DE_SMART)
+ continue;
+
+ drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
+ malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
+ plane->layer->base + MALIDP_LAYER_COMPOSE);
}
kfree(formats);
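
The MALIDP_ALPHA_LUT value 0xffaa5500 written to MALIDP_LAYER_COMPOSE above packs the four 8-bit entries described in its comment: 0x00, 0x55, 0xaa and 0xff, i.e. roughly 0%, 33%, 66% and 100% opacity (0x55/0xff ≈ 33%, 0xaa/0xff ≈ 67%). Assuming entry 0 occupies the least significant byte, each entry can be read back with a shift and mask, as in this small sketch:

#include <stdio.h>

int main(void)
{
	unsigned int lut = 0xffaa5500;	/* value from MALIDP_ALPHA_LUT above */
	int i;

	/* Prints 00 55 aa ff: 1-bit alpha formats use entries 0 and 3 (0%/100%),
	 * 2-bit formats use all four (0%/33%/66%/100%). */
	for (i = 0; i < 4; i++)
		printf("%02x ", (lut >> (8 * i)) & 0xff);
	printf("\n");
	return 0;
}
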
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ffd673615772..a18f156c8b66 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,5 +1,5 @@
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
- armada_gem.o armada_overlay.o
+ armada_gem.o armada_overlay.o armada_trace.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index a51f8cbcfe26..95cb3966b2ca 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -18,6 +18,7 @@
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
+#include "armada_trace.h"
struct armada_frame_work {
struct armada_plane_work work;
@@ -164,19 +165,37 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
}
}
+void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ u32 addr = drm_fb_obj(fb)->dev_addr;
+ u32 pixel_format = fb->pixel_format;
+ int num_planes = drm_format_num_planes(pixel_format);
+ int i;
+
+ if (num_planes > 3)
+ num_planes = 3;
+
+ for (i = 0; i < num_planes; i++)
+ addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+ x * drm_format_plane_cpp(pixel_format, i);
+ for (; i < 3; i++)
+ addrs[i] = 0;
+}
+
static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
int x, int y, struct armada_regs *regs, bool interlaced)
{
- struct armada_gem_object *obj = drm_fb_obj(fb);
unsigned pitch = fb->pitches[0];
- unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
- uint32_t addr_odd, addr_even;
+ u32 addrs[3], addr_odd, addr_even;
unsigned i = 0;
DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
pitch, x, y, fb->bits_per_pixel);
- addr_odd = addr_even = obj->dev_addr + offset;
+ armada_drm_plane_calc_addrs(addrs, fb, x, y);
+
+ addr_odd = addr_even = addrs[0];
if (interlaced) {
addr_even += pitch;
@@ -192,17 +211,18 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
}
static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
- struct armada_plane *plane)
+ struct drm_plane *plane)
{
- struct armada_plane_work *work = xchg(&plane->work, NULL);
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_plane_work *work = xchg(&dplane->work, NULL);
/* Handle any pending frame work. */
if (work) {
- work->fn(dcrtc, plane, work);
+ work->fn(dcrtc, dplane, work);
drm_crtc_vblank_put(&dcrtc->crtc);
}
- wake_up(&plane->frame_wait);
+ wake_up(&dplane->frame_wait);
}
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
@@ -307,14 +327,12 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
{
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
-
/*
* Tell the DRM core that vblank IRQs aren't going to happen for
* a while. This cleans up any pending vblank events for us.
*/
drm_crtc_vblank_off(&dcrtc->crtc);
- armada_drm_plane_work_run(dcrtc, plane);
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
@@ -416,10 +434,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
- if (ovl_plane) {
- struct armada_plane *plane = drm_to_armada_plane(ovl_plane);
- armada_drm_plane_work_run(dcrtc, plane);
- }
+ if (ovl_plane)
+ armada_drm_plane_work_run(dcrtc, ovl_plane);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -449,10 +465,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
spin_unlock(&dcrtc->irq_lock);
- if (stat & GRA_FRAME_IRQ) {
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
- armada_drm_plane_work_run(dcrtc, plane);
- }
+ if (stat & GRA_FRAME_IRQ)
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -466,6 +480,8 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ trace_armada_drm_irq(&dcrtc->crtc, stat);
+
/* Mask out those interrupts we haven't enabled */
v = stat & dcrtc->irq_ena;
@@ -531,6 +547,35 @@ static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
return val;
}
+static void armada_drm_primary_set(struct drm_crtc *crtc,
+ struct drm_plane *plane, int x, int y)
+{
+ struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[8];
+ bool interlaced = dcrtc->interlaced;
+ unsigned i;
+ u32 ctrl0;
+
+ i = armada_drm_crtc_calc_fb(plane->fb, x, y, regs, interlaced);
+
+ armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
+
+ ctrl0 = state->ctrl0;
+ if (interlaced)
+ ctrl0 |= CFG_GRA_FTOGGLE;
+
+ armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_end(regs, i);
+ armada_drm_crtc_update_regs(dcrtc, regs);
+}
+
/* The mode_config.mutex will be held for this call */
static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *adj,
@@ -547,9 +592,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
- i = armada_drm_crtc_calc_fb(dcrtc->crtc.primary->fb,
- x, y, regs, interlaced);
+ val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
+ val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
+
+ if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+
+ drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
+ drm_to_armada_plane(crtc->primary)->state.src_hw =
+ drm_to_armada_plane(crtc->primary)->state.dst_hw =
+ adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+ drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+ i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
lm = adj->crtc_htotal - adj->crtc_hsync_end;
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
@@ -625,8 +681,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
- armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
- armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
@@ -638,22 +692,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
}
- val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
- if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
-
- if (interlaced)
- val |= CFG_GRA_FTOGGLE;
-
- armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
- CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
- CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
- LCD_SPU_DMA_CTRL0);
-
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
@@ -662,6 +700,8 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
+
+ armada_drm_primary_set(crtc, crtc->primary, x, y);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
armada_drm_crtc_update(dcrtc);
@@ -1038,7 +1078,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
* interrupt, so complete it now.
*/
if (dpms_blanked(dcrtc->dpms))
- armada_drm_plane_work_run(dcrtc, drm_to_armada_plane(dcrtc->crtc.primary));
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
return 0;
}
@@ -1172,7 +1212,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
- writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 04fdd22d483b..b08043e8cc3b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -41,10 +41,18 @@ struct armada_plane_work {
struct armada_plane_work *);
};
+struct armada_plane_state {
+ u32 src_hw;
+ u32 dst_hw;
+ u32 dst_yx;
+ u32 ctrl0;
+};
+
struct armada_plane {
struct drm_plane base;
wait_queue_head_t frame_wait;
struct armada_plane_work *work;
+ struct armada_plane_state state;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
@@ -54,6 +62,8 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
struct armada_plane_work *armada_drm_plane_work_cancel(
struct armada_crtc *dcrtc, struct armada_plane *plane);
+void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
+ int x, int y);
struct armada_crtc {
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index d4f7ab0a30d4..90222e60d2d6 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -113,7 +113,7 @@ static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
struct drm_info_node *node;
node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (node == NULL) {
+ if (!node) {
debugfs_remove(ent);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index 3b2bb6128d40..77952d559a3c 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -53,6 +53,7 @@ struct armada_variant {
extern const struct armada_variant armada510_ops;
struct armada_private {
+ struct drm_device drm;
struct work_struct fb_unref_work;
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 1e0e68f608e4..07086b427c22 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -49,106 +49,6 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static int armada_drm_load(struct drm_device *dev, unsigned long flags)
-{
- struct armada_private *priv;
- struct resource *mem = NULL;
- int ret, n;
-
- for (n = 0; ; n++) {
- struct resource *r = platform_get_resource(dev->platformdev,
- IORESOURCE_MEM, n);
- if (!r)
- break;
-
- /* Resources above 64K are graphics memory */
- if (resource_size(r) > SZ_64K)
- mem = r;
- else
- return -EINVAL;
- }
-
- if (!mem)
- return -ENXIO;
-
- if (!devm_request_mem_region(dev->dev, mem->start,
- resource_size(mem), "armada-drm"))
- return -EBUSY;
-
- priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- DRM_ERROR("failed to allocate private\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(dev->platformdev, dev);
- dev->dev_private = priv;
-
- INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
- INIT_KFIFO(priv->fb_unref);
-
- /* Mode setting support */
- drm_mode_config_init(dev);
- dev->mode_config.min_width = 320;
- dev->mode_config.min_height = 200;
-
- /*
- * With vscale enabled, the maximum width is 1920 due to the
- * 1920 by 3 lines RAM
- */
- dev->mode_config.max_width = 1920;
- dev->mode_config.max_height = 2048;
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.funcs = &armada_drm_mode_config_funcs;
- drm_mm_init(&priv->linear, mem->start, resource_size(mem));
- mutex_init(&priv->linear_lock);
-
- ret = component_bind_all(dev->dev, dev);
- if (ret)
- goto err_kms;
-
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
- if (ret)
- goto err_comp;
-
- dev->irq_enabled = true;
-
- ret = armada_fbdev_init(dev);
- if (ret)
- goto err_comp;
-
- drm_kms_helper_poll_init(dev);
-
- return 0;
-
- err_comp:
- component_unbind_all(dev->dev, dev);
- err_kms:
- drm_mode_config_cleanup(dev);
- drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
-
- return ret;
-}
-
-static int armada_drm_unload(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- drm_kms_helper_poll_fini(dev);
- armada_fbdev_fini(dev);
-
- component_unbind_all(dev->dev, dev);
-
- drm_mode_config_cleanup(dev);
- drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- dev->dev_private = NULL;
-
- return 0;
-}
-
/* These are called under the vbl_lock. */
static int armada_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
@@ -186,16 +86,10 @@ static const struct file_operations armada_drm_fops = {
};
static struct drm_driver armada_drm_driver = {
- .load = armada_drm_load,
.lastclose = armada_drm_lastclose,
- .unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = armada_drm_debugfs_init,
- .debugfs_cleanup = armada_drm_debugfs_cleanup,
-#endif
.gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -218,12 +112,138 @@ static struct drm_driver armada_drm_driver = {
static int armada_drm_bind(struct device *dev)
{
- return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+ struct armada_private *priv;
+ struct resource *mem = NULL;
+ int ret, n;
+
+ for (n = 0; ; n++) {
+ struct resource *r = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM, n);
+ if (!r)
+ break;
+
+ /* Resources above 64K are graphics memory */
+ if (resource_size(r) > SZ_64K)
+ mem = r;
+ else
+ return -EINVAL;
+ }
+
+ if (!mem)
+ return -ENXIO;
+
+ if (!devm_request_mem_region(dev, mem->start, resource_size(mem),
+ "armada-drm"))
+ return -EBUSY;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The drm_device structure must be at the start of
+ * armada_private for drm_dev_unref() to work correctly.
+ */
+ BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
+
+ ret = drm_dev_init(&priv->drm, &armada_drm_driver, dev);
+ if (ret) {
+ dev_err(dev, "[" DRM_NAME ":%s] drm_dev_init failed: %d\n",
+ __func__, ret);
+ kfree(priv);
+ return ret;
+ }
+
+ priv->drm.platformdev = to_platform_device(dev);
+ priv->drm.dev_private = priv;
+
+ platform_set_drvdata(priv->drm.platformdev, &priv->drm);
+
+ INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+ INIT_KFIFO(priv->fb_unref);
+
+ /* Mode setting support */
+ drm_mode_config_init(&priv->drm);
+ priv->drm.mode_config.min_width = 320;
+ priv->drm.mode_config.min_height = 200;
+
+ /*
+ * With vscale enabled, the maximum width is 1920 due to the
+ * 1920 by 3 lines RAM
+ */
+ priv->drm.mode_config.max_width = 1920;
+ priv->drm.mode_config.max_height = 2048;
+
+ priv->drm.mode_config.preferred_depth = 24;
+ priv->drm.mode_config.funcs = &armada_drm_mode_config_funcs;
+ drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+ mutex_init(&priv->linear_lock);
+
+ ret = component_bind_all(dev, &priv->drm);
+ if (ret)
+ goto err_kms;
+
+ ret = drm_vblank_init(&priv->drm, priv->drm.mode_config.num_crtc);
+ if (ret)
+ goto err_comp;
+
+ priv->drm.irq_enabled = true;
+
+ ret = armada_fbdev_init(&priv->drm);
+ if (ret)
+ goto err_comp;
+
+ drm_kms_helper_poll_init(&priv->drm);
+
+ ret = drm_dev_register(&priv->drm, 0);
+ if (ret)
+ goto err_poll;
+
+#ifdef CONFIG_DEBUG_FS
+ armada_drm_debugfs_init(priv->drm.primary);
+#endif
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ armada_drm_driver.name, armada_drm_driver.major,
+ armada_drm_driver.minor, armada_drm_driver.patchlevel,
+ armada_drm_driver.date, dev_name(dev),
+ priv->drm.primary->index);
+
+ return 0;
+
+ err_poll:
+ drm_kms_helper_poll_fini(&priv->drm);
+ armada_fbdev_fini(&priv->drm);
+ err_comp:
+ component_unbind_all(dev, &priv->drm);
+ err_kms:
+ drm_mode_config_cleanup(&priv->drm);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+ drm_dev_unref(&priv->drm);
+ return ret;
}
static void armada_drm_unbind(struct device *dev)
{
- drm_put_dev(dev_get_drvdata(dev));
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct armada_private *priv = drm->dev_private;
+
+ drm_kms_helper_poll_fini(&priv->drm);
+ armada_fbdev_fini(&priv->drm);
+
+#ifdef CONFIG_DEBUG_FS
+ armada_drm_debugfs_cleanup(priv->drm.primary);
+#endif
+ drm_dev_unregister(&priv->drm);
+
+ component_unbind_all(dev, &priv->drm);
+
+ drm_mode_config_cleanup(&priv->drm);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+
+ drm_dev_unref(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
@@ -254,7 +274,7 @@ static void armada_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
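
The armada conversion above replaces the legacy .load/.unload driver hooks with drm_dev_init()/drm_dev_register() driven from the component bind callback. The drm_device is embedded at offset 0 of the driver's private structure because drm_dev_unref() ultimately kfree()s the drm_device pointer, so keeping the two pointers identical releases the whole allocation. A minimal sketch of that pattern, using hypothetical foo_* names rather than the armada code itself:

	struct foo_private {
		struct drm_device drm;	/* must remain the first member */
		/* driver-private state follows */
	};

	static int foo_bind(struct device *dev)
	{
		struct foo_private *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		int ret;

		if (!priv)
			return -ENOMEM;

		ret = drm_dev_init(&priv->drm, &foo_driver, dev);
		if (ret) {
			kfree(priv);
			return ret;
		}
		/* ... mode config setup, component_bind_all(), vblank init ... */
		return drm_dev_register(&priv->drm, 0);
	}
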
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ca73ad8614fe..c5dc06a55883 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -19,16 +19,10 @@
static /*const*/ struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int armada_fb_create(struct drm_fb_helper *fbh,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 806791897304..768087ddb046 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -212,7 +212,7 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
return obj;
}
-struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct armada_gem_object *obj;
@@ -419,7 +419,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/* Prime support */
-struct sg_table *
+static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
@@ -594,11 +594,7 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
int ret;
dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
- DMA_TO_DEVICE);
- if (!dobj->sgt) {
- DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
- return -EINVAL;
- }
+ DMA_TO_DEVICE);
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 152b4e716269..6743615232f5 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -15,6 +15,7 @@
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
+#include "armada_trace.h"
struct armada_ovl_plane_properties {
uint32_t colorkey_yr;
@@ -32,10 +33,6 @@ struct armada_ovl_plane_properties {
struct armada_ovl_plane {
struct armada_plane base;
struct drm_framebuffer *old_fb;
- uint32_t src_hw;
- uint32_t dst_hw;
- uint32_t dst_yx;
- uint32_t ctrl0;
struct {
struct armada_plane_work work;
struct armada_regs regs[13];
@@ -87,6 +84,8 @@ static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
{
struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);
+ trace_armada_ovl_plane_work(&dcrtc->crtc, &plane->base);
+
armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
armada_ovl_retire_fb(dplane, NULL);
}
@@ -120,6 +119,10 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
bool visible;
int ret;
+ trace_armada_ovl_plane_update(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
DRM_ROTATE_0,
0, INT_MAX, true, false, &visible);
@@ -141,22 +144,22 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
- if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+ if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
- dplane->src_hw = val;
+ dplane->base.state.src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- dplane->dst_hw = val;
+ dplane->base.state.dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
val = dest.y1 << 16 | dest.x1;
- dplane->dst_yx = val;
+ dplane->base.state.dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
return 0;
- } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ } else if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
dcrtc->base + LCD_SPU_SRAM_PARA1);
@@ -166,9 +169,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
armada_drm_plane_work_cancel(dcrtc, &dplane->base);
if (plane->fb != fb) {
- struct armada_gem_object *obj = drm_fb_obj(fb);
- uint32_t addr[3], pixel_format;
- int i, num_planes, hsub;
+ u32 addrs[3], pixel_format;
+ int num_planes, hsub;
/*
* Take a reference on the new framebuffer - we want to
@@ -182,6 +184,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
src_y = src.y1 >> 16;
src_x = src.x1 >> 16;
+ armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
+
pixel_format = fb->pixel_format;
hsub = drm_format_horz_chroma_subsampling(pixel_format);
num_planes = drm_format_num_planes(pixel_format);
@@ -194,24 +198,17 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (src_x & (hsub - 1) && num_planes == 1)
ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
- for (i = 0; i < num_planes; i++)
- addr[i] = obj->dev_addr + fb->offsets[i] +
- src_y * fb->pitches[i] +
- src_x * drm_format_plane_cpp(pixel_format, i);
- for (; i < ARRAY_SIZE(addr); i++)
- addr[i] = 0;
-
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -223,28 +220,28 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
}
val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
- if (dplane->src_hw != val) {
- dplane->src_hw = val;
+ if (dplane->base.state.src_hw != val) {
+ dplane->base.state.src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- if (dplane->dst_hw != val) {
- dplane->dst_hw = val;
+ if (dplane->base.state.dst_hw != val) {
+ dplane->base.state.dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
val = dest.y1 << 16 | dest.x1;
- if (dplane->dst_yx != val) {
- dplane->dst_yx = val;
+ if (dplane->base.state.dst_yx != val) {
+ dplane->base.state.dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
- if (dplane->ctrl0 != ctrl0) {
- dplane->ctrl0 = ctrl0;
+ if (dplane->base.state.ctrl0 != ctrl0) {
+ dplane->base.state.ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
@@ -275,7 +272,7 @@ static int armada_ovl_plane_disable(struct drm_plane *plane)
armada_drm_crtc_plane_disable(dcrtc, plane);
dcrtc->plane = NULL;
- dplane->ctrl0 = 0;
+ dplane->base.state.ctrl0 = 0;
fb = xchg(&dplane->old_fb, NULL);
if (fb)
diff --git a/drivers/gpu/drm/armada/armada_trace.c b/drivers/gpu/drm/armada/armada_trace.c
new file mode 100644
index 000000000000..068b336ba75f
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_trace.c
@@ -0,0 +1,4 @@
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "armada_trace.h"
+#endif
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
new file mode 100644
index 000000000000..dc0cba70fd1a
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -0,0 +1,66 @@
+#if !defined(ARMADA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ARMADA_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM armada
+#define TRACE_INCLUDE_FILE armada_trace
+
+TRACE_EVENT(armada_drm_irq,
+ TP_PROTO(struct drm_crtc *crtc, u32 stat),
+ TP_ARGS(crtc, stat),
+ TP_STRUCT__entry(
+ __field(struct drm_crtc *, crtc)
+ __field(u32, stat)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->stat = stat;
+ ),
+ TP_printk("crtc %p stat 0x%08x",
+ __entry->crtc, __entry->stat)
+);
+
+TRACE_EVENT(armada_ovl_plane_update,
+ TP_PROTO(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h),
+ TP_ARGS(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h),
+ TP_STRUCT__entry(
+ __field(struct drm_plane *, plane)
+ __field(struct drm_crtc *, crtc)
+ __field(struct drm_framebuffer *, fb)
+ ),
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->crtc = crtc;
+ __entry->fb = fb;
+ ),
+ TP_printk("plane %p crtc %p fb %p",
+ __entry->plane, __entry->crtc, __entry->fb)
+);
+
+TRACE_EVENT(armada_ovl_plane_work,
+ TP_PROTO(struct drm_crtc *crtc, struct drm_plane *plane),
+ TP_ARGS(crtc, plane),
+ TP_STRUCT__entry(
+ __field(struct drm_plane *, plane)
+ __field(struct drm_crtc *, crtc)
+ ),
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("plane %p crtc %p",
+ __entry->plane, __entry->crtc)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index f54afd2113a9..fd7c9eec92e4 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -188,9 +188,7 @@ static const struct file_operations ast_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = ast_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 7a86e24e2687..d6f5ec64c667 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -253,7 +253,7 @@ static int astfb_create(struct drm_fb_helper *helper,
err_release_fbi:
drm_fb_helper_release_fbi(helper);
err_free_vram:
- vfree(afbdev->sysram);
+ vfree(sysram);
return ret;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 0743e65cb240..2a1368fac1d1 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
.ttm_tt_populate = ast_ttm_tt_populate,
.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = ast_bo_evict_flags,
.move = NULL,
.verify_access = ast_bo_verify_access,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 5f484310bee9..cbd0070265c9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -464,7 +464,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&dc->commit.wait.lock);
@@ -521,6 +521,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (async)
queue_work(dc->wq, &commit->work);
else
@@ -748,9 +749,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
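
The drm_atomic_state_free() to drm_atomic_state_put() switch above follows the conversion of struct drm_atomic_state to reference counting: after drm_atomic_helper_swap_state() the caller takes an extra reference before handing the state to the (possibly asynchronous) commit work, and the work drops it once the commit has finished. A condensed sketch of the pairing, with hypothetical names rather than the exact atmel-hlcdc flow:

	drm_atomic_helper_swap_state(state, true);
	drm_atomic_state_get(state);	/* reference owned by the commit work */
	if (async)
		queue_work(wq, &commit->work);
	else
		foo_commit_tail(commit);

	/* ...and at the end of the commit work: */
	drm_atomic_helper_cleanup_planes(dev, old_state);
	drm_atomic_state_put(old_state);	/* balances the get above */
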
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 9d4c030672f0..246ed1e33d8a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
- (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
+ drm_rotation_90_or_270(state->base.rotation))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
- if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+ if (drm_rotation_90_or_270(state->base.rotation)) {
tmp = state->crtc_w;
state->crtc_w = state->crtc_h;
state->crtc_h = tmp;
@@ -883,9 +883,9 @@ static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
return 0;
}
-static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
- const struct atmel_hlcdc_layer_desc *desc,
- struct atmel_hlcdc_plane_properties *props)
+static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
+ const struct atmel_hlcdc_layer_desc *desc,
+ struct atmel_hlcdc_plane_properties *props)
{
struct regmap *regmap = plane->layer.hlcdc->regmap;
@@ -902,10 +902,18 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_GA_MASK);
}
- if (desc->layout.xstride && desc->layout.pstride)
- drm_object_attach_property(&plane->base.base,
- plane->base.dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ if (desc->layout.xstride && desc->layout.pstride) {
+ int ret;
+
+ ret = drm_plane_create_rotation_property(&plane->base,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_90 |
+ DRM_ROTATE_180 |
+ DRM_ROTATE_270);
+ if (ret)
+ return ret;
+ }
if (desc->layout.csc) {
/*
@@ -925,6 +933,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 2),
0x40040890);
}
+
+ return 0;
}
static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
@@ -1036,7 +1046,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
&atmel_hlcdc_layer_plane_helper_funcs);
/* Set default property values*/
- atmel_hlcdc_plane_init_properties(plane, desc, props);
+ ret = atmel_hlcdc_plane_init_properties(plane, desc, props);
+ if (ret)
+ return ERR_PTR(ret);
return plane;
}
@@ -1054,15 +1066,6 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
if (!props->alpha)
return ERR_PTR(-ENOMEM);
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270);
- if (!dev->mode_config.rotation_property)
- return ERR_PTR(-ENOMEM);
-
return props;
}
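
The atmel-hlcdc hunks above move from the single dev->mode_config.rotation_property to the per-plane API: drm_plane_create_rotation_property() attaches the property to one plane together with its supported mask, and drm_rotation_90_or_270() replaces the open-coded DRM_ROTATE_90 | DRM_ROTATE_270 tests. A minimal sketch of the per-plane form (hypothetical foo_* plane driver, not the atmel code):

	static int foo_plane_init_rotation(struct drm_plane *plane)
	{
		return drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
							  DRM_ROTATE_0 |
							  DRM_ROTATE_90 |
							  DRM_ROTATE_180 |
							  DRM_ROTATE_270);
	}

	static int foo_plane_atomic_check(struct drm_plane *plane,
					  struct drm_plane_state *state)
	{
		/* 90/270 degree rotations swap the displayed width and height */
		if (drm_rotation_90_or_270(state->rotation))
			swap(state->crtc_w, state->crtc_h);
		return 0;
	}
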
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 534227df23f3..15a293e65b31 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -70,9 +70,7 @@ static const struct file_operations bochs_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index e1ec498a6b6e..da790a1c302a 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -22,14 +22,10 @@ static int bochsfb_mmap(struct fb_info *info,
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
.fb_mmap = bochsfb_mmap,
};
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 269cfca9ca06..099a3c688c26 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.ttm_tt_populate = ttm_pool_populate,
.ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bochs_bo_evict_flags,
.move = NULL,
.verify_access = bochs_bo_verify_access,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 10e12e74fc9f..eb8688ec6f18 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -39,6 +39,15 @@ config DRM_DW_HDMI_AHB_AUDIO
Designware HDMI block. This is used in conjunction with
the i.MX6 HDMI driver.
+config DRM_DW_HDMI_I2S_AUDIO
+ tristate "Synopsis Designware I2S Audio interface"
+ depends on SND_SOC
+ depends on DRM_DW_HDMI
+ select SND_SOC_HDMI_CODEC
+ help
+ Support the I2S Audio interface which is part of the Synopsys
+ DesignWare HDMI block.
+
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
@@ -57,6 +66,13 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_SIL_SII8620
+ tristate "Silicon Image SII8620 HDMI/MHL bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Silicon Image SII8620 HDMI/MHL bridge chip driver.
+
config DRM_SII902X
tristate "Silicon Image sii902x RGB/HDMI bridge"
depends on OF
@@ -74,6 +90,13 @@ config DRM_TOSHIBA_TC358767
---help---
Toshiba TC358767 eDP bridge chip driver.
+config DRM_TI_TFP410
+ tristate "TI TFP410 DVI/HDMI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ ---help---
+ Texas Instruments TFP410 DVI/HDMI Transmitter driver
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cdf3a3cf765d..2e83a7855399 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -4,9 +4,12 @@ obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
+obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index d2b0499ab7d7..2fed567f9943 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -6,6 +6,14 @@ config DRM_I2C_ADV7511
help
Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+config DRM_I2C_ADV7511_AUDIO
+ bool "ADV7511 HDMI Audio driver"
+ depends on DRM_I2C_ADV7511 && SND_SOC
+ select SND_SOC_HDMI_CODEC
+ help
+ Support the ADV7511 HDMI Audio interface. This is used in
+ conjunction with the ADV7511 HDMI driver.
+
config DRM_I2C_ADV7533
bool "ADV7533 encoder"
depends on DRM_I2C_ADV7511
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index 9019327fff4c..5ba675534f6e 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,3 +1,4 @@
adv7511-y := adv7511_drv.o
+adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 161c923d6162..992d76ce02bb 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -309,6 +309,8 @@ struct adv7511 {
struct drm_display_mode curr_mode;
unsigned int f_tmds;
+ unsigned int f_audio;
+ unsigned int audio_source;
unsigned int current_edid_segment;
uint8_t edid_buf[256];
@@ -334,6 +336,7 @@ struct adv7511 {
bool use_timing_gen;
enum adv7511_type type;
+ struct platform_device *audio_pdev;
};
#ifdef CONFIG_DRM_I2C_ADV7533
@@ -389,4 +392,17 @@ static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
}
#endif
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
+void adv7511_audio_exit(struct adv7511 *adv7511);
+#else /* CONFIG_DRM_I2C_ADV7511_AUDIO */
+static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+{
+ return 0;
+}
+static inline void adv7511_audio_exit(struct adv7511 *adv7511)
+{
+}
+#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
+
#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
new file mode 100644
index 000000000000..cf92ebfe6ab7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -0,0 +1,213 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Copyright (c) 2016, Linaro Limited
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <sound/core.h>
+#include <sound/hdmi-codec.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include "adv7511.h"
+
+static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
+ unsigned int *cts, unsigned int *n)
+{
+ switch (fs) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ }
+
+ *cts = ((f_tmds * *n) / (128 * fs)) * 1000;
+}
+
+static int adv7511_update_cts_n(struct adv7511 *adv7511)
+{
+ unsigned int cts = 0;
+ unsigned int n = 0;
+
+ adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
+ (cts >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
+ (cts >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
+ cts & 0xff);
+
+ return 0;
+}
+
+int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ unsigned int audio_source, i2s_format = 0;
+ unsigned int invert_clock;
+ unsigned int rate;
+ unsigned int len;
+
+ switch (hparms->sample_rate) {
+ case 32000:
+ rate = ADV7511_SAMPLE_FREQ_32000;
+ break;
+ case 44100:
+ rate = ADV7511_SAMPLE_FREQ_44100;
+ break;
+ case 48000:
+ rate = ADV7511_SAMPLE_FREQ_48000;
+ break;
+ case 88200:
+ rate = ADV7511_SAMPLE_FREQ_88200;
+ break;
+ case 96000:
+ rate = ADV7511_SAMPLE_FREQ_96000;
+ break;
+ case 176400:
+ rate = ADV7511_SAMPLE_FREQ_176400;
+ break;
+ case 192000:
+ rate = ADV7511_SAMPLE_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (hparms->sample_width) {
+ case 16:
+ len = ADV7511_I2S_SAMPLE_LEN_16;
+ break;
+ case 18:
+ len = ADV7511_I2S_SAMPLE_LEN_18;
+ break;
+ case 20:
+ len = ADV7511_I2S_SAMPLE_LEN_20;
+ break;
+ case 24:
+ len = ADV7511_I2S_SAMPLE_LEN_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt->fmt) {
+ case HDMI_I2S:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_I2S;
+ break;
+ case HDMI_RIGHT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
+ break;
+ case HDMI_LEFT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ invert_clock = fmt->bit_clk_inv;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
+ audio_source << 4);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
+ invert_clock << 6);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
+ i2s_format);
+
+ adv7511->audio_source = audio_source;
+
+ adv7511->f_audio = hparms->sample_rate;
+
+ adv7511_update_cts_n(adv7511);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+ regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ return 0;
+}
+
+static int audio_startup(struct device *dev, void *data)
+{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
+
+ /* hide Audio infoframe updates */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+ /* enable N/CTS, enable Audio sample packets */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(5), BIT(5));
+ /* enable N/CTS */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(6), BIT(6));
+ /* not copyrighted */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
+ BIT(5), BIT(5));
+ /* enable audio infoframes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(3), BIT(3));
+ /* AV mute disable */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
+ BIT(7) | BIT(6), BIT(7));
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+ BIT(5), 0);
+ return 0;
+}
+
+static void audio_shutdown(struct device *dev, void *data)
+{
+}
+
+static const struct hdmi_codec_ops adv7511_codec_ops = {
+ .hw_params = adv7511_hdmi_hw_params,
+ .audio_shutdown = audio_shutdown,
+ .audio_startup = audio_startup,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &adv7511_codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+};
+
+int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+{
+ adv7511->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
+}
+
+void adv7511_audio_exit(struct adv7511 *adv7511)
+{
+ if (adv7511->audio_pdev) {
+ platform_device_unregister(adv7511->audio_pdev);
+ adv7511->audio_pdev = NULL;
+ }
+}
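
The adv7511_calc_cts_n() helper above implements the HDMI audio clock regeneration relation fs = f_TMDS * N / (128 * CTS): N is the recommended value for the sample rate, and CTS is derived from the TMDS clock, which the driver appears to keep in kHz (hence the trailing "* 1000"). A worked example with assumed numbers, a 65 MHz TMDS clock (f_tmds = 65000) and 48 kHz audio:

	N   = 6144
	CTS = ((65000 * 6144) / (128 * 48000)) * 1000
	    = 65 * 1000
	    = 65000
	check: fs = 65,000,000 * 6144 / (128 * 65000) = 48,000 Hz
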
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 8ed3906dd411..8dba729f6ef9 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1037,6 +1037,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_unregister_cec;
}
+ adv7511_audio_init(dev, adv7511);
+
return 0;
err_unregister_cec:
@@ -1058,6 +1060,8 @@ static int adv7511_remove(struct i2c_client *i2c)
drm_bridge_remove(&adv7511->bridge);
+ adv7511_audio_exit(adv7511);
+
i2c_unregister_device(adv7511->i2c_edid);
kfree(adv7511->edid);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index d7f7b7ce8ebe..8b210373cfa2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -29,6 +29,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x17, 0xd0 },
{ 0x24, 0x20 },
{ 0x57, 0x11 },
+ { 0x05, 0xc8 },
};
static const struct regmap_config adv7533_cec_regmap_config = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index cd37ac058675..303083ad28e3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1162,5 +1162,5 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
(msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
- return num_transferred;
+ return num_transferred > 0 ? num_transferred : -EBUSY;
}
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index afec232185a7..e5706981c934 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@@ -23,6 +24,7 @@ struct dumb_vga {
struct drm_connector connector;
struct i2c_adapter *ddc;
+ struct regulator *vdd;
};
static inline struct dumb_vga *
@@ -124,8 +126,30 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
return 0;
}
+static void dumb_vga_enable(struct drm_bridge *bridge)
+{
+ struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
+ int ret = 0;
+
+ if (vga->vdd)
+ ret = regulator_enable(vga->vdd);
+
+ if (ret)
+ DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
+}
+
+static void dumb_vga_disable(struct drm_bridge *bridge)
+{
+ struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
+
+ if (vga->vdd)
+ regulator_disable(vga->vdd);
+}
+
static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
.attach = dumb_vga_attach,
+ .enable = dumb_vga_enable,
+ .disable = dumb_vga_disable,
};
static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
@@ -169,6 +193,15 @@ static int dumb_vga_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, vga);
+ vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+ if (IS_ERR(vga->vdd)) {
+ ret = PTR_ERR(vga->vdd);
+ if (ret == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ vga->vdd = NULL;
+ dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
+ }
+
vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
if (IS_ERR(vga->ddc)) {
if (PTR_ERR(vga->ddc) == -ENODEV) {
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
index 91f631beecc7..fd1f745c6073 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
@@ -11,4 +11,11 @@ struct dw_hdmi_audio_data {
u8 *eld;
};
+struct dw_hdmi_i2s_audio_data {
+ struct dw_hdmi *hdmi;
+
+ void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
+ u8 (*read)(struct dw_hdmi *hdmi, int offset);
+};
+
#endif
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c
new file mode 100644
index 000000000000..aaf287d2e91d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c
@@ -0,0 +1,141 @@
+/*
+ * dw-hdmi-i2s-audio.c
+ *
+ * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/bridge/dw_hdmi.h>
+
+#include <sound/hdmi-codec.h>
+
+#include "dw-hdmi.h"
+#include "dw-hdmi-audio.h"
+
+#define DRIVER_NAME "dw-hdmi-i2s-audio"
+
+static inline void hdmi_write(struct dw_hdmi_i2s_audio_data *audio,
+ u8 val, int offset)
+{
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ audio->write(hdmi, val, offset);
+}
+
+static inline u8 hdmi_read(struct dw_hdmi_i2s_audio_data *audio, int offset)
+{
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ return audio->read(hdmi, offset);
+}
+
+static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+ u8 conf0 = 0;
+ u8 conf1 = 0;
+ u8 inputclkfs = 0;
+
+ /* only the I2S interface is supported */
+ if ((fmt->fmt != HDMI_I2S) ||
+ (fmt->bit_clk_master | fmt->frame_clk_master)) {
+ dev_err(dev, "unsupported format/settings\n");
+ return -EINVAL;
+ }
+
+ inputclkfs = HDMI_AUD_INPUTCLKFS_64FS;
+ conf0 = HDMI_AUD_CONF0_I2S_ALL_ENABLE;
+
+ switch (hparms->sample_width) {
+ case 16:
+ conf1 = HDMI_AUD_CONF1_WIDTH_16;
+ break;
+ case 24:
+ case 32:
+ conf1 = HDMI_AUD_CONF1_WIDTH_24;
+ break;
+ }
+
+ dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
+
+ hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS);
+ hdmi_write(audio, conf0, HDMI_AUD_CONF0);
+ hdmi_write(audio, conf1, HDMI_AUD_CONF1);
+
+ dw_hdmi_audio_enable(hdmi);
+
+ return 0;
+}
+
+static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ dw_hdmi_audio_disable(hdmi);
+
+ hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
+}
+
+static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
+ .hw_params = dw_hdmi_i2s_hw_params,
+ .audio_shutdown = dw_hdmi_i2s_audio_shutdown,
+};
+
+static int snd_dw_hdmi_probe(struct platform_device *pdev)
+{
+ struct dw_hdmi_i2s_audio_data *audio = pdev->dev.platform_data;
+ struct platform_device_info pdevinfo;
+ struct hdmi_codec_pdata pdata;
+ struct platform_device *platform;
+
+ pdata.ops = &dw_hdmi_i2s_ops;
+ pdata.i2s = 1;
+ pdata.max_i2s_channels = 6;
+ pdata.data = audio;
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.parent = pdev->dev.parent;
+ pdevinfo.id = PLATFORM_DEVID_AUTO;
+ pdevinfo.name = HDMI_CODEC_DRV_NAME;
+ pdevinfo.data = &pdata;
+ pdevinfo.size_data = sizeof(pdata);
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
+
+ platform = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(platform))
+ return PTR_ERR(platform);
+
+ dev_set_drvdata(&pdev->dev, platform);
+
+ return 0;
+}
+
+static int snd_dw_hdmi_remove(struct platform_device *pdev)
+{
+ struct platform_device *platform = dev_get_drvdata(&pdev->dev);
+
+ platform_device_unregister(platform);
+
+ return 0;
+}
+
+static struct platform_driver snd_dw_hdmi_driver = {
+ .probe = snd_dw_hdmi_probe,
+ .remove = snd_dw_hdmi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(snd_dw_hdmi_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("Synopsis Designware HDMI I2S ALSA SoC interface");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index ab7023e5dfde..235ce7d1583d 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1,14 +1,15 @@
/*
+ * DesignWare High-Definition Multimedia Interface (HDMI) driver
+ *
+ * Copyright (C) 2013-2015 Mentor Graphics Inc.
* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Designware High-Definition Multimedia Interface (HDMI) driver
- *
- * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
#include <linux/module.h>
#include <linux/irq.h>
@@ -101,6 +102,17 @@ struct hdmi_data_info {
struct hdmi_vmode video_mode;
};
+struct dw_hdmi_i2c {
+ struct i2c_adapter adap;
+
+ struct mutex lock; /* used to serialize data transfers */
+ struct completion cmp;
+ u8 stat;
+
+ u8 slave_reg;
+ bool is_regaddr;
+};
+
struct dw_hdmi {
struct drm_connector connector;
struct drm_encoder *encoder;
@@ -111,6 +123,7 @@ struct dw_hdmi {
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
const struct dw_hdmi_plat_data *plat_data;
@@ -198,6 +211,201 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
hdmi_modb(hdmi, data << shift, mask, reg);
}
+static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
+{
+ /* Software reset */
+ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_SOFTRSTZ);
+
+ /* Set Standard Mode speed (determined to be 100KHz on iMX6) */
+ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_DIV);
+
+ /* Set done, not acknowledged and arbitration interrupt polarities */
+ hdmi_writeb(hdmi, HDMI_I2CM_INT_DONE_POL, HDMI_I2CM_INT);
+ hdmi_writeb(hdmi, HDMI_I2CM_CTLINT_NAC_POL | HDMI_I2CM_CTLINT_ARB_POL,
+ HDMI_I2CM_CTLINT);
+
+ /* Clear DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_I2CM_STAT0);
+
+ /* Mute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_MUTE_I2CM_STAT0);
+}
+
+static int dw_hdmi_i2c_read(struct dw_hdmi *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ dev_dbg(hdmi->dev, "set read register address to 0\n");
+ i2c->slave_reg = 0x00;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_READ,
+ HDMI_I2CM_OPERATION);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+
+ *buf++ = hdmi_readb(hdmi, HDMI_I2CM_DATAI);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_i2c_write(struct dw_hdmi *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ /* Use the first write byte as register address */
+ i2c->slave_reg = buf[0];
+ length--;
+ buf++;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ hdmi_writeb(hdmi, *buf++, HDMI_I2CM_DATAO);
+ hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_WRITE,
+ HDMI_I2CM_OPERATION);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct dw_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ u8 addr = msgs[0].addr;
+ int i, ret = 0;
+
+ dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].addr != addr) {
+ dev_warn(hdmi->dev,
+ "unsupported transfer, changed slave address\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (msgs[i].len == 0) {
+ dev_dbg(hdmi->dev,
+ "unsupported transfer %d/%d, no data\n",
+ i + 1, num);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_lock(&i2c->lock);
+
+ /* Unmute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, 0x00, HDMI_IH_MUTE_I2CM_STAT0);
+
+ /* Set slave device address taken from the first I2C message */
+ hdmi_writeb(hdmi, addr, HDMI_I2CM_SLAVE);
+
+ /* Set slave device register address on transfer */
+ i2c->is_regaddr = false;
+
+ for (i = 0; i < num; i++) {
+ dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = dw_hdmi_i2c_read(hdmi, msgs[i].buf, msgs[i].len);
+ else
+ ret = dw_hdmi_i2c_write(hdmi, msgs[i].buf, msgs[i].len);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_MUTE_I2CM_STAT0);
+
+ mutex_unlock(&i2c->lock);
+
+ return ret;
+}
+
+static u32 dw_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm dw_hdmi_algorithm = {
+ .master_xfer = dw_hdmi_i2c_xfer,
+ .functionality = dw_hdmi_i2c_func,
+};
+
+static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct dw_hdmi_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->lock);
+ init_completion(&i2c->cmp);
+
+ adap = &i2c->adap;
+ adap->class = I2C_CLASS_DDC;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &dw_hdmi_algorithm;
+ strlcpy(adap->name, "DesignWare HDMI", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+
+ dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
unsigned int n)
{
@@ -1512,16 +1720,40 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.mode_set = dw_hdmi_bridge_mode_set,
};
+static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ unsigned int stat;
+
+ stat = hdmi_readb(hdmi, HDMI_IH_I2CM_STAT0);
+ if (!stat)
+ return IRQ_NONE;
+
+ hdmi_writeb(hdmi, stat, HDMI_IH_I2CM_STAT0);
+
+ i2c->stat = stat;
+
+ complete(&i2c->cmp);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
{
struct dw_hdmi *hdmi = dev_id;
u8 intr_stat;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (hdmi->i2c)
+ ret = dw_hdmi_i2c_irq(hdmi);
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
- if (intr_stat)
+ if (intr_stat) {
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+ return IRQ_WAKE_THREAD;
+ }
- return intr_stat ? IRQ_WAKE_THREAD : IRQ_NONE;
+ return ret;
}
static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -1639,10 +1871,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
struct device_node *np = dev->of_node;
struct platform_device_info pdevinfo;
struct device_node *ddc_node;
- struct dw_hdmi_audio_data audio;
struct dw_hdmi *hdmi;
int ret;
u32 val = 1;
+ u8 config0;
+ u8 config1;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
@@ -1681,7 +1914,7 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
- hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ hdmi->ddc = of_get_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
if (!hdmi->ddc) {
dev_dbg(hdmi->dev, "failed to read ddc node\n");
@@ -1693,20 +1926,22 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
}
hdmi->regs = devm_ioremap_resource(dev, iores);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
+ goto err_res;
+ }
hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
if (IS_ERR(hdmi->isfr_clk)) {
ret = PTR_ERR(hdmi->isfr_clk);
dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret);
- return ret;
+ goto err_res;
}
ret = clk_prepare_enable(hdmi->isfr_clk);
if (ret) {
dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret);
- return ret;
+ goto err_res;
}
hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
@@ -1744,6 +1979,13 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
*/
hdmi_init_clk_regenerator(hdmi);
+ /* If DDC bus is not specified, try to register HDMI I2C bus */
+ if (!hdmi->ddc) {
+ hdmi->ddc = dw_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->ddc))
+ hdmi->ddc = NULL;
+ }
+
/*
* Configure registers related to HDMI interrupt
* generation before registering IRQ.
@@ -1770,7 +2012,12 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
- if (hdmi_readb(hdmi, HDMI_CONFIG1_ID) & HDMI_CONFIG1_AHB) {
+ config0 = hdmi_readb(hdmi, HDMI_CONFIG0_ID);
+ config1 = hdmi_readb(hdmi, HDMI_CONFIG1_ID);
+
+ if (config1 & HDMI_CONFIG1_AHB) {
+ struct dw_hdmi_audio_data audio;
+
audio.phys = iores->start;
audio.base = hdmi->regs;
audio.irq = irq;
@@ -1782,16 +2029,39 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
pdevinfo.size_data = sizeof(audio);
pdevinfo.dma_mask = DMA_BIT_MASK(32);
hdmi->audio = platform_device_register_full(&pdevinfo);
+ } else if (config0 & HDMI_CONFIG0_I2S) {
+ struct dw_hdmi_i2s_audio_data audio;
+
+ audio.hdmi = hdmi;
+ audio.write = hdmi_writeb;
+ audio.read = hdmi_readb;
+
+ pdevinfo.name = "dw-hdmi-i2s-audio";
+ pdevinfo.data = &audio;
+ pdevinfo.size_data = sizeof(audio);
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
+ hdmi->audio = platform_device_register_full(&pdevinfo);
}
+ /* Reset HDMI DDC I2C master controller and mute I2CM interrupts */
+ if (hdmi->i2c)
+ dw_hdmi_i2c_init(hdmi);
+
dev_set_drvdata(dev, hdmi);
return 0;
err_iahb:
+ if (hdmi->i2c) {
+ i2c_del_adapter(&hdmi->i2c->adap);
+ hdmi->ddc = NULL;
+ }
+
clk_disable_unprepare(hdmi->iahb_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
+err_res:
+ i2c_put_adapter(hdmi->ddc);
return ret;
}
@@ -1809,13 +2079,18 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
- i2c_put_adapter(hdmi->ddc);
+
+ if (hdmi->i2c)
+ i2c_del_adapter(&hdmi->i2c->adap);
+ else
+ i2c_put_adapter(hdmi->ddc);
}
EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>");
MODULE_DESCRIPTION("DW HDMI transmitter driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dw-hdmi");
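
With the hunks above, dw-hdmi registers its own I2CM block as an I2C adapter and uses it as the DDC channel whenever the device tree does not supply a ddc-i2c-bus phandle, so EDID retrieval goes through the same helper in either case. A sketch of how a connector .get_modes implementation typically consumes such an adapter (generic 4.9-era helper usage, not a literal quote of this driver):

	static int foo_connector_get_modes(struct drm_connector *connector)
	{
		struct foo *foo = connector_to_foo(connector);	/* hypothetical */
		struct edid *edid;
		int ret = 0;

		edid = drm_get_edid(connector, foo->ddc);
		if (edid) {
			drm_mode_connector_update_edid_property(connector, edid);
			ret = drm_add_edid_modes(connector, edid);
			kfree(edid);
		}
		return ret;
	}
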
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h
index fc9a560429d6..55135bbd0c16 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi.h
@@ -545,6 +545,9 @@
#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
enum {
+/* CONFIG0_ID field values */
+ HDMI_CONFIG0_I2S = 0x10,
+
/* CONFIG1_ID field values */
HDMI_CONFIG1_AHB = 0x01,
@@ -566,6 +569,10 @@ enum {
HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
HDMI_IH_PHY_STAT0_HPD = 0x1,
+/* IH_I2CM_STAT0 and IH_MUTE_I2CM_STAT0 field values */
+ HDMI_IH_I2CM_STAT0_DONE = 0x2,
+ HDMI_IH_I2CM_STAT0_ERROR = 0x1,
+
/* IH_MUTE_I2CMPHY_STAT0 field values */
HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
@@ -887,6 +894,17 @@ enum {
HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08,
HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04,
+/* AUD_CONF0 field values */
+ HDMI_AUD_CONF0_SW_RESET = 0x80,
+ HDMI_AUD_CONF0_I2S_ALL_ENABLE = 0x2F,
+
+/* AUD_CONF1 field values */
+ HDMI_AUD_CONF1_MODE_I2S = 0x00,
+ HDMI_AUD_CONF1_MODE_RIGHT_J = 0x02,
+ HDMI_AUD_CONF1_MODE_LEFT_J = 0x04,
+ HDMI_AUD_CONF1_WIDTH_16 = 0x10,
+ HDMI_AUD_CONF1_WIDTH_24 = 0x18,
+
/* AUD_CTS3 field values */
HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5,
HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0,
@@ -901,6 +919,12 @@ enum {
HDMI_AUD_CTS3_CTS_MANUAL = 0x10,
HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f,
+/* HDMI_AUD_INPUTCLKFS field values */
+ HDMI_AUD_INPUTCLKFS_128FS = 0,
+ HDMI_AUD_INPUTCLKFS_256FS = 1,
+ HDMI_AUD_INPUTCLKFS_512FS = 2,
+ HDMI_AUD_INPUTCLKFS_64FS = 4,
+
/* AHB_DMA_CONF0 field values */
HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7,
HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80,
@@ -1032,6 +1056,21 @@ enum {
HDMI_A_VIDPOLCFG_HSYNCPOL_MASK = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
+
+/* I2CM_OPERATION field values */
+ HDMI_I2CM_OPERATION_WRITE = 0x10,
+ HDMI_I2CM_OPERATION_READ_EXT = 0x2,
+ HDMI_I2CM_OPERATION_READ = 0x1,
+
+/* I2CM_INT field values */
+ HDMI_I2CM_INT_DONE_POL = 0x8,
+ HDMI_I2CM_INT_DONE_MASK = 0x4,
+
+/* I2CM_CTLINT field values */
+ HDMI_I2CM_CTLINT_NAC_POL = 0x80,
+ HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
+ HDMI_I2CM_CTLINT_ARB_POL = 0x8,
+ HDMI_I2CM_CTLINT_ARB_MASK = 0x4,
};
#endif /* __DW_HDMI_H__ */
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
new file mode 100644
index 000000000000..b2c267df7ee7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -0,0 +1,1564 @@
+/*
+ * Silicon Image SiI8620 HDMI/MHL bridge driver
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "sil-sii8620.h"
+
+#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+
+enum sii8620_mode {
+ CM_DISCONNECTED,
+ CM_DISCOVERY,
+ CM_MHL1,
+ CM_MHL3,
+ CM_ECBUS_S
+};
+
+enum sii8620_sink_type {
+ SINK_NONE,
+ SINK_HDMI,
+ SINK_DVI
+};
+
+enum sii8620_mt_state {
+ MT_STATE_READY,
+ MT_STATE_BUSY,
+ MT_STATE_DONE
+};
+
+struct sii8620 {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct clk *clk_xtal;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_int;
+ struct regulator_bulk_data supplies[2];
+ struct mutex lock; /* context lock, protects fields below */
+ int error;
+ enum sii8620_mode mode;
+ enum sii8620_sink_type sink_type;
+ u8 cbus_status;
+ u8 stat[MHL_DST_SIZE];
+ u8 xstat[MHL_XDS_SIZE];
+ u8 devcap[MHL_DCAP_SIZE];
+ u8 xdevcap[MHL_XDC_SIZE];
+ u8 avif[19];
+ struct edid *edid;
+ unsigned int gen2_write_burst:1;
+ enum sii8620_mt_state mt_state;
+ struct list_head mt_queue;
+};
+
+struct sii8620_mt_msg;
+
+typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg);
+
+struct sii8620_mt_msg {
+ struct list_head node;
+ u8 reg[4];
+ u8 ret;
+ sii8620_mt_msg_cb send;
+ sii8620_mt_msg_cb recv;
+};
+
+static const u8 sii8620_i2c_page[] = {
+ 0x39, /* Main System */
+ 0x3d, /* TDM and HSIC */
+ 0x49, /* TMDS Receiver, MHL EDID */
+ 0x4d, /* eMSC, HDCP, HSIC */
+ 0x5d, /* MHL Spec */
+ 0x64, /* MHL CBUS */
+ 0x59, /* Hardware TPI (Transmitter Programming Interface) */
+ 0x61, /* eCBUS-S, eCBUS-D */
+};
+
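+/*
+ * Register addresses used throughout this driver encode the I2C page in
+ * the high byte (an index into sii8620_i2c_page[]) and the register
+ * offset within that page in the low byte.
+ */
+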
+static void sii8620_fetch_edid(struct sii8620 *ctx);
+static void sii8620_set_upstream_edid(struct sii8620 *ctx);
+static void sii8620_enable_hpd(struct sii8620 *ctx);
+static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+
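+/*
+ * The I2C accessors record the first failure in ctx->error and become
+ * no-ops until the error is consumed with sii8620_clear_error(), so
+ * callers can batch register accesses and check for errors once.
+ */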
+static int sii8620_clear_error(struct sii8620 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data = addr;
+ struct i2c_msg msg[] = {
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = 1,
+ .buf = &data
+ },
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags | I2C_M_RD,
+ .len = len,
+ .buf = buf
+ },
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 2) {
+ dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n",
+ addr, len, ret);
+ ctx->error = ret < 0 ? ret : -EIO;
+ }
+}
+
+static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
+{
+ u8 ret;
+
+ sii8620_read_buf(ctx, addr, &ret, 1);
+ return ret;
+}
+
+static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf,
+ int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data[2];
+ struct i2c_msg msg = {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = len + 1,
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ if (len > 1) {
+ msg.buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!msg.buf) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+ memcpy(msg.buf + 1, buf, len);
+ } else {
+ msg.buf = data;
+ msg.buf[1] = *buf;
+ }
+
+ msg.buf[0] = addr;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 1) {
+ dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n",
+ addr, len, buf, ret);
+ ctx->error = ret ?: -EIO;
+ }
+
+ if (len > 1)
+ kfree(msg.buf);
+}
+
+#define sii8620_write(ctx, addr, arr...) \
+({\
+ u8 d[] = { arr }; \
+ sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \
+})
+
+static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ sii8620_write(ctx, seq[i], seq[i + 1]);
+}
+
+#define sii8620_write_seq(ctx, seq...) \
+({\
+ const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define sii8620_write_seq_static(ctx, seq...) \
+({\
+ static const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
+{
+ val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask);
+ sii8620_write(ctx, addr, val);
+}
+
+static void sii8620_mt_cleanup(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg, *n;
+
+ list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
+ list_del(&msg->node);
+ kfree(msg);
+ }
+ ctx->mt_state = MT_STATE_READY;
+}
+
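+/*
+ * Advance the MSC transaction queue: if the previous request completed,
+ * run its recv callback and free it, then start the next queued request
+ * by calling its send callback.
+ */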
+static void sii8620_mt_work(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg;
+
+ if (ctx->error)
+ return;
+ if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue))
+ return;
+
+ if (ctx->mt_state == MT_STATE_DONE) {
+ ctx->mt_state = MT_STATE_READY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
+ node);
+ if (msg->recv)
+ msg->recv(ctx, msg);
+ list_del(&msg->node);
+ kfree(msg);
+ }
+
+ if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue))
+ return;
+
+ ctx->mt_state = MT_STATE_BUSY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+ if (msg->send)
+ msg->send(ctx, msg);
+}
+
+static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ switch (msg->reg[0]) {
+ case MHL_WRITE_STAT:
+ case MHL_SET_INT:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_WRITE_STAT);
+ break;
+ case MHL_MSC_MSG:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_MSC_MSG);
+ break;
+ default:
+ dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
+ msg->reg[0]);
+ }
+}
+
+static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+
+ if (!msg)
+ ctx->error = -ENOMEM;
+ else
+ list_add_tail(&msg->node, &ctx->mt_queue);
+
+ return msg;
+}
+
+static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = cmd;
+ msg->reg[1] = arg1;
+ msg->reg[2] = arg2;
+ msg->send = sii8620_mt_msc_cmd_send;
+}
+
+static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val);
+}
+
+static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask);
+}
+
+static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data);
+}
+
+static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
+}
+
+static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE,
+ REG_EDID_CTRL, ctrl,
+ REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START
+ );
+}
+
+/* copy src to dst; on return each src byte holds old ^ new, i.e. the changed bits */
+static void sii8620_update_array(u8 *dst, u8 *src, int count)
+{
+ while (--count >= 0) {
+ *src ^= *dst;
+ *dst++ ^= *src++;
+ }
+}
+
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+ static const char * const sink_str[] = {
+ [SINK_NONE] = "NONE",
+ [SINK_HDMI] = "HDMI",
+ [SINK_DVI] = "DVI"
+ };
+
+ u8 dcap[MHL_DCAP_SIZE];
+ char sink_name[20];
+ struct device *dev = ctx->dev;
+
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+ if (ctx->error < 0)
+ return;
+
+ dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
+ dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+ dcap[MHL_DCAP_MHL_VERSION] / 16,
+ dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
+ dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
+ dcap[MHL_DCAP_DEVICE_ID_L]);
+ sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+
+ if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+ return;
+
+ sii8620_fetch_edid(ctx);
+ if (!ctx->edid) {
+ dev_err(ctx->dev, "Cannot fetch EDID\n");
+ sii8620_mhl_disconnected(ctx);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(ctx->edid))
+ ctx->sink_type = SINK_HDMI;
+ else
+ ctx->sink_type = SINK_DVI;
+
+ drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name));
+
+ dev_info(dev, "detected sink(type: %s): %s\n",
+ sink_str[ctx->sink_type], sink_name);
+ sii8620_set_upstream_edid(ctx);
+ sii8620_enable_hpd(ctx);
+}
+
+static void sii8620_mr_xdevcap(struct sii8620 *ctx)
+{
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
+ MHL_XDC_SIZE);
+
+ sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+ MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+ sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+}
+
+static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE
+ | BIT_INTR9_EDID_ERROR,
+ REG_EDID_CTRL, ctrl,
+ REG_EDID_FIFO_ADDR, 0
+ );
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ sii8620_mr_xdevcap(ctx);
+ else
+ sii8620_mr_devcap(ctx);
+}
+
+static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP;
+ msg->send = sii8620_mt_read_devcap_send;
+ msg->recv = sii8620_mt_read_devcap_recv;
+}
+
+static void sii8620_fetch_edid(struct sii8620 *ctx)
+{
+ u8 lm_ddc, ddc_cmd, int3, cbus;
+ int fetched, i;
+ int edid_len = EDID_LENGTH;
+ u8 *edid;
+
+ sii8620_readb(ctx, REG_CBUS_STATUS);
+ lm_ddc = sii8620_readb(ctx, REG_LM_DDC);
+ ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD);
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, 0,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_HDCP2X_POLL_CS, 0x71,
+ REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX,
+ REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED,
+ );
+
+ for (i = 0; i < 256; ++i) {
+ u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS);
+
+ if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG))
+ break;
+ sii8620_write(ctx, REG_DDC_STATUS,
+ BIT_DDC_STATUS_DDC_FIFO_EMPTY);
+ }
+
+ sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1);
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!edid) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+
+#define FETCH_SIZE 16
+ for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) {
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ sii8620_write_seq(ctx,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO,
+ REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY
+ );
+ sii8620_write_seq(ctx,
+ REG_DDC_SEGM, fetched >> 8,
+ REG_DDC_OFFSET, fetched & 0xff,
+ REG_DDC_DIN_CNT1, FETCH_SIZE,
+ REG_DDC_DIN_CNT2, 0,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
+ );
+
+ do {
+ int3 = sii8620_readb(ctx, REG_INTR3);
+ cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if (int3 & BIT_DDC_CMD_DONE)
+ break;
+
+ if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+ kfree(edid);
+ edid = NULL;
+ goto end;
+ }
+ } while (1);
+
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+ usleep_range(10, 20);
+
+ sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
+ if (fetched + FETCH_SIZE == EDID_LENGTH) {
+ u8 ext = ((struct edid *)edid)->extensions;
+
+ if (ext) {
+ u8 *new_edid;
+
+ edid_len += ext * EDID_LENGTH;
+ new_edid = krealloc(edid, edid_len, GFP_KERNEL);
+ if (!new_edid) {
+ kfree(edid);
+ ctx->error = -ENOMEM;
+ return;
+ }
+ edid = new_edid;
+ }
+ }
+
+ if (fetched + FETCH_SIZE == edid_len)
+ sii8620_write(ctx, REG_INTR3, int3);
+ }
+
+ sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+
+end:
+ kfree(ctx->edid);
+ ctx->edid = (struct edid *)edid;
+}
+
+static void sii8620_set_upstream_edid(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N
+ | BIT_DPD_PD_MHL_CLK_N, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL3, 0x00,
+ REG_PKT_FILTER_0, 0xFF,
+ REG_PKT_FILTER_1, 0xFF,
+ REG_ALICE0_BW_I2C, 0x06
+ );
+
+ sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER,
+ BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_EDID_FIFO_ADDR, 0,
+ );
+
+ sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid,
+ (ctx->edid->extensions + 1) * EDID_LENGTH);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE,
+ REG_INTR9_MASK, 0
+ );
+}
+
+static void sii8620_xtal_set_rate(struct sii8620 *ctx)
+{
+ static const struct {
+ unsigned int rate;
+ u8 div;
+ u8 tp1;
+ } rates[] = {
+ { 19200, 0x04, 0x53 },
+ { 20000, 0x04, 0x62 },
+ { 24000, 0x05, 0x75 },
+ { 30000, 0x06, 0x92 },
+ { 38400, 0x0c, 0xbc },
+ };
+ unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i)
+ if (rate <= rates[i].rate)
+ break;
+
+ if (rate != rates[i].rate)
+ dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n",
+ rate, rates[i].rate);
+
+ sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div);
+ sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1);
+}
+
+static int sii8620_hw_on(struct sii8620 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+ usleep_range(10000, 20000);
+ return clk_prepare_enable(ctx->clk_xtal);
+}
+
+static int sii8620_hw_off(struct sii8620 *ctx)
+{
+ clk_disable_unprepare(ctx->clk_xtal);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii8620_hw_reset(struct sii8620 *ctx)
+{
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ usleep_range(5000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ msleep(300);
+}
+
+static void sii8620_cbus_reset(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+ | BIT_PWD_SRST_CBUS_RST_SW_EN,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
+ );
+}
+
+static void sii8620_set_auto_zone(struct sii8620 *ctx)
+{
+ if (ctx->mode != CM_MHL1) {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, 0x0,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ }
+}
+
+static void sii8620_stop_video(struct sii8620 *ctx)
+{
+ u8 uninitialized_var(val);
+
+ sii8620_write_seq_static(ctx,
+ REG_TPI_INTR_EN, 0,
+ REG_HDCP2X_INTR0_MASK, 0,
+ REG_TPI_COPP_DATA2, 0,
+ REG_TPI_INTR_ST0, ~0,
+ );
+
+ switch (ctx->sink_type) {
+ case SINK_DVI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE;
+ break;
+ case SINK_HDMI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE
+ | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
+ break;
+ default:
+ return;
+ }
+
+ sii8620_write(ctx, REG_TPI_SC, val);
+}
+
+static void sii8620_start_hdmi(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
+ | BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
+ REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
+ | BIT_VID_OVRRD_M1080P_OVRRD,
+ REG_VID_MODE, 0,
+ REG_MHL_TOP_CTL, 0x1,
+ REG_MHLTX_CTL6, 0xa0,
+ REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+ );
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL |
+ MHL_DST_LM_PATH_ENABLED);
+
+ sii8620_set_auto_zone(ctx);
+
+ sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+
+ sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+}
+
+static void sii8620_start_video(struct sii8620 *ctx)
+{
+ if (ctx->mode < CM_MHL3)
+ sii8620_stop_video(ctx);
+
+ switch (ctx->sink_type) {
+ case SINK_HDMI:
+ sii8620_start_hdmi(ctx);
+ break;
+ case SINK_DVI:
+ default:
+ break;
+ }
+}
+
+static void sii8620_disable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN,
+ REG_INTR8_MASK, 0
+ );
+}
+
+static void sii8620_enable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS
+ | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN
+ | BIT_HPD_CTRL_HPD_HIGH,
+ );
+}
+
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
+ );
+ ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (!ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_CTRL, 0,
+ REG_MDT_RCV_CTRL, 0
+ );
+ ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+ | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+ | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+ | BIT_MDT_XMIT_SM_ERROR,
+ REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+ | BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+ | BIT_MDT_RFIFO_DATA_RDY
+ );
+ sii8620_enable_gen2_write_burst(ctx);
+}
+
+static void sii8620_mhl_discover(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_DISC_PULSE_PROCEED,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K),
+ REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT
+ | BIT_MHL_EST_INT
+ | BIT_NOT_MHL_EST_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_RGND_READY_INT,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_DP_CTL1, 0xA2,
+ REG_MHL_DP_CTL2, 0x03,
+ REG_MHL_DP_CTL3, 0x35,
+ REG_MHL_DP_CTL5, 0x02,
+ REG_MHL_DP_CTL6, 0x02,
+ REG_MHL_DP_CTL7, 0x03,
+ REG_COC_CTLC, 0xFF,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC,
+ REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE
+ | BIT_COC_CALIBRATION_DONE,
+ REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD
+ | BIT_CBUS_CMD_ABORT,
+ REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE
+ | BIT_CBUS_HPD_CHG
+ | BIT_CBUS_MSC_MR_WRITE_STAT
+ | BIT_CBUS_MSC_MR_MSC_MSG
+ | BIT_CBUS_MSC_MR_WRITE_BURST
+ | BIT_CBUS_MSC_MR_SET_INT
+ | BIT_CBUS_MSC_MT_DONE_NACK
+ );
+}
+
+static void sii8620_peer_specific_init(struct sii8620 *ctx)
+{
+ if (ctx->mode == CM_MHL3)
+ sii8620_write_seq_static(ctx,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
+ REG_EMSCINTRMASK1,
+ BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR
+ );
+ else
+ sii8620_write_seq_static(ctx,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_HDCP2X_INTR0, 0xFF,
+ REG_INTR1, 0xFF,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD
+ | BIT_SYS_CTRL1_TX_CTRL_HDMI
+ );
+}
+
+#define SII8620_MHL_VERSION 0x32
+#define SII8620_SCRATCHPAD_SIZE 16
+#define SII8620_INT_STAT_SIZE 0x33
+
+static void sii8620_set_dev_cap(struct sii8620 *ctx)
+{
+ static const u8 devcap[MHL_DCAP_SIZE] = {
+ [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION,
+ [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER,
+ [MHL_DCAP_ADOPTER_ID_H] = 0x01,
+ [MHL_DCAP_ADOPTER_ID_L] = 0x41,
+ [MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444
+ | MHL_DCAP_VID_LINK_PPIXEL
+ | MHL_DCAP_VID_LINK_16BPP,
+ [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH,
+ [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS,
+ [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI,
+ [MHL_DCAP_BANDWIDTH] = 0x0f,
+ [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT
+ | MHL_DCAP_FEATURE_RAP_SUPPORT
+ | MHL_DCAP_FEATURE_SP_SUPPORT,
+ [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE,
+ [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE,
+ };
+ static const u8 xdcap[MHL_XDC_SIZE] = {
+ [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075
+ | MHL_XDC_ECBUS_S_8BIT,
+ [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150
+ | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600,
+ [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST,
+ [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE,
+ };
+
+ sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap));
+ sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap));
+}
+
+static void sii8620_mhl_init(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN,
+ );
+
+ sii8620_peer_specific_init(ctx);
+
+ sii8620_disable_hpd(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_TMDS0_CCTRL1, 0x90,
+ REG_TMDS_CLK_EN, 0x01,
+ REG_TMDS_CH_EN, 0x11,
+ REG_BGR_BIAS, 0x87,
+ REG_ALICE0_ZONE_CTRL, 0xE8,
+ REG_ALICE0_MODE_CTRL, 0x04,
+ );
+ sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0);
+ sii8620_write_seq_static(ctx,
+ REG_TPI_HW_OPT3, 0x76,
+ REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE,
+ REG_TPI_DTD_B2, 79,
+ );
+ sii8620_set_dev_cap(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_TIMEOUT, 100,
+ REG_MDT_XMIT_CTRL, 0x03,
+ REG_MDT_XFIFO_STAT, 0x00,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_CBUS_LINK_CTRL_8, 0x1D,
+ );
+
+ sii8620_start_gen2_write_burst(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_BIST_CTRL, 0x00,
+ REG_COC_CTL1, 0x10,
+ REG_COC_CTL2, 0x18,
+ REG_COC_CTLF, 0x07,
+ REG_COC_CTL11, 0xF8,
+ REG_COC_CTL17, 0x61,
+ REG_COC_CTL18, 0x46,
+ REG_COC_CTL19, 0x15,
+ REG_COC_CTL1A, 0x01,
+ REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN,
+ REG_MHL_COC_CTL4, 0x2D,
+ REG_MHL_COC_CTL5, 0xF9,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ );
+ sii8620_disable_gen2_write_burst(ctx);
+
+ /* currently MHL3 is not supported, so we force version to 0 */
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
+ MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
+ | MHL_DST_CONN_POW_STAT);
+ sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
+}
+
+static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
+{
+ if (ctx->mode == mode)
+ return;
+
+ ctx->mode = mode;
+
+ switch (mode) {
+ case CM_MHL1:
+ sii8620_write_seq_static(ctx,
+ REG_CBUS_MSC_COMPAT_CTRL, 0x02,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN,
+ REG_COC_INTR_MASK, 0
+ );
+ break;
+ case CM_MHL3:
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_COC_CTL0, 0x40,
+ REG_MHL_COC_CTL1, 0x07
+ );
+ break;
+ case CM_DISCONNECTED:
+ break;
+ default:
+ dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
+ break;
+ }
+
+ sii8620_set_auto_zone(ctx);
+
+ if (mode != CM_MHL1)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_DP_CTL0, 0xBC,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x39,
+ REG_MHL_DP_CTL2, 0x2A,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x08
+ );
+}
+
+static void sii8620_disconnect(struct sii8620 *ctx)
+{
+ sii8620_disable_gen2_write_burst(ctx);
+ sii8620_stop_video(ctx);
+ msleep(50);
+ sii8620_cbus_reset(ctx);
+ sii8620_set_mode(ctx, CM_DISCONNECTED);
+ sii8620_write_seq_static(ctx,
+ REG_COC_CTL0, 0x40,
+ REG_CBUS3_CNVT, 0x84,
+ REG_COC_CTL14, 0x00,
+ REG_COC_CTL0, 0x40,
+ REG_HRXCTRL3, 0x07,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x3F,
+ REG_MHL_DP_CTL2, 0x2F,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x03
+ );
+ sii8620_disable_hpd(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_COC_CTL1, 0x07,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_DISC_CTRL8, 0x00,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_INT_CTRL, 0x00,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ REG_DISC_CTRL1, 0x25,
+ REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT,
+ REG_MDT_INT_1, 0xff,
+ REG_MDT_INT_1_MASK, 0x00,
+ REG_MDT_INT_0, 0xff,
+ REG_MDT_INT_0_MASK, 0x00,
+ REG_COC_INTR, 0xff,
+ REG_COC_INTR_MASK, 0x00,
+ REG_TRXINTH, 0xff,
+ REG_TRXINTMH, 0x00,
+ REG_CBUS_INT_0, 0xff,
+ REG_CBUS_INT_0_MASK, 0x00,
+ REG_CBUS_INT_1, 0xff,
+ REG_CBUS_INT_1_MASK, 0x00,
+ REG_EMSCINTR, 0xff,
+ REG_EMSCINTRMASK, 0x00,
+ REG_EMSCINTR1, 0xff,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_INTR8, 0xff,
+ REG_INTR8_MASK, 0x00,
+ REG_TPI_INTR_ST0, 0xff,
+ REG_TPI_INTR_EN, 0x00,
+ REG_HDCP2X_INTR0, 0xff,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_INTR9, 0xff,
+ REG_INTR9_MASK, 0x00,
+ REG_INTR3, 0xff,
+ REG_INTR3_MASK, 0x00,
+ REG_INTR5, 0xff,
+ REG_INTR5_MASK, 0x00,
+ REG_INTR2, 0xff,
+ REG_INTR2_MASK, 0x00,
+ );
+ memset(ctx->stat, 0, sizeof(ctx->stat));
+ memset(ctx->xstat, 0, sizeof(ctx->xstat));
+ memset(ctx->devcap, 0, sizeof(ctx->devcap));
+ memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+ ctx->cbus_status = 0;
+ ctx->sink_type = SINK_NONE;
+ kfree(ctx->edid);
+ ctx->edid = NULL;
+ sii8620_mt_cleanup(ctx);
+}
+
+static void sii8620_mhl_disconnected(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN
+ );
+ sii8620_disconnect(ctx);
+}
+
+static void sii8620_irq_disc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0);
+
+ if (stat & VAL_CBUS_MHL_DISCON)
+ sii8620_mhl_disconnected(ctx);
+
+ if (stat & BIT_RGND_READY_INT) {
+ u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2);
+
+ if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) {
+ sii8620_mhl_discover(ctx);
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_NOMHL_EST
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_NOT_MHL_EST_INT
+ );
+ }
+ }
+ if (stat & BIT_MHL_EST_INT)
+ sii8620_mhl_init(ctx);
+
+ sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
+}
+
+static void sii8620_irq_g2wb(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
+
+ if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
+ dev_dbg(ctx->dev, "HAWB idle\n");
+
+ sii8620_write(ctx, REG_MDT_INT_0, stat);
+}
+
+static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
+ sii8620_set_mode(ctx, CM_MHL1);
+ sii8620_peer_specific_init(ctx);
+ sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+ | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
+ }
+}
+
+static void sii8620_status_changed_path(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL
+ | MHL_DST_LM_PATH_ENABLED);
+ sii8620_mt_read_devcap(ctx, false);
+ } else {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL);
+ }
+}
+
+static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
+{
+ u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE);
+ sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE);
+
+ sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
+ sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
+
+ if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+ sii8620_status_changed_dcap(ctx);
+
+ if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ sii8620_status_changed_path(ctx);
+}
+
+static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
+{
+ u8 ints[MHL_INT_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+ sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+}
+
+static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+
+ if (list_empty(&ctx->mt_queue)) {
+ dev_err(dev, "unexpected MSC MT response\n");
+ return NULL;
+ }
+
+ return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+}
+
+static void sii8620_msc_mt_done(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+
+ if (!msg)
+ return;
+
+ msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0);
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+ u8 buf[2];
+
+ if (!msg)
+ return;
+
+ sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
+
+ switch (buf[0]) {
+ case MHL_MSC_MSG_RAPK:
+ msg->ret = buf[1];
+ ctx->mt_state = MT_STATE_DONE;
+ break;
+ default:
+ dev_err(ctx->dev, "%s message type %d,%d not supported",
+ __func__, buf[0], buf[1]);
+ }
+}
+
+static void sii8620_irq_msc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0);
+
+ if (stat & ~BIT_CBUS_HPD_CHG)
+ sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG);
+
+ if (stat & BIT_CBUS_HPD_CHG) {
+ u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) {
+ sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG);
+ } else {
+ stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ }
+ ctx->cbus_status = cbus_stat;
+ }
+
+ if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
+ sii8620_msc_mr_write_stat(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_SET_INT)
+ sii8620_msc_mr_set_int(ctx);
+
+ if (stat & BIT_CBUS_MSC_MT_DONE)
+ sii8620_msc_mt_done(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_MSC_MSG)
+ sii8620_msc_mr_msc_msg(ctx);
+}
+
+static void sii8620_irq_coc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_COC_INTR);
+
+ sii8620_write(ctx, REG_COC_INTR, stat);
+}
+
+static void sii8620_irq_merr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1);
+
+ sii8620_write(ctx, REG_CBUS_INT_1, stat);
+}
+
+static void sii8620_irq_edid(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR9);
+
+ sii8620_write(ctx, REG_INTR9, stat);
+
+ if (stat & BIT_INTR9_DEVCAP_DONE)
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_scdt_high(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
+ REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
+ );
+}
+
+static void sii8620_scdt_low(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
+ BIT_TMDS_CSTAT_P3_CLR_AVI);
+
+ sii8620_stop_video(ctx);
+
+ sii8620_write(ctx, REG_INTR8_MASK, 0);
+}
+
+static void sii8620_irq_scdt(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR5);
+
+ if (stat & BIT_INTR_SCDT_CHANGE) {
+ u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
+
+ if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+ sii8620_scdt_high(ctx);
+ else
+ sii8620_scdt_low(ctx);
+ }
+
+ sii8620_write(ctx, REG_INTR5, stat);
+}
+
+static void sii8620_new_vsi(struct sii8620 *ctx)
+{
+ u8 vsif[11];
+
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2,
+ VAL_RX_HDMI_CTRL2_DEFVAL |
+ BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
+ ARRAY_SIZE(vsif));
+}
+
+static void sii8620_new_avi(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+}
+
+static void sii8620_irq_infr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR8)
+ & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
+
+ sii8620_write(ctx, REG_INTR8, stat);
+
+ if (stat & BIT_CEA_NEW_VSI)
+ sii8620_new_vsi(ctx);
+
+ if (stat & BIT_CEA_NEW_AVI)
+ sii8620_new_avi(ctx);
+
+ if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
+ sii8620_start_video(ctx);
+}
+
+/* endian agnostic, non-volatile version of test_bit */
+static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
+{
+ return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE));
+}
+
+static irqreturn_t sii8620_irq_thread(int irq, void *data)
+{
+ static const struct {
+ int bit;
+ void (*handler)(struct sii8620 *ctx);
+ } irq_vec[] = {
+ { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
+ { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
+ { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+ { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
+ { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+ { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+ { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
+ { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
+ };
+ struct sii8620 *ctx = data;
+ u8 stats[LEN_FAST_INTR_STAT];
+ int i, ret;
+
+ mutex_lock(&ctx->lock);
+
+ sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats));
+ for (i = 0; i < ARRAY_SIZE(irq_vec); ++i)
+ if (sii8620_test_bit(irq_vec[i].bit, stats))
+ irq_vec[i].handler(ctx);
+
+ sii8620_mt_work(ctx);
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret);
+ sii8620_mhl_disconnected(ctx);
+ }
+ mutex_unlock(&ctx->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void sii8620_cable_in(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+ u8 ver[5];
+ int ret;
+
+ ret = sii8620_hw_on(ctx);
+ if (ret) {
+ dev_err(dev, "Error powering on, %d.\n", ret);
+ return;
+ }
+ sii8620_hw_reset(ctx);
+
+ sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0],
+ ver[3], ver[2], ver[4]);
+
+ sii8620_write(ctx, REG_DPD,
+ BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN);
+
+ sii8620_xtal_set_rate(ctx);
+ sii8620_disconnect(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG
+ | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734,
+ REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN,
+ );
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ enable_irq(to_i2c_client(ctx->dev)->irq);
+}
+
+static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii8620, bridge);
+}
+
+static bool sii8620_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ bool ret = false;
+ int max_clock = 74250;
+
+ mutex_lock(&ctx->lock);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ goto out;
+
+ if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
+ max_clock = 300000;
+
+ ret = mode->clock <= max_clock;
+
+out:
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+ .mode_fixup = sii8620_mode_fixup,
+};
+
+static int sii8620_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct sii8620 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->mt_queue);
+
+ ctx->clk_xtal = devm_clk_get(dev, "xtal");
+ if (IS_ERR(ctx->clk_xtal)) {
+ dev_err(dev, "failed to get xtal clock from DT\n");
+ return PTR_ERR(ctx->clk_xtal);
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "no irq provided\n");
+ return -EINVAL;
+ }
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sii8620_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "sii8620", ctx);
+ if (ret)
+ return ret;
+
+ ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(dev, "failed to get reset gpio from DT\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ctx->supplies[0].supply = "cvcc10";
+ ctx->supplies[1].supply = "iovcc18";
+ ret = devm_regulator_bulk_get(dev, 2, ctx->supplies);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sii8620_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ sii8620_cable_in(ctx);
+
+ return 0;
+}
+
+static int sii8620_remove(struct i2c_client *client)
+{
+ struct sii8620 *ctx = i2c_get_clientdata(client);
+
+ disable_irq(to_i2c_client(ctx->dev)->irq);
+ drm_bridge_remove(&ctx->bridge);
+ sii8620_hw_off(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id sii8620_dt_match[] = {
+ { .compatible = "sil,sii8620" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sii8620_dt_match);
+
+static const struct i2c_device_id sii8620_id[] = {
+ { "sii8620", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, sii8620_id);
+static struct i2c_driver sii8620_driver = {
+ .driver = {
+ .name = "sii8620",
+ .of_match_table = of_match_ptr(sii8620_dt_match),
+ },
+ .probe = sii8620_probe,
+ .remove = sii8620_remove,
+ .id_table = sii8620_id,
+};
+
+module_i2c_driver(sii8620_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h
new file mode 100644
index 000000000000..6ff616a4f6ce
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.h
@@ -0,0 +1,1517 @@
+/*
+ * Registers of Silicon Image SiI8620 Mobile HD Transmitter
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SIL_SII8620_H__
+#define __SIL_SII8620_H__
+
+/* Vendor ID Low byte, default value: 0x01 */
+#define REG_VND_IDL 0x0000
+
+/* Vendor ID High byte, default value: 0x00 */
+#define REG_VND_IDH 0x0001
+
+/* Device ID Low byte, default value: 0x60 */
+#define REG_DEV_IDL 0x0002
+
+/* Device ID High byte, default value: 0x86 */
+#define REG_DEV_IDH 0x0003
+
+/* Device Revision, default value: 0x10 */
+#define REG_DEV_REV 0x0004
+
+/* OTP DBYTE510, default value: 0x00 */
+#define REG_OTP_DBYTE510 0x0006
+
+/* System Control #1, default value: 0x00 */
+#define REG_SYS_CTRL1 0x0008
+#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7)
+#define BIT_SYS_CTRL1_VSYNCPIN BIT(6)
+#define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5)
+#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4)
+#define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3)
+#define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2)
+#define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1)
+#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0)
+
+/* System Control DPD, default value: 0x90 */
+#define REG_DPD 0x000b
+#define BIT_DPD_PWRON_PLL BIT(7)
+#define BIT_DPD_PDNTX12 BIT(6)
+#define BIT_DPD_PDNRX12 BIT(5)
+#define BIT_DPD_OSC_EN BIT(4)
+#define BIT_DPD_PWRON_HSIC BIT(3)
+#define BIT_DPD_PDIDCK_N BIT(2)
+#define BIT_DPD_PD_MHL_CLK_N BIT(1)
+
+/* Dual link Control, default value: 0x00 */
+#define REG_DCTL 0x000d
+#define BIT_DCTL_TDM_LCLK_PHASE BIT(7)
+#define BIT_DCTL_HSIC_CLK_PHASE BIT(6)
+#define BIT_DCTL_CTS_TCK_PHASE BIT(5)
+#define BIT_DCTL_EXT_DDC_SEL BIT(4)
+#define BIT_DCTL_TRANSCODE BIT(3)
+#define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2)
+#define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1)
+#define BIT_DCTL_TCLKNX_PHASE BIT(0)
+
+/* PWD Software Reset, default value: 0x20 */
+#define REG_PWD_SRST 0x000e
+#define BIT_PWD_SRST_COC_DOC_RST BIT(7)
+#define BIT_PWD_SRST_CBUS_RST_SW BIT(6)
+#define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5)
+#define BIT_PWD_SRST_MHLFIFO_RST BIT(4)
+#define BIT_PWD_SRST_CBUS_RST BIT(3)
+#define BIT_PWD_SRST_SW_RST_AUTO BIT(2)
+#define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1)
+#define BIT_PWD_SRST_SW_RST BIT(0)
+
+/* AKSV_1, default value: 0x00 */
+#define REG_AKSV_1 0x001d
+
+/* Video H Resolution #1, default value: 0x00 */
+#define REG_H_RESL 0x003a
+
+/* Video Mode, default value: 0x00 */
+#define REG_VID_MODE 0x004a
+#define BIT_VID_MODE_M1080P BIT(6)
+
+/* Video Input Mode, default value: 0xc0 */
+#define REG_VID_OVRRD 0x0051
+#define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7)
+#define BIT_VID_OVRRD_M1080P_OVRRD BIT(6)
+#define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5)
+#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4)
+#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3)
+#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2)
+#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0)
+
+/* I2C Address reassignment, default value: 0x00 */
+#define REG_PAGE_MHLSPEC_ADDR 0x0057
+#define REG_PAGE7_ADDR 0x0058
+#define REG_PAGE8_ADDR 0x005c
+
+/* Fast Interrupt Status, default value: 0x00 */
+#define REG_FAST_INTR_STAT 0x005f
+#define LEN_FAST_INTR_STAT 7
+#define BIT_FAST_INTR_STAT_TIMR 8
+#define BIT_FAST_INTR_STAT_INT2 9
+#define BIT_FAST_INTR_STAT_DDC 10
+#define BIT_FAST_INTR_STAT_SCDT 11
+#define BIT_FAST_INTR_STAT_INFR 13
+#define BIT_FAST_INTR_STAT_EDID 14
+#define BIT_FAST_INTR_STAT_HDCP 15
+#define BIT_FAST_INTR_STAT_MSC 16
+#define BIT_FAST_INTR_STAT_MERR 17
+#define BIT_FAST_INTR_STAT_G2WB 18
+#define BIT_FAST_INTR_STAT_G2WB_ERR 19
+#define BIT_FAST_INTR_STAT_DISC 28
+#define BIT_FAST_INTR_STAT_BLOCK 30
+#define BIT_FAST_INTR_STAT_LTRN 31
+#define BIT_FAST_INTR_STAT_HDCP2 32
+#define BIT_FAST_INTR_STAT_TDM 42
+#define BIT_FAST_INTR_STAT_COC 51
+
+/* GPIO Control, default value: 0x15 */
+#define REG_GPIO_CTRL1 0x006e
+#define BIT_CTRL1_GPIO_I_8 BIT(5)
+#define BIT_CTRL1_GPIO_OEN_8 BIT(4)
+#define BIT_CTRL1_GPIO_I_7 BIT(3)
+#define BIT_CTRL1_GPIO_OEN_7 BIT(2)
+#define BIT_CTRL1_GPIO_I_6 BIT(1)
+#define BIT_CTRL1_GPIO_OEN_6 BIT(0)
+
+/* Interrupt Control, default value: 0x06 */
+#define REG_INT_CTRL 0x006f
+#define BIT_INT_CTRL_SOFTWARE_WP BIT(7)
+#define BIT_INT_CTRL_INTR_OD BIT(2)
+#define BIT_INT_CTRL_INTR_POLARITY BIT(1)
+
+/* Interrupt State, default value: 0x00 */
+#define REG_INTR_STATE 0x0070
+#define BIT_INTR_STATE_INTR_STATE BIT(0)
+
+/* Interrupt Source #1, default value: 0x00 */
+#define REG_INTR1 0x0071
+
+/* Interrupt Source #2, default value: 0x00 */
+#define REG_INTR2 0x0072
+
+/* Interrupt Source #3, default value: 0x01 */
+#define REG_INTR3 0x0073
+#define BIT_DDC_CMD_DONE BIT(3)
+
+/* Interrupt Source #5, default value: 0x00 */
+#define REG_INTR5 0x0074
+
+/* Interrupt #1 Mask, default value: 0x00 */
+#define REG_INTR1_MASK 0x0075
+
+/* Interrupt #2 Mask, default value: 0x00 */
+#define REG_INTR2_MASK 0x0076
+
+/* Interrupt #3 Mask, default value: 0x00 */
+#define REG_INTR3_MASK 0x0077
+
+/* Interrupt #5 Mask, default value: 0x00 */
+#define REG_INTR5_MASK 0x0078
+#define BIT_INTR_SCDT_CHANGE BIT(0)
+
+/* Hot Plug Connection Control, default value: 0x45 */
+#define REG_HPD_CTRL 0x0079
+#define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7)
+#define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6)
+#define BIT_HPD_CTRL_HPD_HIGH BIT(5)
+#define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4)
+#define BIT_HPD_CTRL_GPIO_I_1 BIT(3)
+#define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2)
+#define BIT_HPD_CTRL_GPIO_I_0 BIT(1)
+#define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0)
+
+/* GPIO Control, default value: 0x55 */
+#define REG_GPIO_CTRL 0x007a
+#define BIT_CTRL_GPIO_I_5 BIT(7)
+#define BIT_CTRL_GPIO_OEN_5 BIT(6)
+#define BIT_CTRL_GPIO_I_4 BIT(5)
+#define BIT_CTRL_GPIO_OEN_4 BIT(4)
+#define BIT_CTRL_GPIO_I_3 BIT(3)
+#define BIT_CTRL_GPIO_OEN_3 BIT(2)
+#define BIT_CTRL_GPIO_I_2 BIT(1)
+#define BIT_CTRL_GPIO_OEN_2 BIT(0)
+
+/* Interrupt Source 7, default value: 0x00 */
+#define REG_INTR7 0x007b
+
+/* Interrupt Source 8, default value: 0x00 */
+#define REG_INTR8 0x007c
+
+/* Interrupt #7 Mask, default value: 0x00 */
+#define REG_INTR7_MASK 0x007d
+
+/* Interrupt #8 Mask, default value: 0x00 */
+#define REG_INTR8_MASK 0x007e
+#define BIT_CEA_NEW_VSI BIT(2)
+#define BIT_CEA_NEW_AVI BIT(1)
+
+/* IEEE, default value: 0x10 */
+#define REG_TMDS_CCTRL 0x0080
+#define BIT_TMDS_CCTRL_TMDS_OE BIT(4)
+
+/* TMDS Control #4, default value: 0x02 */
+#define REG_TMDS_CTRL4 0x0085
+#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1)
+#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0)
+
+/* BIST CNTL, default value: 0x00 */
+#define REG_BIST_CTRL 0x00bb
+#define BIT_RXBIST_VGB_EN BIT(7)
+#define BIT_TXBIST_VGB_EN BIT(6)
+#define BIT_BIST_START_SEL BIT(5)
+#define BIT_BIST_START_BIT BIT(4)
+#define BIT_BIST_ALWAYS_ON BIT(3)
+#define BIT_BIST_TRANS BIT(2)
+#define BIT_BIST_RESET BIT(1)
+#define BIT_BIST_EN BIT(0)
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_TEST_SEL 0x00bd
+#define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f
+
+/* BIST VIDEO_MODE, default value: 0x00 */
+#define REG_BIST_VIDEO_MODE 0x00be
+#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_DURATION_0 0x00bf
+
+/* BIST DURATION1, default value: 0x00 */
+#define REG_BIST_DURATION_1 0x00c0
+
+/* BIST DURATION2, default value: 0x00 */
+#define REG_BIST_DURATION_2 0x00c1
+
+/* BIST 8BIT_PATTERN, default value: 0x00 */
+#define REG_BIST_8BIT_PATTERN 0x00c2
+
+/* LM DDC, default value: 0x80 */
+#define REG_LM_DDC 0x00c7
+#define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7)
+
+#define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5)
+#define BIT_LM_DDC_DDC_TPI_SW BIT(2)
+#define BIT_LM_DDC_DDC_GRANT BIT(1)
+#define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0)
+
+/* DDC I2C Manual, default value: 0x03 */
+#define REG_DDC_MANUAL 0x00ec
+#define BIT_DDC_MANUAL_MAN_DDC BIT(7)
+#define BIT_DDC_MANUAL_VP_SEL BIT(6)
+#define BIT_DDC_MANUAL_DSDA BIT(5)
+#define BIT_DDC_MANUAL_DSCL BIT(4)
+#define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3)
+#define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2)
+#define BIT_DDC_MANUAL_IO_DSDA BIT(1)
+#define BIT_DDC_MANUAL_IO_DSCL BIT(0)
+
+/* DDC I2C Target Slave Address, default value: 0x00 */
+#define REG_DDC_ADDR 0x00ed
+#define MSK_DDC_ADDR_DDC_ADDR 0xfe
+
+/* DDC I2C Target Segment Address, default value: 0x00 */
+#define REG_DDC_SEGM 0x00ee
+
+/* DDC I2C Target Offset Address, default value: 0x00 */
+#define REG_DDC_OFFSET 0x00ef
+
+/* DDC I2C Data In count #1, default value: 0x00 */
+#define REG_DDC_DIN_CNT1 0x00f0
+
+/* DDC I2C Data In count #2, default value: 0x00 */
+#define REG_DDC_DIN_CNT2 0x00f1
+#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03
+
+/* DDC I2C Status, default value: 0x04 */
+#define REG_DDC_STATUS 0x00f2
+#define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6)
+#define BIT_DDC_STATUS_DDC_NO_ACK BIT(5)
+#define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4)
+#define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3)
+#define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2)
+#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_SUE BIT(1)
+#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0)
+
+/* DDC I2C Command, default value: 0x70 */
+#define REG_DDC_CMD 0x00f3
+#define BIT_DDC_CMD_HDCP_DDC_EN BIT(6)
+#define BIT_DDC_CMD_SDA_DEL_EN BIT(5)
+#define BIT_DDC_CMD_DDC_FLT_EN BIT(4)
+
+#define MSK_DDC_CMD_DDC_CMD 0x0f
+#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04
+#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09
+#define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f
+
+/* DDC I2C FIFO Data In/Out, default value: 0x00 */
+#define REG_DDC_DATA 0x00f4
+
+/* DDC I2C Data Out Counter, default value: 0x00 */
+#define REG_DDC_DOUT_CNT 0x00f5
+#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7)
+#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f
+
+/* DDC I2C Delay Count, default value: 0x14 */
+#define REG_DDC_DELAY_CNT 0x00f6
+
+/* Test Control, default value: 0x80 */
+#define REG_TEST_TXCTRL 0x00f7
+#define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7)
+#define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6)
+#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c
+#define BIT_TEST_TXCTRL_HDMI_MODE BIT(1)
+#define BIT_TEST_TXCTRL_TST_PLLCK BIT(0)
+
+/* CBUS Address, default value: 0x00 */
+#define REG_PAGE_CBUS_ADDR 0x00f8
+
+/* I2C Device Address re-assignment */
+#define REG_PAGE1_ADDR 0x00fc
+#define REG_PAGE2_ADDR 0x00fd
+#define REG_PAGE3_ADDR 0x00fe
+#define REG_HW_TPI_ADDR 0x00ff
+
+/* USBT CTRL0, default value: 0x00 */
+#define REG_UTSRST 0x0100
+#define BIT_UTSRST_FC_SRST BIT(5)
+#define BIT_UTSRST_KEEPER_SRST BIT(4)
+#define BIT_UTSRST_HTX_SRST BIT(3)
+#define BIT_UTSRST_TRX_SRST BIT(2)
+#define BIT_UTSRST_TTX_SRST BIT(1)
+#define BIT_UTSRST_HRX_SRST BIT(0)
+
+/* HSIC RX Control3, default value: 0x07 */
+#define REG_HRXCTRL3 0x0104
+#define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0
+#define BIT_HRXCTRL3_HRX_OUT_EN BIT(2)
+#define BIT_HRXCTRL3_STATUS_EN BIT(1)
+#define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0)
+
+/* HSIC RX INT Registers */
+#define REG_HRXINTL 0x0111
+#define REG_HRXINTH 0x0112
+
+/* TDM TX NUMBITS, default value: 0x0c */
+#define REG_TTXNUMB 0x0116
+#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0
+#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3)
+#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07
+
+/* TDM TX NUMSPISYM, default value: 0x04 */
+#define REG_TTXSPINUMS 0x0117
+
+/* TDM TX NUMHSICSYM, default value: 0x14 */
+#define REG_TTXHSICNUMS 0x0118
+
+/* TDM TX NUMTOTSYM, default value: 0x18 */
+#define REG_TTXTOTNUMS 0x0119
+
+/* TDM TX INT Low, default value: 0x00 */
+#define REG_TTXINTL 0x0136
+#define BIT_TTXINTL_TTX_INTR7 BIT(7)
+#define BIT_TTXINTL_TTX_INTR6 BIT(6)
+#define BIT_TTXINTL_TTX_INTR5 BIT(5)
+#define BIT_TTXINTL_TTX_INTR4 BIT(4)
+#define BIT_TTXINTL_TTX_INTR3 BIT(3)
+#define BIT_TTXINTL_TTX_INTR2 BIT(2)
+#define BIT_TTXINTL_TTX_INTR1 BIT(1)
+#define BIT_TTXINTL_TTX_INTR0 BIT(0)
+
+/* TDM TX INT High, default value: 0x00 */
+#define REG_TTXINTH 0x0137
+#define BIT_TTXINTH_TTX_INTR15 BIT(7)
+#define BIT_TTXINTH_TTX_INTR14 BIT(6)
+#define BIT_TTXINTH_TTX_INTR13 BIT(5)
+#define BIT_TTXINTH_TTX_INTR12 BIT(4)
+#define BIT_TTXINTH_TTX_INTR11 BIT(3)
+#define BIT_TTXINTH_TTX_INTR10 BIT(2)
+#define BIT_TTXINTH_TTX_INTR9 BIT(1)
+#define BIT_TTXINTH_TTX_INTR8 BIT(0)
+
+/* TDM RX Control, default value: 0x1c */
+#define REG_TRXCTRL 0x013b
+#define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4)
+#define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3)
+#define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07
+
+/* TDM RX NUMSPISYM, default value: 0x04 */
+#define REG_TRXSPINUMS 0x013c
+
+/* TDM RX NUMHSICSYM, default value: 0x14 */
+#define REG_TRXHSICNUMS 0x013d
+
+/* TDM RX NUMTOTSYM, default value: 0x18 */
+#define REG_TRXTOTNUMS 0x013e
+
+/* TDM RX Status 2nd, default value: 0x00 */
+#define REG_TRXSTA2 0x015c
+
+/* TDM RX INT Low, default value: 0x00 */
+#define REG_TRXINTL 0x0163
+
+/* TDM RX INT High, default value: 0x00 */
+#define REG_TRXINTH 0x0164
+
+/* TDM RX INTMASK High, default value: 0x00 */
+#define REG_TRXINTMH 0x0166
+
+/* HSIC TX CTRL, default value: 0x00 */
+#define REG_HTXCTRL 0x0169
+#define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4)
+#define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3)
+#define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2)
+#define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1)
+#define BIT_HTXCTRL_HTX_DRVRST1 BIT(0)
+
+/* HSIC TX INT Low, default value: 0x00 */
+#define REG_HTXINTL 0x017d
+
+/* HSIC TX INT High, default value: 0x00 */
+#define REG_HTXINTH 0x017e
+
+/* HSIC Keeper, default value: 0x00 */
+#define REG_KEEPER 0x0181
+#define MSK_KEEPER_KEEPER_MODE_1_0 0x03
+
+/* HSIC Flow Control General, default value: 0x02 */
+#define REG_FCGC 0x0183
+#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1)
+#define BIT_FCGC_HSIC_FC_ENABLE BIT(0)
+
+/* HSIC Flow Control CTR13, default value: 0xfc */
+#define REG_FCCTR13 0x0191
+
+/* HSIC Flow Control CTR14, default value: 0xff */
+#define REG_FCCTR14 0x0192
+
+/* HSIC Flow Control CTR15, default value: 0xff */
+#define REG_FCCTR15 0x0193
+
+/* HSIC Flow Control CTR50, default value: 0x03 */
+#define REG_FCCTR50 0x01b6
+
+/* HSIC Flow Control INTR0, default value: 0x00 */
+#define REG_FCINTR0 0x01ec
+#define REG_FCINTR1 0x01ed
+#define REG_FCINTR2 0x01ee
+#define REG_FCINTR3 0x01ef
+#define REG_FCINTR4 0x01f0
+#define REG_FCINTR5 0x01f1
+#define REG_FCINTR6 0x01f2
+#define REG_FCINTR7 0x01f3
+
+/* TDM Low Latency, default value: 0x20 */
+#define REG_TDMLLCTL 0x01fc
+#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0
+#define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30
+#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c
+#define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1)
+#define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0)
+
+/* TMDS 0 Clock Control, default value: 0x10 */
+#define REG_TMDS0_CCTRL1 0x0210
+#define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0
+#define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30
+
+/* TMDS Clock Enable, default value: 0x00 */
+#define REG_TMDS_CLK_EN 0x0211
+#define BIT_TMDS_CLK_EN_CLK_EN BIT(0)
+
+/* TMDS Channel Enable, default value: 0x00 */
+#define REG_TMDS_CH_EN 0x0212
+#define BIT_TMDS_CH_EN_CH0_EN BIT(4)
+#define BIT_TMDS_CH_EN_CH12_EN BIT(0)
+
+/* BGR_BIAS, default value: 0x07 */
+#define REG_BGR_BIAS 0x0215
+#define BIT_BGR_BIAS_BGR_EN BIT(7)
+#define MSK_BGR_BIAS_BIAS_BGR_D 0x0f
+
+/* TMDS 0 Digital I2C BW, default value: 0x0a */
+#define REG_ALICE0_BW_I2C 0x0231
+
+/* TMDS 0 Digital Zone Control, default value: 0xe0 */
+#define REG_ALICE0_ZONE_CTRL 0x024c
+#define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7)
+#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6)
+#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30
+#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f
+
+/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */
+#define REG_ALICE0_MODE_CTRL 0x024d
+#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c
+#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03
+
+/* MHL Tx Control 6th, default value: 0xa0 */
+#define REG_MHLTX_CTL6 0x0285
+#define MSK_MHLTX_CTL6_EMI_SEL 0xe0
+#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03
+
+/* Packet Filter0, default value: 0x00 */
+#define REG_PKT_FILTER_0 0x0290
+#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7)
+#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6)
+#define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5)
+#define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4)
+#define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3)
+#define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2)
+#define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1)
+#define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0)
+
+/* Packet Filter1, default value: 0x00 */
+#define REG_PKT_FILTER_1 0x0291
+#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7)
+#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6)
+#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3)
+#define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2)
+#define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1)
+#define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0)
+
+/* TMDS Clock Status, default value: 0x10 */
+#define REG_TMDS_CSTAT_P3 0x02a0
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5)
+#define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3)
+#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2)
+#define BIT_TMDS_CSTAT_P3_SCDT BIT(1)
+#define BIT_TMDS_CSTAT_P3_CKDT BIT(0)
+
+/* RX_HDMI Control, default value: 0x10 */
+#define REG_RX_HDMI_CTRL0 0x02a1
+#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0)
+
+/* RX_HDMI Control, default value: 0x38 */
+#define REG_RX_HDMI_CTRL2 0x02a3
+#define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0
+#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4)
+#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3)
+#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0)
+
+/* RX_HDMI Control, default value: 0x0f */
+#define REG_RX_HDMI_CTRL3 0x02a4
+#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f
+
+/* rx_hdmi Clear Buffer, default value: 0x00 */
+#define REG_RX_HDMI_CLR_BUFFER 0x02ac
+#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0
+#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3)
+#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2)
+#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0)
+
+/* RX_HDMI VSI Header1, default value: 0x00 */
+#define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8
+
+/* RX_HDMI VSI MHL Monitor, default value: 0x3c */
+#define REG_RX_HDMI_VSIF_MHL_MON 0x02d7
+
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03
+
+/* Interrupt Source 9, default value: 0x00 */
+#define REG_INTR9 0x02e0
+#define BIT_INTR9_EDID_ERROR BIT(6)
+#define BIT_INTR9_EDID_DONE BIT(5)
+#define BIT_INTR9_DEVCAP_DONE BIT(4)
+
+/* Interrupt 9 Mask, default value: 0x00 */
+#define REG_INTR9_MASK 0x02e1
+
+/* TPI CBUS Start, default value: 0x00 */
+#define REG_TPI_CBUS_START 0x02e2
+#define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7)
+#define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6)
+#define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5)
+#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4)
+#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3)
+#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2)
+#define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1)
+#define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0)
+
+/* EDID Control, default value: 0x10 */
+#define REG_EDID_CTRL 0x02e3
+#define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7)
+#define BIT_EDID_CTRL_XDEVCAP_EN BIT(6)
+#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5)
+#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4)
+#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3)
+#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2)
+#define BIT_EDID_CTRL_INVALID_BKSV BIT(1)
+#define BIT_EDID_CTRL_EDID_MODE_EN BIT(0)
+
+/* EDID FIFO Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR 0x02e9
+
+/* EDID FIFO Write Data, default value: 0x00 */
+#define REG_EDID_FIFO_WR_DATA 0x02ea
+
+/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR_MON 0x02eb
+
+/* EDID FIFO Read Data, default value: 0x00 */
+#define REG_EDID_FIFO_RD_DATA 0x02ec
+
+/* EDID DDC Segment Pointer, default value: 0x00 */
+#define REG_EDID_START_EXT 0x02ed
+
+/* TX IP BIST CNTL and Status, default value: 0x00 */
+#define REG_TX_IP_BIST_CNTLSTA 0x02f2
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3)
+#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0)
+
+/* TX IP BIST INST LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_INST_LOW 0x02f3
+#define REG_TX_IP_BIST_INST_HIGH 0x02f4
+
+/* TX IP BIST PATTERN LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_PAT_LOW 0x02f5
+#define REG_TX_IP_BIST_PAT_HIGH 0x02f6
+
+/* TX IP BIST CONFIGURE LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_CONF_LOW 0x02f7
+#define REG_TX_IP_BIST_CONF_HIGH 0x02f8
+
+/* E-MSC General Control, default value: 0x80 */
+#define REG_GENCTL 0x0300
+#define BIT_GENCTL_SPEC_TRANS_DIS BIT(7)
+#define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6)
+#define BIT_GENCTL_SPI_MISO_EDGE BIT(5)
+#define BIT_GENCTL_SPI_MOSI_EDGE BIT(4)
+#define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3)
+#define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2)
+#define BIT_GENCTL_START_TRAIN_SEQ BIT(1)
+#define BIT_GENCTL_EMSC_EN BIT(0)
+
+/* E-MSC Comma ErrorCNT, default value: 0x03 */
+#define REG_COMMECNT 0x0305
+#define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7)
+#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f
+
+/* E-MSC RFIFO ByteCnt, default value: 0x00 */
+#define REG_EMSCRFIFOBCNTL 0x031a
+#define REG_EMSCRFIFOBCNTH 0x031b
+
+/* SPI Burst Cnt Status, default value: 0x00 */
+#define REG_SPIBURSTCNT 0x031e
+
+/* SPI Burst Status and SWRST, default value: 0x00 */
+#define REG_SPIBURSTSTAT 0x0322
+#define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7)
+#define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6)
+#define BIT_SPIBURSTSTAT_SPI_SRST BIT(5)
+#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0)
+
+/* E-MSC 1st Interrupt, default value: 0x00 */
+#define REG_EMSCINTR 0x0323
+#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7)
+#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6)
+#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5)
+#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4)
+#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3)
+#define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2)
+#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1)
+#define BIT_EMSCINTR_SPI_DVLD BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK 0x0324
+
+/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_XMIT_WRITE_PORT 0x032a
+
+/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_RCV_READ_PORT 0x032b
+
+/* E-MSC 2nd Interrupt, default value: 0x00 */
+#define REG_EMSCINTR1 0x032c
+#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK1 0x032d
+#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0)
+
+/* MHL Top Ctl, default value: 0x00 */
+#define REG_MHL_TOP_CTL 0x0330
+#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7)
+#define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6)
+#define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03
+
+/* MHL DataPath 1st Ctl, default value: 0xbc */
+#define REG_MHL_DP_CTL0 0x0331
+#define BIT_MHL_DP_CTL0_DP_OE BIT(7)
+#define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6)
+#define MSK_MHL_DP_CTL0_TX_OE 0x3f
+
+/* MHL DataPath 2nd Ctl, default value: 0xbb */
+#define REG_MHL_DP_CTL1 0x0332
+#define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0
+#define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f
+
+/* MHL DataPath 3rd Ctl, default value: 0x2f */
+#define REG_MHL_DP_CTL2 0x0333
+#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7)
+#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30
+#define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c
+#define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03
+
+/* MHL DataPath 4th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL3 0x0334
+#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 5th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL4 0x0335
+#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 6th Ctl, default value: 0x3f */
+#define REG_MHL_DP_CTL5 0x0336
+#define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7)
+#define BIT_MHL_DP_CTL5_RSEN_EN BIT(6)
+#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30
+#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c
+#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03
+
+/* MHL PLL 1st Ctl, default value: 0x05 */
+#define REG_MHL_PLL_CTL0 0x0337
+#define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7)
+
+#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00
+
+#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00
+
+#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1)
+#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0)
+
+/* MHL PLL 3rd Ctl, default value: 0x80 */
+#define REG_MHL_PLL_CTL2 0x0339
+#define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7)
+#define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3)
+#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2)
+#define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03
+
+/* MHL CBUS 1st Ctl, default value: 0x12 */
+#define REG_MHL_CBUS_CTL0 0x0340
+#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7)
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c
+
+#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03
+
+/* MHL CBUS 2nd Ctl, default value: 0x03 */
+#define REG_MHL_CBUS_CTL1 0x0341
+#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07
+#define VAL_MHL_CBUS_CTL1_0888_OHM 0x00
+#define VAL_MHL_CBUS_CTL1_1115_OHM 0x04
+#define VAL_MHL_CBUS_CTL1_1378_OHM 0x07
+
+/* MHL CoC 1st Ctl, default value: 0xc3 */
+#define REG_MHL_COC_CTL0 0x0342
+#define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7)
+#define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70
+#define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07
+
+/* MHL CoC 2nd Ctl, default value: 0x87 */
+#define REG_MHL_COC_CTL1 0x0343
+#define BIT_MHL_COC_CTL1_COC_EN BIT(7)
+#define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f
+
+/* MHL CoC 4th Ctl, default value: 0x00 */
+#define REG_MHL_COC_CTL3 0x0345
+#define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0)
+
+/* MHL CoC 5th Ctl, default value: 0x28 */
+#define REG_MHL_COC_CTL4 0x0346
+#define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0
+#define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f
+
+/* MHL CoC 6th Ctl, default value: 0x0d */
+#define REG_MHL_COC_CTL5 0x0347
+
+/* MHL DoC 1st Ctl, default value: 0x18 */
+#define REG_MHL_DOC_CTL0 0x0349
+#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7)
+#define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38
+#define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06
+#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0)
+
+/* MHL DataPath 7th Ctl, default value: 0x2a */
+#define REG_MHL_DP_CTL6 0x0350
+#define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5)
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4)
+#define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3)
+#define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2)
+#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1)
+#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0)
+
+/* MHL DataPath 8th Ctl, default value: 0x06 */
+#define REG_MHL_DP_CTL7 0x0351
+#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0
+#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f
+
+/* Tx Zone Ctl1, default value: 0x00 */
+#define REG_TX_ZONE_CTL1 0x0361
+#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08
+
+/* MHL3 Tx Zone Ctl, default value: 0x00 */
+#define REG_MHL3_TX_ZONE_CTL 0x0364
+#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7)
+#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03
+
+#define MSK_TX_ZONE_CTL3_TX_ZONE 0x03
+#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00
+#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01
+#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02
+
+/* HDCP Polling Control and Status, default value: 0x70 */
+#define REG_HDCP2X_POLL_CS 0x0391
+
+#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4)
+#define MSK_HDCP2X_POLL_CS_ 0x0c
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0)
+
+/* HDCP Interrupt 0, default value: 0x00 */
+#define REG_HDCP2X_INTR0 0x0398
+
+/* HDCP Interrupt 0 Mask, default value: 0x00 */
+#define REG_HDCP2X_INTR0_MASK 0x0399
+
+/* HDCP General Control 0, default value: 0x02 */
+#define REG_HDCP2X_CTRL_0 0x03a0
+#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0)
+
+/* HDCP General Control 1, default value: 0x08 */
+#define REG_HDCP2X_CTRL_1 0x03a1
+#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0)
+
+/* HDCP Misc Control, default value: 0x00 */
+#define REG_HDCP2X_MISC_CTRL 0x03a5
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0)
+
+/* HDCP RPT SMNG K, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_K 0x03a6
+
+/* HDCP RPT SMNG In, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_IN 0x03a7
+
+/* HDCP Auth Status, default value: 0x00 */
+#define REG_HDCP2X_AUTH_STAT 0x03aa
+
+/* HDCP RPT RCVID Out, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVID_OUT 0x03ac
+
+/* HDCP TP1, default value: 0x62 */
+#define REG_HDCP2X_TP1 0x03b4
+
+/* HDCP GP Out 0, default value: 0x00 */
+#define REG_HDCP2X_GP_OUT0 0x03c7
+
+/* HDCP Repeater RCVR ID 0, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVR_ID0 0x03d1
+
+/* HDCP DDCM Status, default value: 0x00 */
+#define REG_HDCP2X_DDCM_STS 0x03d8
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f
+
+/* HDMI2MHL3 Control, default value: 0x0a */
+#define REG_M3_CTRL 0x03e0
+#define BIT_M3_CTRL_H2M_SWRST BIT(4)
+#define BIT_M3_CTRL_SW_MHL3_SEL BIT(3)
+#define BIT_M3_CTRL_M3AV_EN BIT(2)
+#define BIT_M3_CTRL_ENC_TMDS BIT(1)
+#define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0)
+
+#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_ENC_TMDS)
+#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_M3AV_EN \
+ | BIT_M3_CTRL_ENC_TMDS \
+ | BIT_M3_CTRL_MHL3_MASTER_EN)
+
+/* HDMI2MHL3 Port0 Control, default value: 0x04 */
+#define REG_M3_P0CTRL 0x03e1
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4)
+#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3)
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2)
+#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1)
+#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0)
+
+#define REG_M3_POSTM 0x03e2
+#define MSK_M3_POSTM_RRP_DECODE 0xf8
+#define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07
+
+/* HDMI2MHL3 Scramble Control, default value: 0x41 */
+#define REG_M3_SCTRL 0x03e6
+#define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0
+#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0)
+
+/* HSIC Div Ctl, default value: 0x05 */
+#define REG_DIV_CTL_MAIN 0x03f2
+#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c
+#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03
+
+/* MHL Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_DEVCAP_0 0x0400
+
+/* MHL Interrupt 1st Byte, default value: 0x00 */
+#define REG_MHL_INT_0 0x0420
+
+/* Device Status 1st byte, default value: 0x00 */
+#define REG_MHL_STAT_0 0x0430
+
+/* CBUS Scratch Pad 1st Byte, default value: 0x00 */
+#define REG_MHL_SCRPAD_0 0x0440
+
+/* MHL Extended Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_EXTDEVCAP_0 0x0480
+
+/* Device Extended Status 1st byte, default value: 0x00 */
+#define REG_MHL_EXTSTAT_0 0x0490
+
+/* TPI DTD Byte2, default value: 0x00 */
+#define REG_TPI_DTD_B2 0x0602
+
+#define VAL_TPI_QUAN_RANGE_LIMITED 0x01
+#define VAL_TPI_QUAN_RANGE_FULL 0x02
+#define VAL_TPI_FORMAT_RGB 0x00
+#define VAL_TPI_FORMAT_YCBCR444 0x01
+#define VAL_TPI_FORMAT_YCBCR422 0x02
+#define VAL_TPI_FORMAT_INTERNAL_RGB 0x03
+#define VAL_TPI_FORMAT(_fmt, _qr) \
+ (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2))
+
+/* Input Format, default value: 0x00 */
+#define REG_TPI_INPUT 0x0609
+#define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7)
+#define BIT_TPI_INPUT_ENDITHER BIT(6)
+#define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_INPUT_INPUT_FORMAT 0x03
+
+/* Output Format, default value: 0x00 */
+#define REG_TPI_OUTPUT 0x060a
+#define BIT_TPI_OUTPUT_CSCMODE709 BIT(4)
+#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03
+
+/* TPI AVI Check Sum, default value: 0x00 */
+#define REG_TPI_AVI_CHSUM 0x060c
+
+/* TPI System Control, default value: 0x00 */
+#define REG_TPI_SC 0x061a
+#define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7)
+#define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5)
+#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4)
+#define BIT_TPI_SC_TPI_AV_MUTE BIT(3)
+#define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2)
+#define BIT_TPI_SC_DDC_TPI_SW BIT(1)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0)
+
+/* TPI COPP Query Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA1 0x0629
+#define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7)
+#define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6)
+#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30
+#define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00
+#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10
+#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20
+#define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30
+#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2)
+#define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0)
+
+/* TPI COPP Control Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA2 0x062a
+#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5)
+#define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4)
+#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3)
+#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2)
+#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1)
+#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0)
+
+/* TPI Interrupt Enable, default value: 0x00 */
+#define REG_TPI_INTR_EN 0x063c
+
+/* TPI Interrupt Status Low Byte, default value: 0x00 */
+#define REG_TPI_INTR_ST0 0x063d
+#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7)
+#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6)
+#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5)
+#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1)
+#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0)
+
+/* TPI DS BCAPS Status, default value: 0x00 */
+#define REG_TPI_DS_BCAPS 0x0644
+
+/* TPI BStatus1, default value: 0x00 */
+#define REG_TPI_BSTATUS1 0x0645
+#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7)
+#define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f
+
+/* TPI BStatus2, default value: 0x10 */
+#define REG_TPI_BSTATUS2 0x0646
+#define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0
+#define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4)
+#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3)
+#define MSK_TPI_BSTATUS2_DS_DEPTH 0x07
+
+/* TPI HW Optimization Control #3, default value: 0x00 */
+#define REG_TPI_HW_OPT3 0x06bb
+#define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7)
+#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3)
+#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2)
+#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03
+
+/* TPI Info Frame Select, default value: 0x00 */
+#define REG_TPI_INFO_FSEL 0x06bf
+#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5)
+#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07
+
+/* TPI Info Byte #0, default value: 0x00 */
+#define REG_TPI_INFO_B0 0x06c0
+
+/* CoC Status, default value: 0x00 */
+#define REG_COC_STAT_0 0x0700
+#define REG_COC_STAT_1 0x0701
+#define REG_COC_STAT_2 0x0702
+#define REG_COC_STAT_3 0x0703
+#define REG_COC_STAT_4 0x0704
+#define REG_COC_STAT_5 0x0705
+
+/* CoC 1st Ctl, default value: 0x40 */
+#define REG_COC_CTL0 0x0710
+
+/* CoC 2nd Ctl, default value: 0x0a */
+#define REG_COC_CTL1 0x0711
+#define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0
+#define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f
+
+/* CoC 3rd Ctl, default value: 0x14 */
+#define REG_COC_CTL2 0x0712
+#define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0
+#define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f
+
+/* CoC 4th Ctl, default value: 0x40 */
+#define REG_COC_CTL3 0x0713
+#define BIT_COC_CTL3_COC_CTRL3_7 BIT(7)
+#define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f
+
+/* CoC 7th Ctl, default value: 0x00 */
+#define REG_COC_CTL6 0x0716
+#define BIT_COC_CTL6_COC_CTRL6_7 BIT(7)
+#define BIT_COC_CTL6_COC_CTRL6_6 BIT(6)
+#define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f
+
+/* CoC 8th Ctl, default value: 0x06 */
+#define REG_COC_CTL7 0x0717
+#define BIT_COC_CTL7_COC_CTRL7_7 BIT(7)
+#define BIT_COC_CTL7_COC_CTRL7_6 BIT(6)
+#define BIT_COC_CTL7_COC_CTRL7_5 BIT(5)
+#define MSK_COC_CTL7_COC_CTRL7_4_3 0x18
+#define MSK_COC_CTL7_COC_CTRL7_2_0 0x07
+
+/* CoC 10th Ctl, default value: 0x00 */
+#define REG_COC_CTL9 0x0719
+
+/* CoC 11th Ctl, default value: 0x00 */
+#define REG_COC_CTLA 0x071a
+
+/* CoC 12th Ctl, default value: 0x00 */
+#define REG_COC_CTLB 0x071b
+
+/* CoC 13th Ctl, default value: 0x0f */
+#define REG_COC_CTLC 0x071c
+
+/* CoC 14th Ctl, default value: 0x0a */
+#define REG_COC_CTLD 0x071d
+#define BIT_COC_CTLD_COC_CTRLD_7 BIT(7)
+#define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f
+
+/* CoC 15th Ctl, default value: 0x0a */
+#define REG_COC_CTLE 0x071e
+#define BIT_COC_CTLE_COC_CTRLE_7 BIT(7)
+#define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f
+
+/* CoC 16th Ctl, default value: 0x00 */
+#define REG_COC_CTLF 0x071f
+#define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8
+#define MSK_COC_CTLF_COC_CTRLF_2_0 0x07
+
+/* CoC 18th Ctl, default value: 0x32 */
+#define REG_COC_CTL11 0x0721
+#define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0
+#define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f
+
+/* CoC 21st Ctl, default value: 0x00 */
+#define REG_COC_CTL14 0x0724
+#define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0
+#define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f
+
+/* CoC 22nd Ctl, default value: 0x00 */
+#define REG_COC_CTL15 0x0725
+#define BIT_COC_CTL15_COC_CTRL15_7 BIT(7)
+#define MSK_COC_CTL15_COC_CTRL15_6_4 0x70
+#define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f
+
+/* CoC Interrupt, default value: 0x00 */
+#define REG_COC_INTR 0x0726
+
+/* CoC Interrupt Mask, default value: 0x00 */
+#define REG_COC_INTR_MASK 0x0727
+#define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0)
+#define BIT_COC_CALIBRATION_DONE BIT(1)
+
+/* CoC Misc Ctl, default value: 0x00 */
+#define REG_COC_MISC_CTL0 0x0728
+#define BIT_COC_MISC_CTL0_FSM_MON BIT(7)
+
+/* CoC 24th Ctl, default value: 0x00 */
+#define REG_COC_CTL17 0x072a
+#define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0
+#define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f
+
+/* CoC 25th Ctl, default value: 0x00 */
+#define REG_COC_CTL18 0x072b
+#define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0
+#define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f
+
+/* CoC 26th Ctl, default value: 0x00 */
+#define REG_COC_CTL19 0x072c
+#define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0
+#define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f
+
+/* CoC 27th Ctl, default value: 0x00 */
+#define REG_COC_CTL1A 0x072d
+#define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc
+#define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03
+
+/* DoC 9th Status, default value: 0x00 */
+#define REG_DOC_STAT_8 0x0740
+
+/* DoC 10th Status, default value: 0x00 */
+#define REG_DOC_STAT_9 0x0741
+
+/* DoC 5th CFG, default value: 0x00 */
+#define REG_DOC_CFG4 0x074e
+#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f
+
+/* DoC 1st Ctl, default value: 0x40 */
+#define REG_DOC_CTL0 0x0751
+
+/* DoC 7th Ctl, default value: 0x00 */
+#define REG_DOC_CTL6 0x0757
+#define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7)
+#define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6)
+#define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30
+#define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f
+
+/* DoC 8th Ctl, default value: 0x00 */
+#define REG_DOC_CTL7 0x0758
+#define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7)
+#define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6)
+#define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5)
+#define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18
+#define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07
+
+/* DoC 9th Ctl, default value: 0x00 */
+#define REG_DOC_CTL8 0x076c
+#define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7)
+#define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70
+#define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c
+#define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03
+
+/* DoC 10th Ctl, default value: 0x00 */
+#define REG_DOC_CTL9 0x076d
+
+/* DoC 11th Ctl, default value: 0x00 */
+#define REG_DOC_CTLA 0x076e
+
+/* DoC 15th Ctl, default value: 0x00 */
+#define REG_DOC_CTLE 0x0772
+#define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7)
+#define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6)
+#define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30
+#define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f
+
+/* Interrupt Mask 1st, default value: 0x00 */
+#define REG_MHL_INT_0_MASK 0x0580
+
+/* Interrupt Mask 2nd, default value: 0x00 */
+#define REG_MHL_INT_1_MASK 0x0581
+
+/* Interrupt Mask 3rd, default value: 0x00 */
+#define REG_MHL_INT_2_MASK 0x0582
+
+/* Interrupt Mask 4th, default value: 0x00 */
+#define REG_MHL_INT_3_MASK 0x0583
+
+/* MDT Receive Time Out, default value: 0x00 */
+#define REG_MDT_RCV_TIMEOUT 0x0584
+
+/* MDT Transmit Time Out, default value: 0x00 */
+#define REG_MDT_XMIT_TIMEOUT 0x0585
+
+/* MDT Receive Control, default value: 0x00 */
+#define REG_MDT_RCV_CTRL 0x0586
+#define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7)
+#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4)
+#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3)
+#define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0)
+
+/* MDT Receive Read Port, default value: 0x00 */
+#define REG_MDT_RCV_READ_PORT 0x0587
+
+/* MDT Transmit Control, default value: 0x70 */
+#define REG_MDT_XMIT_CTRL 0x0588
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
+#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0)
+
+/* MDT Receive WRITE Port, default value: 0x00 */
+#define REG_MDT_XMIT_WRITE_PORT 0x0589
+
+/* MDT RFIFO Status, default value: 0x00 */
+#define REG_MDT_RFIFO_STAT 0x058a
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f
+
+/* MDT XFIFO Status, default value: 0x80 */
+#define REG_MDT_XFIFO_STAT 0x058b
+#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0
+#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4)
+#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f
+
+/* MDT Interrupt 0, default value: 0x0c */
+#define REG_MDT_INT_0 0x058c
+#define BIT_MDT_RFIFO_DATA_RDY BIT(0)
+#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2)
+#define BIT_MDT_XFIFO_EMPTY BIT(3)
+
+/* MDT Interrupt 0 Mask, default value: 0x00 */
+#define REG_MDT_INT_0_MASK 0x058d
+
+/* MDT Interrupt 1, default value: 0x00 */
+#define REG_MDT_INT_1 0x058e
+#define BIT_MDT_RCV_TIMEOUT BIT(0)
+#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1)
+#define BIT_MDT_RCV_SM_ERROR BIT(2)
+#define BIT_MDT_XMIT_TIMEOUT BIT(5)
+#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6)
+#define BIT_MDT_XMIT_SM_ERROR BIT(7)
+
+/* MDT Interrupt 1 Mask, default value: 0x00 */
+#define REG_MDT_INT_1_MASK 0x058f
+
+/* CBUS Vendor ID, default value: 0x01 */
+#define REG_CBUS_VENDOR_ID 0x0590
+
+/* CBUS Connection Status, default value: 0x00 */
+#define REG_CBUS_STATUS 0x0591
+#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4)
+#define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3)
+#define BIT_CBUS_STATUS_CBUS_HPD BIT(2)
+#define BIT_CBUS_STATUS_MHL_MODE BIT(1)
+#define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0)
+
+/* CBUS Interrupt 1st, default value: 0x00 */
+#define REG_CBUS_INT_0 0x0592
+#define BIT_CBUS_MSC_MT_DONE_NACK BIT(7)
+#define BIT_CBUS_MSC_MR_SET_INT BIT(6)
+#define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5)
+#define BIT_CBUS_MSC_MR_MSC_MSG BIT(4)
+#define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3)
+#define BIT_CBUS_HPD_CHG BIT(2)
+#define BIT_CBUS_MSC_MT_DONE BIT(1)
+#define BIT_CBUS_CNX_CHG BIT(0)
+
+/* CBUS Interrupt Mask 1st, default value: 0x00 */
+#define REG_CBUS_INT_0_MASK 0x0593
+
+/* CBUS Interrupt 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1 0x0594
+#define BIT_CBUS_CMD_ABORT BIT(6)
+#define BIT_CBUS_MSC_ABORT_RCVD BIT(3)
+#define BIT_CBUS_DDC_ABORT BIT(2)
+#define BIT_CBUS_CEC_ABORT BIT(1)
+
+/* CBUS Interrupt Mask 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1_MASK 0x0595
+
+/* CBUS DDC Abort Interrupt, default value: 0x00 */
+#define REG_DDC_ABORT_INT 0x0598
+
+/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */
+#define REG_DDC_ABORT_INT_MASK 0x0599
+
+/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT 0x059a
+
+/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT_MASK 0x059b
+
+/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT 0x059c
+
+/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT_MASK 0x059d
+
+/* CBUS RX DISCOVERY interrupt, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0 0x059e
+
+/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0_MASK 0x059f
+
+/* CBUS_Link_Layer Control #8, default value: 0x00 */
+#define REG_CBUS_LINK_CTRL_8 0x05a7
+
+/* MDT State Machine Status, default value: 0x00 */
+#define REG_MDT_SM_STAT 0x05b5
+#define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0
+#define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f
+
+/* CBUS MSC command trigger, default value: 0x00 */
+#define REG_MSC_COMMAND_START 0x05b8
+#define BIT_MSC_COMMAND_START_DEBUG BIT(5)
+#define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4)
+#define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3)
+#define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2)
+#define BIT_MSC_COMMAND_START_MSC_MSG BIT(1)
+#define BIT_MSC_COMMAND_START_PEER BIT(0)
+
+/* CBUS MSC Command/Offset, default value: 0x00 */
+#define REG_MSC_CMD_OR_OFFSET 0x05b9
+
+/* CBUS MSC Transmit Data */
+#define REG_MSC_1ST_TRANSMIT_DATA 0x05ba
+#define REG_MSC_2ND_TRANSMIT_DATA 0x05bb
+
+/* CBUS MSC Requester Received Data */
+#define REG_MSC_MT_RCVD_DATA0 0x05bc
+#define REG_MSC_MT_RCVD_DATA1 0x05bd
+
+/* CBUS MSC Responder MSC_MSG Received Data */
+#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf
+#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0
+
+/* CBUS MSC Heartbeat Control, default value: 0x27 */
+#define REG_MSC_HEARTBEAT_CTRL 0x05c4
+#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7)
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f
+
+/* CBUS MSC Compatibility Control, default value: 0x02 */
+#define REG_CBUS_MSC_COMPAT_CTRL 0x05c7
+#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2)
+
+/* CBUS3 Converter Control, default value: 0x24 */
+#define REG_CBUS3_CNVT 0x05dc
+#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0
+#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c
+#define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1)
+#define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0)
+
+/* Discovery Control1, default value: 0x24 */
+#define REG_DISC_CTRL1 0x05e0
+#define BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7)
+#define BIT_DISC_CTRL1_HB_ONLY BIT(6)
+#define MSK_DISC_CTRL1_DISC_ATT 0x30
+#define MSK_DISC_CTRL1_DISC_CYC 0x0c
+#define BIT_DISC_CTRL1_DISC_EN BIT(0)
+
+#define VAL_PUP_OFF 0
+#define VAL_PUP_20K 1
+#define VAL_PUP_5K 2
+
+/* Discovery Control4, default value: 0x80 */
+#define REG_DISC_CTRL4 0x05e3
+#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0
+#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30
+#define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | ((pup_idle) << 4))
+
+/* Discovery Control5, default value: 0x03 */
+#define REG_DISC_CTRL5 0x05e4
+#define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3)
+#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03
+
+/* Discovery Control8, default value: 0x81 */
+#define REG_DISC_CTRL8 0x05e7
+#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7)
+#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0)
+
+/* Discovery Control9, default value: 0x54 */
+#define REG_DISC_CTRL9 0x05e8
+#define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7)
+#define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6)
+#define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4)
+#define BIT_DISC_CTRL9_NOMHL_EST BIT(3)
+#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2)
+#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1)
+#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0)
+
+/* Discovery Status1, default value: 0x00 */
+#define REG_DISC_STAT1 0x05eb
+#define BIT_DISC_STAT1_PSM_OVRIDE BIT(5)
+#define MSK_DISC_STAT1_DISC_SM 0x0f
+
+/* Discovery Status2, default value: 0x00 */
+#define REG_DISC_STAT2 0x05ec
+#define BIT_DISC_STAT2_CBUS_OE_POL BIT(6)
+#define BIT_DISC_STAT2_CBUS_SATUS BIT(5)
+#define BIT_DISC_STAT2_RSEN BIT(4)
+
+#define MSK_DISC_STAT2_MHL_VRSN 0x0c
+#define VAL_DISC_STAT2_DEFAULT 0x00
+#define VAL_DISC_STAT2_MHL1_2 0x04
+#define VAL_DISC_STAT2_MHL3 0x08
+#define VAL_DISC_STAT2_RESERVED 0x0c
+
+#define MSK_DISC_STAT2_RGND 0x03
+#define VAL_RGND_OPEN 0x00
+#define VAL_RGND_2K 0x01
+#define VAL_RGND_1K 0x02
+#define VAL_RGND_SHORT 0x03
+
+/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0 0x05ed
+#define BIT_RGND_READY_INT BIT(6)
+#define BIT_CBUS_MHL12_DISCON_INT BIT(5)
+#define BIT_CBUS_MHL3_DISCON_INT BIT(4)
+#define BIT_NOT_MHL_EST_INT BIT(3)
+#define BIT_MHL_EST_INT BIT(2)
+#define BIT_MHL3_EST_INT BIT(1)
+#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \
+ | BIT_CBUS_MHL3_DISCON_INT \
+ | BIT_NOT_MHL_EST_INT)
+
+/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0_MASK 0x05ee
+
+#endif /* __SIL_SII8620_H__ */
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
new file mode 100644
index 000000000000..b054ea349952
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2016 Texas Instruments
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+struct tfp410 {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ struct i2c_adapter *ddc;
+
+ struct device *dev;
+};
+
+static inline struct tfp410 *
+drm_bridge_to_tfp410(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tfp410, bridge);
+}
+
+static inline struct tfp410 *
+drm_connector_to_tfp410(struct drm_connector *connector)
+{
+ return container_of(connector, struct tfp410, connector);
+}
+
+static int tfp410_get_modes(struct drm_connector *connector)
+{
+ struct tfp410 *dvi = drm_connector_to_tfp410(connector);
+ struct edid *edid;
+ int ret;
+
+ if (!dvi->ddc)
+ goto fallback;
+
+ edid = drm_get_edid(connector, dvi->ddc);
+ if (!edid) {
+ DRM_INFO("EDID read failed. Fallback to standard modes\n");
+ goto fallback;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ return drm_add_edid_modes(connector, edid);
+fallback:
+ /* No EDID, fallback on the XGA standard modes */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+ /* And prefer a mode pretty much anything can handle */
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
+ .get_modes = tfp410_get_modes,
+};
+
+static enum drm_connector_status
+tfp410_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tfp410 *dvi = drm_connector_to_tfp410(connector);
+
+ if (dvi->ddc) {
+ if (drm_probe_ddc(dvi->ddc))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs tfp410_con_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = tfp410_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int tfp410_attach(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ dev_err(dvi->dev, "Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(&dvi->connector,
+ &tfp410_con_helper_funcs);
+ ret = drm_connector_init(bridge->dev, &dvi->connector,
+ &tfp410_con_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_connector_attach_encoder(&dvi->connector,
+ bridge->encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs tfp410_bridge_funcs = {
+ .attach = tfp410_attach,
+};
+
+static int tfp410_get_connector_ddc(struct tfp410 *dvi)
+{
+ struct device_node *ep = NULL, *connector_node = NULL;
+ struct device_node *ddc_phandle = NULL;
+ int ret = 0;
+
+ /* port@1 is the connector node */
+ ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 1, -1);
+ if (!ep)
+ goto fail;
+
+ connector_node = of_graph_get_remote_port_parent(ep);
+ if (!connector_node)
+ goto fail;
+
+ ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
+ if (!ddc_phandle)
+ goto fail;
+
+ dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
+ if (dvi->ddc)
+ dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
+ else
+ ret = -EPROBE_DEFER;
+
+fail:
+ of_node_put(ep);
+ of_node_put(connector_node);
+ of_node_put(ddc_phandle);
+ return ret;
+}
+
+static int tfp410_init(struct device *dev)
+{
+ struct tfp410 *dvi;
+ int ret;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
+ if (!dvi)
+ return -ENOMEM;
+ dev_set_drvdata(dev, dvi);
+
+ dvi->bridge.funcs = &tfp410_bridge_funcs;
+ dvi->bridge.of_node = dev->of_node;
+ dvi->dev = dev;
+
+ ret = tfp410_get_connector_ddc(dvi);
+ if (ret)
+ goto fail;
+
+ ret = drm_bridge_add(&dvi->bridge);
+ if (ret) {
+ dev_err(dev, "drm_bridge_add() failed: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ i2c_put_adapter(dvi->ddc);
+ return ret;
+}
+
+static int tfp410_fini(struct device *dev)
+{
+ struct tfp410 *dvi = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&dvi->bridge);
+
+ if (dvi->ddc)
+ i2c_put_adapter(dvi->ddc);
+
+ return 0;
+}
+
+static int tfp410_probe(struct platform_device *pdev)
+{
+ return tfp410_init(&pdev->dev);
+}
+
+static int tfp410_remove(struct platform_device *pdev)
+{
+ return tfp410_fini(&pdev->dev);
+}
+
+static const struct of_device_id tfp410_match[] = {
+ { .compatible = "ti,tfp410" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tfp410_match);
+
+struct platform_driver tfp410_platform_driver = {
+ .probe = tfp410_probe,
+ .remove = tfp410_remove,
+ .driver = {
+ .name = "tfp410-bridge",
+ .of_match_table = tfp410_match,
+ },
+};
+
+#if IS_ENABLED(CONFIG_I2C)
+/* There is currently no i2c functionality. */
+static int tfp410_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int reg;
+
+ if (!client->dev.of_node ||
+ of_property_read_u32(client->dev.of_node, "reg", &reg)) {
+ dev_err(&client->dev,
+ "Can't get i2c reg property from device-tree\n");
+ return -ENXIO;
+ }
+
+ return tfp410_init(&client->dev);
+}
+
+static int tfp410_i2c_remove(struct i2c_client *client)
+{
+ return tfp410_fini(&client->dev);
+}
+
+static const struct i2c_device_id tfp410_i2c_ids[] = {
+ { "tfp410", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);
+
+static struct i2c_driver tfp410_i2c_driver = {
+ .driver = {
+ .name = "tfp410",
+ .of_match_table = of_match_ptr(tfp410_match),
+ },
+ .id_table = tfp410_i2c_ids,
+ .probe = tfp410_i2c_probe,
+ .remove = tfp410_i2c_remove,
+};
+#endif /* IS_ENABLED(CONFIG_I2C) */
+
+static struct {
+ uint i2c:1;
+ uint platform:1;
+} tfp410_registered_driver;
+
+static int __init tfp410_module_init(void)
+{
+ int ret;
+
+#if IS_ENABLED(CONFIG_I2C)
+ ret = i2c_add_driver(&tfp410_i2c_driver);
+ if (ret)
+ pr_err("%s: registering i2c driver failed: %d",
+ __func__, ret);
+ else
+ tfp410_registered_driver.i2c = 1;
+#endif
+
+ ret = platform_driver_register(&tfp410_platform_driver);
+ if (ret)
+ pr_err("%s: registering platform driver failed: %d",
+ __func__, ret);
+ else
+ tfp410_registered_driver.platform = 1;
+
+ if (tfp410_registered_driver.i2c ||
+ tfp410_registered_driver.platform)
+ return 0;
+
+ return ret;
+}
+module_init(tfp410_module_init);
+
+static void __exit tfp410_module_exit(void)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ if (tfp410_registered_driver.i2c)
+ i2c_del_driver(&tfp410_i2c_driver);
+#endif
+ if (tfp410_registered_driver.platform)
+ platform_driver_unregister(&tfp410_platform_driver);
+}
+module_exit(tfp410_module_exit);
+
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("TI TFP410 DVI bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 6c76d125995b..d893ea21a359 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -126,9 +126,7 @@ static const struct file_operations cirrus_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = cirrus_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index daecf1ad76a4..3a6309d7d8e4 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -138,12 +138,12 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
{
struct drm_device *dev = afbdev->helper.dev;
struct cirrus_device *cdev = dev->dev_private;
- u32 bpp, depth;
+ u32 bpp;
u32 size;
struct drm_gem_object *gobj;
-
int ret = 0;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 76bcb43e7c06..2c3c0d4072ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -52,10 +52,10 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
+ u32 bpp;
int ret;
- u32 bpp, depth;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 5e7e63ce7bce..d6da848f7c6f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.ttm_tt_populate = cirrus_ttm_tt_populate,
.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = cirrus_bo_evict_flags,
.move = NULL,
.verify_access = cirrus_bo_verify_access,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 23739609427d..19d7bcb88217 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@@ -74,6 +76,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_release);
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
+ kref_init(&state->ref);
+
/* TODO legacy paths should maybe do a better job about
* setting this appropriately?
*/
@@ -215,22 +219,16 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_atomic_state_clear);
/**
- * drm_atomic_state_free - free all memory for an atomic state
- * @state: atomic state to deallocate
+ * __drm_atomic_state_free - free all memory for an atomic state
+ * @ref: This atomic state to deallocate
*
* This frees all memory associated with an atomic state, including all the
* per-object state for planes, crtcs and connectors.
*/
-void drm_atomic_state_free(struct drm_atomic_state *state)
+void __drm_atomic_state_free(struct kref *ref)
{
- struct drm_device *dev;
- struct drm_mode_config *config;
-
- if (!state)
- return;
-
- dev = state->dev;
- config = &dev->mode_config;
+ struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
+ struct drm_mode_config *config = &state->dev->mode_config;
drm_atomic_state_clear(state);
@@ -243,7 +241,7 @@ void drm_atomic_state_free(struct drm_atomic_state *state)
kfree(state);
}
}
-EXPORT_SYMBOL(drm_atomic_state_free);
+EXPORT_SYMBOL(__drm_atomic_state_free);
/**
* drm_atomic_get_crtc_state - get crtc state
@@ -292,6 +290,23 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
+static void set_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc, s64 __user *fence_ptr)
+{
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
+}
+
+static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ s64 __user *fence_ptr;
+
+ fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@@ -420,18 +435,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
ssize_t expected_size,
bool *replaced)
{
- struct drm_device *dev = crtc->dev;
struct drm_property_blob *new_blob = NULL;
if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
+ new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
if (new_blob == NULL)
return -EINVAL;
- if (expected_size > 0 && expected_size != new_blob->length)
+
+ if (expected_size > 0 && expected_size != new_blob->length) {
+ drm_property_unreference_blob(new_blob);
return -EINVAL;
+ }
}
drm_atomic_replace_property_blob(blob, new_blob, replaced);
+ drm_property_unreference_blob(new_blob);
return 0;
}
@@ -493,6 +511,16 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
+ } else if (property == config->prop_out_fence_ptr) {
+ s64 __user *fence_ptr = u64_to_user_ptr(val);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
@@ -535,6 +563,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = (state->ctm) ? state->ctm->base.id : 0;
else if (property == config->gamma_lut_property)
*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
+ else if (property == config->prop_out_fence_ptr)
+ *val = 0;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else
@@ -606,6 +636,28 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
return 0;
}
+static void drm_atomic_crtc_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+
+ drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
+ drm_printf(p, "\tenable=%d\n", state->enable);
+ drm_printf(p, "\tactive=%d\n", state->active);
+ drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
+ drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
+ drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
+ drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
+ drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
+ drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
+ drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
+ drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
+ drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));
+
+ if (crtc->funcs->atomic_print_state)
+ crtc->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
@@ -690,6 +742,17 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_unreference(fb);
+ } else if (property == config->prop_in_fence_fd) {
+ if (state->fence)
+ return -EINVAL;
+
+ if (U642I64(val) == -1)
+ return 0;
+
+ state->fence = sync_file_get_fence(val);
+ if (!state->fence)
+ return -EINVAL;
+
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
@@ -709,7 +772,9 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
- } else if (property == config->rotation_property) {
+ } else if (property == plane->rotation_property) {
+ if (!is_power_of_2(val & DRM_ROTATE_MASK))
+ return -EINVAL;
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@@ -749,6 +814,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
if (property == config->prop_fb_id) {
*val = (state->fb) ? state->fb->base.id : 0;
+ } else if (property == config->prop_in_fence_fd) {
+ *val = -1;
} else if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->prop_crtc_x) {
@@ -767,7 +834,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
- } else if (property == config->rotation_property) {
+ } else if (property == plane->rotation_property) {
*val = state->rotation;
} else if (property == plane->zpos_property) {
*val = state->zpos;
@@ -837,9 +904,10 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(state->fb->pixel_format);
- DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
+ drm_get_format_name(state->fb->pixel_format,
+ &format_name));
return ret;
}
@@ -880,6 +948,39 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
return 0;
}
+static void drm_atomic_plane_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct drm_plane *plane = state->plane;
+ struct drm_rect src = drm_plane_state_src(state);
+ struct drm_rect dest = drm_plane_state_dest(state);
+
+ drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
+ drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
+ if (state->fb) {
+ struct drm_framebuffer *fb = state->fb;
+ int i, n = drm_format_num_planes(fb->pixel_format);
+ struct drm_format_name_buf format_name;
+
+ drm_printf(p, "\t\tformat=%s\n",
+ drm_get_format_name(fb->pixel_format, &format_name));
+ drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
+ drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
+ drm_printf(p, "\t\tlayers:\n");
+ for (i = 0; i < n; i++) {
+ drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
+ drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
+ }
+ }
+ drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
+ drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
+ drm_printf(p, "\trotation=%x\n", state->rotation);
+
+ if (plane->funcs->atomic_print_state)
+ plane->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
@@ -995,6 +1096,18 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
+static void drm_atomic_connector_print_state(struct drm_printer *p,
+ const struct drm_connector_state *state)
+{
+ struct drm_connector *connector = state->connector;
+
+ drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
+ drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+
+ if (connector->funcs->atomic_print_state)
+ connector->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_connector_get_property - get property value from connector state
* @connector: the drm connector to set a property on
@@ -1133,22 +1246,48 @@ void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
- if (plane_state->fb)
- drm_framebuffer_unreference(plane_state->fb);
- if (fb)
- drm_framebuffer_reference(fb);
- plane_state->fb = fb;
-
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
fb->base.id, plane_state);
else
DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
plane_state);
+
+ drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: dma_fence to use for the plane
+ *
+ * Helper to setup the plane_state fence in case it is not set yet.
+ * By using this drivers doesn't need to worry if the user choose
+ * implicit or explicit fencing.
+ *
+ * This function will not set the fence to the state if it was set
+ * via explicit fencing interfaces on the atomic ioctl. It will
+ * all drope the reference to the fence as we not storing it
+ * anywhere.
+ *
+ * Otherwise, if plane_state->fence is not set this function we
+ * just set it with the received implict fence.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence)
+{
+ if (plane_state->fence) {
+ dma_fence_put(fence);
+ return;
+ }
+
+ plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
+
+/**
* drm_atomic_set_crtc_for_connector - set crtc for connector
* @conn_state: atomic state object for the connector
* @crtc: crtc to use for the connector
@@ -1459,16 +1598,107 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
+static void drm_atomic_print_state(const struct drm_atomic_state *state)
+{
+ struct drm_printer p = drm_info_printer(state->dev->dev);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ DRM_DEBUG_ATOMIC("checking %p\n", state);
+
+ for_each_plane_in_state(state, plane, plane_state, i)
+ drm_atomic_plane_print_state(&p, plane_state);
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ drm_atomic_crtc_print_state(&p, crtc_state);
+
+ for_each_connector_in_state(state, connector, connector_state, i)
+ drm_atomic_connector_print_state(&p, connector_state);
+}
+
+/**
+ * drm_state_dump - dump entire device atomic state
+ * @dev: the drm device
+ * @p: where to print the state to
+ *
+ * Just for debugging. Drivers might want an option to dump state
+ * to dmesg in case of error IRQs. (Hint: you probably want to
+ * ratelimit this!)
+ *
+ * The caller must call drm_modeset_lock_all() first, or, if this is
+ * called from an error IRQ handler, it should not be enabled by default.
+ * (I.e. if you are debugging errors you might not care that this
+ * is racy. But calling this without all modeset locks held is
+ * not inherently safe.)
+ */
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return;
+
+ list_for_each_entry(plane, &config->plane_list, head)
+ drm_atomic_plane_print_state(p, plane->state);
+
+ list_for_each_entry(crtc, &config->crtc_list, head)
+ drm_atomic_crtc_print_state(p, crtc->state);
+
+ list_for_each_entry(connector, &config->connector_list, head)
+ drm_atomic_connector_print_state(p, connector->state);
+}
+EXPORT_SYMBOL(drm_state_dump);
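
For instance (a sketch only, assuming a hypothetical underrun interrupt handler), the rate limiting suggested above could look like:

static void my_handle_underrun_irq(struct drm_device *dev)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct drm_printer p = drm_info_printer(dev->dev);

	/* Racy by design when called from IRQ context, see the kerneldoc above. */
	if (__ratelimit(&rs))
		drm_state_dump(dev, &p);
}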
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_state_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_modeset_lock_all(dev);
+ drm_state_dump(dev, &p);
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+/* any use in debugfs files to dump individual planes/crtc/etc? */
+static const struct drm_info_list drm_atomic_debugfs_list[] = {
+ {"state", drm_state_info, 0},
+};
+
+int drm_atomic_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+int drm_atomic_debugfs_cleanup(struct drm_minor *minor)
+{
+ return drm_debugfs_remove_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor);
+}
+#endif
+
/*
* The big monster ioctl
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, struct drm_file *file_priv,
- struct fence *fence, uint64_t user_data)
+ struct drm_device *dev, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
- int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
if (!e)
@@ -1478,17 +1708,6 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- if (file_priv) {
- ret = drm_event_reserve_init(dev, file_priv, &e->base,
- &e->event.base);
- if (ret) {
- kfree(e);
- return NULL;
- }
- }
-
- e->base.fence = fence;
-
return e;
}
@@ -1593,6 +1812,217 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+/**
+ * DOC: explicit fencing properties
+ *
+ * Explicit fencing allows userspace to control the buffer synchronization
+ * between devices. A fence or a group of fences is transferred to/from
+ * userspace using Sync File fds, and there are two DRM properties for that:
+ * IN_FENCE_FD on each DRM Plane to send fences to the kernel, and
+ * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
+ *
+ * In contrast, with implicit fencing the kernel keeps track of any
+ * ongoing rendering, and automatically ensures that the atomic update waits
+ * for any pending rendering to complete. For shared buffers represented with
+ * a struct &dma_buf this is tracked in &reservation_object structures.
+ * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
+ * whereas explicit fencing is what Android wants.
+ *
+ * "IN_FENCE_FD”:
+ * Use this property to pass a fence that DRM should wait on before
+ * proceeding with the Atomic Commit request and showing the framebuffer
+ * for the plane on the screen. The fence can be either a normal fence or
+ * a merged one; the sync_file framework will handle both cases and use a
+ * fence_array if a merged fence is received. Passing -1 here means no
+ * fences to wait on.
+ *
+ * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
+ * it will only check if the Sync File is a valid one.
+ *
+ * On the driver side the fence is stored in the @fence field of
+ * struct &drm_plane_state. Drivers which also support implicit fencing
+ * should set the implicit fence using drm_atomic_set_fence_for_plane(),
+ * to make sure there's consistent behaviour between drivers in the
+ * precedence of implicit vs. explicit fencing.
+ *
+ * "OUT_FENCE_PTR”:
+ * Use this property to pass a file descriptor pointer to DRM. Once the
+ * Atomic Commit request call returns, OUT_FENCE_PTR will be filled with
+ * the file descriptor number of a Sync File. This Sync File contains the
+ * CRTC fence that will be signaled when all framebuffers present on the
+ * Atomic Commit request for that given CRTC are scanned out on the
+ * screen.
+ *
+ * The Atomic Commit request fails if an invalid pointer is passed. If the
+ * Atomic Commit request fails for any other reason, the out fence fd
+ * returned will be -1. On an Atomic Commit with the
+ * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
+ *
+ * Note that out-fences don't have a special interface to drivers and are
+ * internally represented by a struct &drm_pending_vblank_event in struct
+ * &drm_crtc_state, which is also used by the nonblocking atomic commit
+ * helpers and for the DRM event handling for existing userspace.
+ */
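
From the userspace side, the two properties are used with the regular atomic ioctl wrappers. A sketch using libdrm follows; it assumes fd is the DRM device fd and that the plane/CRTC object IDs as well as the IN_FENCE_FD and OUT_FENCE_PTR property IDs (plane_id, crtc_id, in_fence_fd_prop, out_fence_ptr_prop) were looked up beforehand with drmModeObjectGetProperties():

	int in_fence_fd = -1;	/* or a fence fd handed over by the rendering API */
	int64_t out_fence_fd = -1;
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop, in_fence_fd);
	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
				 (uint64_t)(uintptr_t)&out_fence_fd);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	/* On success, out_fence_fd now holds a sync_file fd for this commit. */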
+
+static struct dma_fence *get_crtc_fence(struct drm_crtc *crtc)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
+ crtc->fence_context, ++crtc->fence_seqno);
+
+ return fence;
+}
+
+struct drm_out_fence_state {
+ s64 __user *out_fence_ptr;
+ struct sync_file *sync_file;
+ int fd;
+};
+
+static int setup_out_fence(struct drm_out_fence_state *fence_state,
+ struct dma_fence *fence)
+{
+ fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fence_state->fd < 0)
+ return fence_state->fd;
+
+ if (put_user(fence_state->fd, fence_state->out_fence_ptr))
+ return -EFAULT;
+
+ fence_state->sync_file = sync_file_create(fence);
+ if (!fence_state->sync_file)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int prepare_crtc_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_mode_atomic *arg,
+ struct drm_file *file_priv,
+ struct drm_out_fence_state **fence_state,
+ unsigned int *num_fences)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
+
+ if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+ return 0;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ u64 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
+ struct drm_pending_vblank_event *e;
+
+ e = create_vblank_event(dev, arg->user_data);
+ if (!e)
+ return -ENOMEM;
+
+ crtc_state->event = e;
+ }
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ struct drm_pending_vblank_event *e = crtc_state->event;
+
+ if (!file_priv)
+ continue;
+
+ ret = drm_event_reserve_init(dev, file_priv, &e->base,
+ &e->event.base);
+ if (ret) {
+ kfree(e);
+ crtc_state->event = NULL;
+ return ret;
+ }
+ }
+
+ if (fence_ptr) {
+ struct dma_fence *fence;
+ struct drm_out_fence_state *f;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ fence = get_crtc_fence(crtc);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ crtc_state->event->base.fence = fence;
+ }
+ }
+
+ return 0;
+}
+
+static void complete_crtc_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ if (install_fds) {
+ for (i = 0; i < num_fences; i++)
+ fd_install(fence_state[i].fd,
+ fence_state[i].sync_file->file);
+
+ kfree(fence_state);
+ return;
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * TEST_ONLY and PAGE_FLIP_EVENT are mutually
+ * exclusive, if they weren't, this code should be
+ * called on success for TEST_ONLY too.
+ */
+ if (crtc_state->event)
+ drm_event_cancel_free(dev, &crtc_state->event->base);
+ }
+
+ if (!fence_state)
+ return;
+
+ for (i = 0; i < num_fences; i++) {
+ if (fence_state[i].sync_file)
+ fput(fence_state[i].sync_file->file);
+ if (fence_state[i].fd >= 0)
+ put_unused_fd(fence_state[i].fd);
+
+ /* If this fails log error to the user */
+ if (fence_state[i].out_fence_ptr &&
+ put_user(-1, fence_state[i].out_fence_ptr))
+ DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
+ }
+
+ kfree(fence_state);
+}
+
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -1605,11 +2035,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_out_fence_state *fence_state = NULL;
unsigned plane_mask;
int ret = 0;
- unsigned int i, j;
+ unsigned int i, j, num_fences = 0;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1724,50 +2153,30 @@ retry:
drm_mode_object_unreference(obj);
}
- if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- struct drm_pending_vblank_event *e;
-
- e = create_vblank_event(dev, file_priv, NULL,
- arg->user_data);
- if (!e) {
- ret = -ENOMEM;
- goto out;
- }
-
- crtc_state->event = e;
- }
- }
+ ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
+ if (ret)
+ goto out;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
/*
* Unlike commit, check_only does not clean up state.
- * Below we call drm_atomic_state_free for it.
+ * Below we call drm_atomic_state_put for it.
*/
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
+ if (unlikely(drm_debug & DRM_UT_STATE))
+ drm_atomic_print_state(state);
+
ret = drm_atomic_commit(state);
}
out:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
- if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- /*
- * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
- * if they weren't, this code should be called on success
- * for TEST_ONLY too.
- */
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->event)
- continue;
-
- drm_event_cancel_free(dev, &crtc_state->event->base);
- }
- }
+ complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
@@ -1775,8 +2184,7 @@ out:
goto retry;
}
- if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c3f83476f996..494680c9056e 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "drm_crtc_internal.h"
@@ -458,10 +458,11 @@ mode_fixup(struct drm_atomic_state *state)
* removed from the crtc.
* crtc_state->active_changed is set when crtc_state->active changes,
* which is used for dpms.
+ * See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
*
- * Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
+ * Drivers which set ->mode_changed (e.g. in their ->atomic_check hooks if a
* plane update can't be done without a full modeset) _must_ call this function
* after that change. It is permitted to call this function multiple
* times for the same update, e.g. when the ->atomic_check functions depend upon
@@ -510,9 +511,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
for_each_connector_in_state(state, connector, connector_state, i) {
/*
- * This only sets crtc->mode_changed for routing changes,
- * drivers must set crtc->mode_changed themselves when connector
- * properties need to be updated.
+ * This only sets crtc->connectors_changed for routing changes,
+ * drivers must set crtc->connectors_changed themselves when
+ * connector properties need to be updated.
*/
ret = update_connector_routing(state, connector,
connector_state);
@@ -594,10 +595,6 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_plane_state *plane_state;
int i, ret = 0;
- ret = drm_atomic_normalize_zpos(dev, state);
- if (ret)
- return ret;
-
for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
@@ -1009,14 +1006,22 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
* @dev: DRM device
* @state: atomic state object with old state structures
- * @pre_swap: if true, do an interruptible wait
+ * @pre_swap: If true, do an interruptible wait, and @state is the new state.
+ * Otherwise @state is the old state.
*
* For implicit sync, driver should fish the exclusive fence out from the
* incoming fb's and stash it in the drm_plane_state. This is called after
* drm_atomic_helper_swap_state() so it uses the current plane state (and
* just uses the atomic state to find the changed planes)
*
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Note that @pre_swap is needed since the point where we block for fences moves
+ * around depending upon whether an atomic commit is blocking or
+ * non-blocking. For async commit all waiting needs to happen after
+ * drm_atomic_helper_swap_state() is called, but for synchronous commits we want
+ * to wait **before** we do anything that can't be easily rolled back. That is
+ * before we call drm_atomic_helper_swap_state().
+ *
+ * Returns zero if success or < 0 if dma_fence_wait() fails.
*/
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -1040,11 +1045,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
* still interrupt the operation. Instead of blocking until the
* timer expires, make the wait interruptible.
*/
- ret = fence_wait(plane_state->fence, pre_swap);
+ ret = dma_fence_wait(plane_state->fence, pre_swap);
if (ret)
return ret;
- fence_put(plane_state->fence);
+ dma_fence_put(plane_state->fence);
plane_state->fence = NULL;
}
@@ -1150,7 +1155,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This is the default implementation for the ->atomic_commit_tail() hook of the
* &drm_mode_config_helper_funcs vtable.
@@ -1161,53 +1166,53 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
*
* For drivers supporting runtime PM the recommended sequence is instead ::
*
- * drm_atomic_helper_commit_modeset_disables(dev, state);
+ * drm_atomic_helper_commit_modeset_disables(dev, old_state);
*
- * drm_atomic_helper_commit_modeset_enables(dev, state);
+ * drm_atomic_helper_commit_modeset_enables(dev, old_state);
*
- * drm_atomic_helper_commit_planes(dev, state,
+ * drm_atomic_helper_commit_planes(dev, old_state,
* DRM_PLANE_COMMIT_ACTIVE_ONLY);
*
* for committing the atomic update to hardware. See the kerneldoc entries for
* these three functions for more details.
*/
-void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, state);
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, state, 0);
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
- drm_atomic_helper_commit_modeset_enables(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_commit_hw_done(old_state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
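
A driver that needs the runtime-PM friendly ordering described in the comment above would override the hook along these lines (a sketch only, names are hypothetical):

static void my_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_hw_done(old_state);
	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static struct drm_mode_config_helper_funcs my_mode_config_helpers = {
	.atomic_commit_tail = my_atomic_commit_tail,
};

/* in driver init: dev->mode_config.helper_private = &my_mode_config_helpers; */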
-static void commit_tail(struct drm_atomic_state *state)
+static void commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
struct drm_mode_config_helper_funcs *funcs;
funcs = dev->mode_config.helper_private;
- drm_atomic_helper_wait_for_fences(dev, state, false);
+ drm_atomic_helper_wait_for_fences(dev, old_state, false);
- drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_wait_for_dependencies(old_state);
if (funcs && funcs->atomic_commit_tail)
- funcs->atomic_commit_tail(state);
+ funcs->atomic_commit_tail(old_state);
else
- drm_atomic_helper_commit_tail(state);
+ drm_atomic_helper_commit_tail(old_state);
- drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_helper_commit_cleanup_done(old_state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(old_state);
}
static void commit_work(struct work_struct *work)
@@ -1289,6 +1294,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* make sure work items don't artificially stall on each other.
*/
+ drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
@@ -1500,10 +1506,10 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This function waits for all preceding commits that touch the same CRTC as
- * @state to both be committed to the hardware (as signalled by
+ * @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
* by calling drm_crtc_vblank_send_event on the event member of
* &drm_crtc_state).
@@ -1511,7 +1517,7 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -1519,7 +1525,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
int i;
long ret;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
spin_lock(&crtc->commit_lock);
commit = preceeding_commit(crtc);
if (commit)
@@ -1550,7 +1556,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
* drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This function is used to signal completion of the hardware commit step. After
* this step the driver is not allowed to read or change any permanent software
@@ -1563,15 +1569,15 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- commit = state->crtcs[i].commit;
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
+ commit = old_state->crtcs[i].commit;
if (!commit)
continue;
@@ -1586,16 +1592,16 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
/**
* drm_atomic_helper_commit_cleanup_done - signal completion of commit
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
- * This signals completion of the atomic update @state, including any cleanup
- * work. If used, it must be called right before calling
- * drm_atomic_state_free().
+ * This signals completion of the atomic update @old_state, including any
+ * cleanup work. If used, it must be called right before calling
+ * drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -1603,8 +1609,8 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
int i;
long ret;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- commit = state->crtcs[i].commit;
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
+ commit = old_state->crtcs[i].commit;
if (WARN_ON(!commit))
continue;
@@ -2113,18 +2119,13 @@ retry:
state->legacy_cursor_update = true;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2186,18 +2187,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2326,18 +2322,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2412,7 +2403,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
primary_state->crtc_h = vdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
- if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+ if (drm_rotation_90_or_270(primary_state->rotation)) {
primary_state->src_w = vdisplay << 16;
primary_state->src_h = hdisplay << 16;
} else {
@@ -2479,11 +2470,8 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
}
err = drm_atomic_commit(state);
-
free:
- if (err < 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
@@ -2534,7 +2522,7 @@ retry:
err = drm_atomic_helper_disable_all(dev, &ctx);
if (err < 0) {
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
state = ERR_PTR(err);
goto unlock;
}
@@ -2623,18 +2611,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2683,18 +2666,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2743,18 +2721,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2827,18 +2800,13 @@ retry:
}
ret = drm_atomic_nonblocking_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2914,19 +2882,14 @@ retry:
crtc_state->active = active;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
connector->dpms = old_mode;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -3117,6 +3080,8 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
if (state->fb)
drm_framebuffer_reference(state->fb);
+
+ state->fence = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
@@ -3155,6 +3120,9 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
+
+ if (state->fence)
+ dma_fence_put(state->fence);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
@@ -3333,7 +3301,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
free:
if (err < 0) {
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
state = ERR_PTR(err);
}
@@ -3448,22 +3416,14 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
-
- drm_property_unreference_blob(blob);
-
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
drm_property_unreference_blob(blob);
-
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 85172a977bf3..1f2412c7ccfd 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -89,7 +89,7 @@
* On top of this basic transformation additional properties can be exposed by
* the driver:
*
- * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
* rotation and reflection step between the source and destination rectangles.
* Without this property the rectangle is only scaled, but not rotated or
* reflected.
@@ -105,18 +105,12 @@
*/
/**
- * drm_mode_create_rotation_property - create a new rotation property
- * @dev: DRM device
+ * drm_plane_create_rotation_property - create a new rotation property
+ * @plane: drm plane
+ * @rotation: initial value of the rotation property
* @supported_rotations: bitmask of supported rotations and reflections
*
* This creates a new property with the selected support for transformations.
- * The resulting property should be stored in @rotation_property in
- * &drm_mode_config. It then must be attached to each plane which supports
- * rotations using drm_object_attach_property().
- *
- * FIXME: Probably better if the rotation property is created on each plane,
- * like the zpos property. Otherwise it's not possible to allow different
- * rotation modes on different planes.
*
* Since a rotation by 180° is the same as reflecting both along the x
* and the y axis the rotation property is somewhat redundant. Drivers can use
@@ -144,8 +138,9 @@
* rotation. After reflection, the rotation is applied to the image sampled from
* the source rectangle, before scaling it to fit the destination rectangle.
*/
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations)
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+ unsigned int rotation,
+ unsigned int supported_rotations)
{
static const struct drm_prop_enum_list props[] = {
{ __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" },
@@ -155,12 +150,28 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
{ __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
{ __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" },
};
+ struct drm_property *prop;
+
+ WARN_ON((supported_rotations & DRM_ROTATE_MASK) == 0);
+ WARN_ON(!is_power_of_2(rotation & DRM_ROTATE_MASK));
+ WARN_ON(rotation & ~supported_rotations);
- return drm_property_create_bitmask(dev, 0, "rotation",
+ prop = drm_property_create_bitmask(plane->dev, 0, "rotation",
props, ARRAY_SIZE(props),
supported_rotations);
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&plane->base, prop, rotation);
+
+ if (plane->state)
+ plane->state->rotation = rotation;
+
+ plane->rotation_property = prop;
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
+EXPORT_SYMBOL(drm_plane_create_rotation_property);
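
Typical usage from a driver's plane init path might look like this (a sketch; the supported mask obviously depends on the hardware):

	ret = drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
						 DRM_ROTATE_0 |
						 DRM_ROTATE_180 |
						 DRM_REFLECT_X |
						 DRM_REFLECT_Y);
	if (ret)
		DRM_DEBUG_KMS("failed to create rotation property\n");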
/**
* drm_rotation_simplify() - Try to simplify the rotation
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index d28ffdd2b929..6543ebde501a 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -41,6 +41,10 @@
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * linear/pass-thru gamma table should be used. This is generally the
+ * driver boot-up state too.
+ *
* “DEGAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the DEGAMMA_LUT property (the size depends on the underlying
@@ -54,6 +58,10 @@
* lookup through the gamma LUT. The data is interpreted as a struct
* &drm_color_ctm.
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * unit/pass-thru matrix should be used. This is generally the driver
+ * boot-up state too.
+ *
* “GAMMA_LUT”:
* Blob property to set the gamma lookup table (LUT) mapping pixel data
* after the transformation matrix to data sent to the connector. The
@@ -62,6 +70,10 @@
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * linear/pass-thru gamma table should be used. This is generally the
+ * driver boot-up state too.
+ *
* “GAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the GAMMA_LUT property (the size depends on the underlying hardware).
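
On the driver side a NULL blob therefore selects the pass-through path; a sketch of how a CRTC commit/flush hook might consume GAMMA_LUT (my_load_gamma() and my_bypass_gamma() are hypothetical hardware hooks):

	struct drm_crtc_state *state = crtc->state;

	if (state->gamma_lut) {
		struct drm_color_lut *lut =
			(struct drm_color_lut *)state->gamma_lut->data;
		unsigned int n = state->gamma_lut->length / sizeof(*lut);

		my_load_gamma(crtc, lut, n);	/* hypothetical */
	} else {
		my_bypass_gamma(crtc);		/* hypothetical: linear/pass-thru */
	}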
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2db7fb510b6c..5a4526289392 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -588,6 +588,50 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+/**
+ * DOC: standard connector properties
+ *
+ * DRM connectors have a few standardized properties:
+ *
+ * EDID:
+ * Blob property which contains the current EDID read from the sink. This
+ * is useful to parse sink identification information like vendor, model
+ * and serial. Drivers should update this property by calling
+ * drm_mode_connector_update_edid_property(), usually after having parsed
+ * the EDID using drm_add_edid_modes(). Userspace cannot change this
+ * property.
+ * DPMS:
+ * Legacy property for setting the power state of the connector. For atomic
+ * drivers this is only provided for backwards compatibility with existing
+ * drivers, it remaps to controlling the "ACTIVE" property on the CRTC the
+ * connector is linked to. Drivers should never set this property directly,
+ * it is handled by the DRM core by calling the ->dpms() callback in
+ * &drm_connector_funcs. Atomic drivers should implement this hook using
+ * drm_atomic_helper_connector_dpms(). This is the only standard
+ * connector property that userspace can change.
+ * PATH:
+ * Connector path property to identify how this sink is physically
+ * connected. Used by DP MST. This should be set by calling
+ * drm_mode_connector_set_path_property(), in the case of DP MST with the
+ * path property the MST manager created. Userspace cannot change this
+ * property.
+ * TILE:
+ * Connector tile group property to indicate how a set of DRM connectors
+ * compose together into one logical screen. This is used by both high-res
+ * external screens (often only using a single cable, but exposing multiple
+ * DP MST sinks) and high-res integrated panels (like dual-link DSI) which
+ * are not gen-locked. Note that for tiled panels which are genlocked, like
+ * dual-link LVDS or dual-link DSI, the driver should try to not expose the
+ * tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers
+ * should update this value using drm_mode_connector_set_tile_property().
+ * Userspace cannot change this property.
+ *
+ * Connectors also have one standardized atomic property:
+ *
+ * CRTC_ID:
+ * Mode object ID of the &drm_crtc this connector should be connected to.
+ */
+
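
For example, the EDID property is normally refreshed from the connector's ->get_modes hook, roughly like this (a sketch; my_connector_ddc() is a hypothetical lookup of the DDC i2c adapter):

static int my_connector_get_modes(struct drm_connector *connector)
{
	struct edid *edid;
	int count;

	edid = drm_get_edid(connector, my_connector_ddc(connector));
	drm_mode_connector_update_edid_property(connector, edid);
	if (!edid)
		return 0;

	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;
}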
int drm_connector_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
@@ -1121,3 +1165,107 @@ out_unlock:
return ret;
}
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique integer
+ * identifier. Tiled monitors using DisplayID v1.3 have a unique 8-byte handle;
+ * we store this in a tile group, so we have a common identifier for all tiles
+ * in a monitor group. The property is called "TILE". Drivers can manage tile
+ * groups using drm_mode_create_tile_group(), drm_mode_put_tile_group() and
+ * drm_mode_get_tile_group(). But this is only needed for internal panels where
+ * the tile group information is exposed in a non-standard way.
+ */
+
+static void drm_tile_group_free(struct kref *kref)
+{
+ struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+ struct drm_device *dev = tg->dev;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.tile_idr, tg->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * drop reference to tile group and free if 0.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg)
+{
+ kref_put(&tg->refcount, drm_tile_group_free);
+}
+EXPORT_SYMBOL(drm_mode_put_tile_group);
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int id;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+ if (!memcmp(tg->group_data, topology, 8)) {
+ if (!kref_get_unless_zero(&tg->refcount))
+ tg = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+ }
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mode_get_tile_group);
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int ret;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&tg->refcount);
+ memcpy(tg->group_data, topology, 8);
+ tg->dev = dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ tg->id = ret;
+ } else {
+ kfree(tg);
+ tg = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+}
+EXPORT_SYMBOL(drm_mode_create_tile_group);
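
As noted in the DOC comment above, driver-managed tile groups are mainly for internal panels; a sketch of what that could look like (the topology bytes and the tile geometry are driver specific and purely illustrative here):

	char topology[8] = "PANEL01";	/* hypothetical: derived from the panel ID */
	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology);
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology);
	if (IS_ERR(tg))
		return PTR_ERR(tg);

	connector->has_tile = true;
	connector->tile_group = tg;
	connector->num_h_tile = 2;	/* e.g. left/right halves of a dual-link panel */
	connector->num_v_tile = 1;
	drm_mode_connector_set_tile_property(connector);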
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2d7bedf28647..90931e039731 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/dma-fence.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -40,23 +41,11 @@
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
-#include <drm/drm_framebuffer.h>
+#include <drm/drm_debugfs_crc.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-/*
- * Global properties
- */
-static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
- { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
- { DRM_PLANE_TYPE_PRIMARY, "Primary" },
- { DRM_PLANE_TYPE_CURSOR, "Cursor" },
-};
-
-/*
- * Optional properties
- */
/**
* drm_crtc_force_disable - Forcibly turn off a CRTC
* @crtc: CRTC to turn off
@@ -102,8 +91,6 @@ out:
}
EXPORT_SYMBOL(drm_crtc_force_disable_all);
-DEFINE_WW_CLASS(crtc_ww_class);
-
static unsigned int drm_num_crtcs(struct drm_device *dev)
{
unsigned int num = 0;
@@ -116,12 +103,16 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
return num;
}
-static int drm_crtc_register_all(struct drm_device *dev)
+int drm_crtc_register_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret = 0;
drm_for_each_crtc(crtc, dev) {
+ if (drm_debugfs_crtc_add(crtc))
+ DRM_ERROR("Failed to initialize debugfs entry for CRTC '%s'.\n",
+ crtc->name);
+
if (crtc->funcs->late_register)
ret = crtc->funcs->late_register(crtc);
if (ret)
@@ -131,16 +122,68 @@ static int drm_crtc_register_all(struct drm_device *dev)
return 0;
}
-static void drm_crtc_unregister_all(struct drm_device *dev)
+void drm_crtc_unregister_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, dev) {
if (crtc->funcs->early_unregister)
crtc->funcs->early_unregister(crtc);
+ drm_debugfs_crtc_remove(crtc);
}
}
+static int drm_crtc_crc_init(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+ spin_lock_init(&crtc->crc.lock);
+ init_waitqueue_head(&crtc->crc.wq);
+ crtc->crc.source = kstrdup("auto", GFP_KERNEL);
+ if (!crtc->crc.source)
+ return -ENOMEM;
+#endif
+ return 0;
+}
+
+static void drm_crtc_crc_fini(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+ kfree(crtc->crc.source);
+#endif
+}
+
+static struct drm_crtc *fence_to_crtc(struct dma_fence *fence)
+{
+ BUG_ON(fence->ops != &drm_crtc_fence_ops);
+ return container_of(fence->lock, struct drm_crtc, fence_lock);
+}
+
+static const char *drm_crtc_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct drm_crtc *crtc = fence_to_crtc(fence);
+
+ return crtc->dev->driver->name;
+}
+
+static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct drm_crtc *crtc = fence_to_crtc(fence);
+
+ return crtc->timeline_name;
+}
+
+static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+const struct dma_fence_ops drm_crtc_fence_ops = {
+ .get_driver_name = drm_crtc_fence_get_driver_name,
+ .get_timeline_name = drm_crtc_fence_get_timeline_name,
+ .enable_signaling = drm_crtc_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+};
+
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -198,6 +241,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return -ENOMEM;
}
+ crtc->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&crtc->fence_lock);
+ snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
+ "CRTC:%d-%s", crtc->base.id, crtc->name);
+
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
@@ -205,14 +253,22 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->primary = primary;
crtc->cursor = cursor;
- if (primary)
+ if (primary && !primary->possible_crtcs)
primary->possible_crtcs = 1 << drm_crtc_index(crtc);
- if (cursor)
+ if (cursor && !cursor->possible_crtcs)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+ ret = drm_crtc_crc_init(crtc);
+ if (ret) {
+ drm_mode_object_unregister(dev, &crtc->base);
+ return ret;
+ }
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&crtc->base, config->prop_active, 0);
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
+ drm_object_attach_property(&crtc->base,
+ config->prop_out_fence_ptr, 0);
}
return 0;
@@ -236,6 +292,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
* the indices on the drm_crtc after us in the crtc_list.
*/
+ drm_crtc_crc_fini(crtc);
+
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
@@ -255,301 +313,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_cleanup);
-int drm_modeset_register_all(struct drm_device *dev)
-{
- int ret;
-
- ret = drm_plane_register_all(dev);
- if (ret)
- goto err_plane;
-
- ret = drm_crtc_register_all(dev);
- if (ret)
- goto err_crtc;
-
- ret = drm_encoder_register_all(dev);
- if (ret)
- goto err_encoder;
-
- ret = drm_connector_register_all(dev);
- if (ret)
- goto err_connector;
-
- return 0;
-
-err_connector:
- drm_encoder_unregister_all(dev);
-err_encoder:
- drm_crtc_unregister_all(dev);
-err_crtc:
- drm_plane_unregister_all(dev);
-err_plane:
- return ret;
-}
-
-void drm_modeset_unregister_all(struct drm_device *dev)
-{
- drm_connector_unregister_all(dev);
- drm_encoder_unregister_all(dev);
- drm_crtc_unregister_all(dev);
- drm_plane_unregister_all(dev);
-}
-
-static int drm_mode_create_standard_properties(struct drm_device *dev)
-{
- struct drm_property *prop;
- int ret;
-
- ret = drm_connector_create_standard_properties(dev);
- if (ret)
- return ret;
-
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
- "type", drm_plane_type_enum_list,
- ARRAY_SIZE(drm_plane_type_enum_list));
- if (!prop)
- return -ENOMEM;
- dev->mode_config.plane_type_property = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_X", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_x = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_Y", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_y = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_W", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_w = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_H", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_h = prop;
-
- prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_X", INT_MIN, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_x = prop;
-
- prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_Y", INT_MIN, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_y = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_W", 0, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_w = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_H", 0, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_h = prop;
-
- prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
- "FB_ID", DRM_MODE_OBJECT_FB);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_fb_id = prop;
-
- prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_ID", DRM_MODE_OBJECT_CRTC);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_id = prop;
-
- prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
- "ACTIVE");
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_active = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
- "MODE_ID", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_mode_id = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "DEGAMMA_LUT", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.degamma_lut_property = prop;
-
- prop = drm_property_create_range(dev,
- DRM_MODE_PROP_IMMUTABLE,
- "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.degamma_lut_size_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "CTM", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.ctm_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "GAMMA_LUT", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.gamma_lut_property = prop;
-
- prop = drm_property_create_range(dev,
- DRM_MODE_PROP_IMMUTABLE,
- "GAMMA_LUT_SIZE", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.gamma_lut_size_property = prop;
-
- return 0;
-}
-
-/**
- * drm_mode_getresources - get graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a set of configuration description structures and return
- * them to the user, including CRTC, connector and framebuffer configuration.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getresources(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_card_res *card_res = data;
- struct list_head *lh;
- struct drm_framebuffer *fb;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int ret = 0;
- int connector_count = 0;
- int crtc_count = 0;
- int fb_count = 0;
- int encoder_count = 0;
- int copied = 0;
- uint32_t __user *fb_id;
- uint32_t __user *crtc_id;
- uint32_t __user *connector_id;
- uint32_t __user *encoder_id;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
-
- mutex_lock(&file_priv->fbs_lock);
- /*
- * For the non-control nodes we need to limit the list of resources
- * by IDs in the group list for this node
- */
- list_for_each(lh, &file_priv->fbs)
- fb_count++;
-
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, filp_head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- mutex_unlock(&file_priv->fbs_lock);
- return -EFAULT;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
- mutex_unlock(&file_priv->fbs_lock);
-
- /* mode_config.mutex protects the connector list against e.g. DP MST
- * connector hot-adding. CRTC/Plane lists are invariant. */
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_crtc(crtc, dev)
- crtc_count++;
-
- drm_for_each_connector(connector, dev)
- connector_count++;
-
- drm_for_each_encoder(encoder, dev)
- encoder_count++;
-
- card_res->max_height = dev->mode_config.max_height;
- card_res->min_height = dev->mode_config.min_height;
- card_res->max_width = dev->mode_config.max_width;
- card_res->min_width = dev->mode_config.min_width;
-
- /* CRTCs */
- if (card_res->count_crtcs >= crtc_count) {
- copied = 0;
- crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- drm_for_each_crtc(crtc, dev) {
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_crtcs = crtc_count;
-
- /* Encoders */
- if (card_res->count_encoders >= encoder_count) {
- copied = 0;
- encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- drm_for_each_encoder(encoder, dev) {
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_encoders = encoder_count;
-
- /* Connectors */
- if (card_res->count_connectors >= connector_count) {
- copied = 0;
- connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- drm_for_each_connector(connector, dev) {
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_connectors = connector_count;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
/**
* drm_mode_getcrtc - get CRTC configuration
* @dev: drm device for the ioctl
@@ -695,8 +458,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->state &&
- crtc->primary->state->rotation & (DRM_ROTATE_90 |
- DRM_ROTATE_270))
+ drm_rotation_90_or_270(crtc->primary->state->rotation))
swap(hdisplay, vdisplay);
return drm_framebuffer_check_src_coords(x << 16, y << 16,
@@ -796,9 +558,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = drm_plane_check_pixel_format(crtc->primary,
fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(fb->pixel_format);
- DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format,
+ &format_name));
goto out;
}
}
@@ -902,362 +665,3 @@ int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
-
-/**
- * drm_mode_config_reset - call ->reset callbacks
- * @dev: drm device
- *
- * This functions calls all the crtc's, encoder's and connector's ->reset
- * callback. Drivers can use this in e.g. their driver load or resume code to
- * reset hardware and software state.
- */
-void drm_mode_config_reset(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- struct drm_plane *plane;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
- drm_for_each_plane(plane, dev)
- if (plane->funcs->reset)
- plane->funcs->reset(plane);
-
- drm_for_each_crtc(crtc, dev)
- if (crtc->funcs->reset)
- crtc->funcs->reset(crtc);
-
- drm_for_each_encoder(encoder, dev)
- if (encoder->funcs->reset)
- encoder->funcs->reset(encoder);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_connector(connector, dev)
- if (connector->funcs->reset)
- connector->funcs->reset(connector);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_mode_config_reset);
-
-/**
- * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This creates a new dumb buffer in the driver's backing storage manager (GEM,
- * TTM or something else entirely) and returns the resulting buffer handle. This
- * handle can then be wrapped up into a framebuffer modeset object.
- *
- * Note that userspace is not allowed to use such objects for render
- * acceleration - drivers must create their own private ioctls for such a use
- * case.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_create_dumb *args = data;
- u32 cpp, stride, size;
-
- if (!dev->driver->dumb_create)
- return -ENOSYS;
- if (!args->width || !args->height || !args->bpp)
- return -EINVAL;
-
- /* overflow checks for 32bit size calculations */
- /* NOTE: DIV_ROUND_UP() can overflow */
- cpp = DIV_ROUND_UP(args->bpp, 8);
- if (!cpp || cpp > 0xffffffffU / args->width)
- return -EINVAL;
- stride = cpp * args->width;
- if (args->height > 0xffffffffU / stride)
- return -EINVAL;
-
- /* test for wrap-around */
- size = args->height * stride;
- if (PAGE_ALIGN(size) == 0)
- return -EINVAL;
-
- /*
- * handle, pitch and size are output parameters. Zero them out to
- * prevent drivers from accidentally using uninitialized data. Since
- * not all existing userspace is clearing these fields properly we
- * cannot reject IOCTL with garbage in them.
- */
- args->handle = 0;
- args->pitch = 0;
- args->size = 0;
-
- return dev->driver->dumb_create(file_priv, dev, args);
-}
-
-/**
- * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Allocate an offset in the drm device node's address space to be able to
- * memory map a dumb buffer.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_map_dumb *args = data;
-
- /* call driver ioctl to get mmap offset */
- if (!dev->driver->dumb_map_offset)
- return -ENOSYS;
-
- return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
-}
-
-/**
- * drm_mode_destroy_dumb_ioctl - destroy a dumb backing strage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This destroys the userspace handle for the given dumb backing storage buffer.
- * Since buffer objects must be reference counted in the kernel a buffer object
- * won't be immediately freed if a framebuffer modeset object still uses it.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_destroy_dumb *args = data;
-
- if (!dev->driver->dumb_destroy)
- return -ENOSYS;
-
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
-}
-
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible. Which is no
- * problem, since this should happen single threaded at init time. It is the
- * driver's problem to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- drm_modeset_lock_init(&dev->mode_config.connection_mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- mutex_init(&dev->mode_config.fb_lock);
- mutex_init(&dev->mode_config.blob_lock);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- INIT_LIST_HEAD(&dev->mode_config.plane_list);
- idr_init(&dev->mode_config.crtc_idr);
- idr_init(&dev->mode_config.tile_idr);
- ida_init(&dev->mode_config.connector_ida);
-
- drm_modeset_lock_all(dev);
- drm_mode_create_standard_properties(dev);
- drm_modeset_unlock_all(dev);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
- dev->mode_config.num_overlay_plane = 0;
- dev->mode_config.num_total_plane = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
- struct drm_property_blob *blob, *bt;
- struct drm_plane *plane, *plt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
- head) {
- plane->funcs->destroy(plane);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
- list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
- head_global) {
- drm_property_unreference_blob(blob);
- }
-
- /*
- * Single-threaded teardown context, so it's not required to grab the
- * fb_lock to protect against concurrent fb_list access. Contrary, it
- * would actually deadlock with the drm_framebuffer_cleanup function.
- *
- * Also, if there are any framebuffers left, that's a driver leak now,
- * so politely WARN about this.
- */
- WARN_ON(!list_empty(&dev->mode_config.fb_list));
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- drm_framebuffer_free(&fb->base.refcount);
- }
-
- ida_destroy(&dev->mode_config.connector_ida);
- idr_destroy(&dev->mode_config.tile_idr);
- idr_destroy(&dev->mode_config.crtc_idr);
- drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
- * DOC: Tile group
- *
- * Tile groups are used to represent tiled monitors with a unique
- * integer identifier. Tiled monitors using DisplayID v1.3 have
- * a unique 8-byte handle, we store this in a tile group, so we
- * have a common identifier for all tiles in a monitor group.
- */
-static void drm_tile_group_free(struct kref *kref)
-{
- struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
- struct drm_device *dev = tg->dev;
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.tile_idr, tg->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- kfree(tg);
-}
-
-/**
- * drm_mode_put_tile_group - drop a reference to a tile group.
- * @dev: DRM device
- * @tg: tile group to drop reference to.
- *
- * drop reference to tile group and free if 0.
- */
-void drm_mode_put_tile_group(struct drm_device *dev,
- struct drm_tile_group *tg)
-{
- kref_put(&tg->refcount, drm_tile_group_free);
-}
-
-/**
- * drm_mode_get_tile_group - get a reference to an existing tile group
- * @dev: DRM device
- * @topology: 8-bytes unique per monitor.
- *
- * Use the unique bytes to get a reference to an existing tile group.
- *
- * RETURNS:
- * tile group or NULL if not found.
- */
-struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8])
-{
- struct drm_tile_group *tg;
- int id;
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
- if (!memcmp(tg->group_data, topology, 8)) {
- if (!kref_get_unless_zero(&tg->refcount))
- tg = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
- return tg;
- }
- }
- mutex_unlock(&dev->mode_config.idr_mutex);
- return NULL;
-}
-EXPORT_SYMBOL(drm_mode_get_tile_group);
-
-/**
- * drm_mode_create_tile_group - create a tile group from a displayid description
- * @dev: DRM device
- * @topology: 8-bytes unique per monitor.
- *
- * Create a tile group for the unique monitor, and get a unique
- * identifier for the tile group.
- *
- * RETURNS:
- * new tile group or error.
- */
-struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8])
-{
- struct drm_tile_group *tg;
- int ret;
-
- tg = kzalloc(sizeof(*tg), GFP_KERNEL);
- if (!tg)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&tg->refcount);
- memcpy(tg->group_data, topology, 8);
- tg->dev = dev;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
- if (ret >= 0) {
- tg->id = ret;
- } else {
- kfree(tg);
- tg = ERR_PTR(ret);
- }
-
- mutex_unlock(&dev->mode_config.idr_mutex);
- return tg;
-}
-EXPORT_SYMBOL(drm_mode_create_tile_group);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c48ba02c5365..33b17d0b127e 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -40,10 +40,29 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb);
+int drm_crtc_register_all(struct drm_device *dev);
+void drm_crtc_unregister_all(struct drm_device *dev);
-void drm_fb_release(struct drm_file *file_priv);
+extern const struct dma_fence_ops drm_crtc_fence_ops;
+
+/* IOCTLs */
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+
+/* drm_mode_config.c */
+int drm_modeset_register_all(struct drm_device *dev);
+void drm_modeset_unregister_all(struct drm_device *dev);
+
+/* IOCTLs */
+int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
-/* dumb buffer support IOCTLs */
+
+/* drm_dumb_buffers.c */
+/* IOCTLs */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
@@ -51,14 +70,6 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-/* IOCTLs */
-int drm_mode_getresources(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_getcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_setcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-
/* drm_color_mgmt.c */
/* IOCTLs */
@@ -147,6 +158,8 @@ void drm_framebuffer_free(struct kref *kref);
int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_framebuffer *fb);
+void drm_fb_release(struct drm_file *file_priv);
+
/* IOCTL */
int drm_mode_addfb(struct drm_device *dev,
@@ -166,9 +179,6 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_modeset_register_all(struct drm_device *dev);
-void drm_modeset_unregister_all(struct drm_device *dev);
-
/* drm_plane.c */
int drm_plane_register_all(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 1205790ed960..2e3e46a53805 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -36,6 +36,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
+#include <drm/drm_atomic.h>
#include "drm_internal.h"
#if defined(CONFIG_DEBUG_FS)
@@ -163,6 +164,14 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
return ret;
}
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ ret = drm_atomic_debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("Failed to create atomic debugfs files\n");
+ return ret;
+ }
+ }
+
if (dev->driver->debugfs_init) {
ret = dev->driver->debugfs_init(minor);
if (ret) {
@@ -219,6 +228,7 @@ EXPORT_SYMBOL(drm_debugfs_remove_files);
int drm_debugfs_cleanup(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
+ int ret;
if (!minor->debugfs_root)
return 0;
@@ -226,6 +236,14 @@ int drm_debugfs_cleanup(struct drm_minor *minor)
if (dev->driver->debugfs_cleanup)
dev->driver->debugfs_cleanup(minor);
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ ret = drm_atomic_debugfs_cleanup(minor);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to remove atomic debugfs entries\n");
+ return ret;
+ }
+ }
+
drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
debugfs_remove(minor->debugfs_root);
@@ -415,5 +433,37 @@ void drm_debugfs_connector_remove(struct drm_connector *connector)
connector->debugfs_entry = NULL;
}
-#endif /* CONFIG_DEBUG_FS */
+int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+ struct drm_minor *minor = crtc->dev->primary;
+ struct dentry *root;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
+ if (!name)
+ return -ENOMEM;
+ root = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+ if (!root)
+ return -ENOMEM;
+
+ crtc->debugfs_entry = root;
+
+ if (drm_debugfs_crtc_crc_add(crtc))
+ goto error;
+
+ return 0;
+
+error:
+ drm_debugfs_crtc_remove(crtc);
+ return -ENOMEM;
+}
+
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+ debugfs_remove_recursive(crtc->debugfs_entry);
+ crtc->debugfs_entry = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
new file mode 100644
index 000000000000..00e771fb7df2
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * Copyright © 2016 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Based on code from the i915 driver.
+ * Original author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <drm/drmP.h>
+#include "drm_internal.h"
+
+/**
+ * DOC: CRC ABI
+ *
+ * DRM device drivers can provide to userspace CRC information of each frame as
+ * it reached a given hardware component (a "source").
+ *
+ * Userspace can control generation of CRCs in a given CRTC by writing to the
+ * file dri/0/crtc-N/crc/control in debugfs, with N being the index of the CRTC.
+ * Accepted values are source names (which are driver-specific) and the "auto"
+ * keyword, which will let the driver select a default source of frame CRCs
+ * for this CRTC.
+ *
+ * Once frame CRC generation is enabled, userspace can capture them by reading
+ * the dri/0/crtc-N/crc/data file. Each line in that file contains the frame
+ * number in the first field and then a number of unsigned integer fields
+ * containing the CRC data. Fields are separated by a single space and the number
+ * of CRC fields is source-specific.
+ *
+ * Note that, while in some cases the CRC is computed in a specified way on the
+ * frame contents as supplied by userspace (eDP 1.3), in general the CRC
+ * computation is performed in an unspecified way, on frame contents that have
+ * already been processed in an equally unspecified way, so userspace cannot
+ * rely on being able to generate matching CRC values for the frame contents it
+ * submits. In that general case, the best userspace can do is compare the
+ * reported CRCs of frames that should have the same contents.
+ */
+
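
As a rough userspace-side illustration of the ABI described above (assuming debugfs is mounted at /sys/kernel/debug, CRTC index 0 and the "auto" source; error handling omitted):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[256];
	ssize_t n;
	int fd;

	/* Select the driver's default CRC source for CRTC 0 */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
	write(fd, "auto", 4);
	close(fd);

	/* Opening the data file enables CRC generation ... */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
	n = read(fd, line, sizeof(line) - 1);	/* blocks until a CRC entry is available */
	if (n > 0) {
		line[n] = '\0';
		printf("%s", line);		/* e.g. "0x0000002a 0xdeadbeef\n" */
	}
	close(fd);				/* ... and closing it disables it again */
	return 0;
}
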
+static int crc_control_show(struct seq_file *m, void *data)
+{
+ struct drm_crtc *crtc = m->private;
+
+ seq_printf(m, "%s\n", crtc->crc.source);
+
+ return 0;
+}
+
+static int crc_control_open(struct inode *inode, struct file *file)
+{
+ struct drm_crtc *crtc = inode->i_private;
+
+ return single_open(file, crc_control_show, crtc);
+}
+
+static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_crtc *crtc = m->private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ char *source;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_KMS("Expected < %lu bytes into crtc crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ source = memdup_user_nul(ubuf, len);
+ if (IS_ERR(source))
+ return PTR_ERR(source);
+
+ if (source[len - 1] == '\n')
+ source[len - 1] = '\0';
+
+ spin_lock_irq(&crc->lock);
+
+ if (crc->opened) {
+ spin_unlock_irq(&crc->lock);
+ kfree(source);
+ return -EBUSY;
+ }
+
+ kfree(crc->source);
+ crc->source = source;
+
+ spin_unlock_irq(&crc->lock);
+
+ *offp += len;
+ return len;
+}
+
+static const struct file_operations drm_crtc_crc_control_fops = {
+ .owner = THIS_MODULE,
+ .open = crc_control_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crc_control_write
+};
+
+static int crtc_crc_open(struct inode *inode, struct file *filep)
+{
+ struct drm_crtc *crtc = inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entries = NULL;
+ size_t values_cnt;
+ int ret;
+
+ if (crc->opened)
+ return -EBUSY;
+
+ ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
+ ret = -EINVAL;
+ goto err_disable;
+ }
+
+ if (WARN_ON(values_cnt == 0)) {
+ ret = -EINVAL;
+ goto err_disable;
+ }
+
+ entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
+ if (!entries) {
+ ret = -ENOMEM;
+ goto err_disable;
+ }
+
+ spin_lock_irq(&crc->lock);
+ crc->entries = entries;
+ crc->values_cnt = values_cnt;
+ crc->opened = true;
+ spin_unlock_irq(&crc->lock);
+
+ return 0;
+
+err_disable:
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+ return ret;
+}
+
+static int crtc_crc_release(struct inode *inode, struct file *filep)
+{
+ struct drm_crtc *crtc = filep->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ size_t values_cnt;
+
+ spin_lock_irq(&crc->lock);
+ kfree(crc->entries);
+ crc->entries = NULL;
+ crc->head = 0;
+ crc->tail = 0;
+ crc->values_cnt = 0;
+ crc->opened = false;
+ spin_unlock_irq(&crc->lock);
+
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+
+ return 0;
+}
+
+static int crtc_crc_data_count(struct drm_crtc_crc *crc)
+{
+ assert_spin_locked(&crc->lock);
+ return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
+}
+
+/*
+ * 1 frame field of 10 chars plus a number of CRC fields of 10 chars each, space
+ * separated, with a newline at the end and null-terminated.
+ */
+#define LINE_LEN(values_cnt) (10 + 11 * values_cnt + 1 + 1)
+#define MAX_LINE_LEN (LINE_LEN(DRM_MAX_CRC_NR))
+
+static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
+ size_t count, loff_t *pos)
+{
+ struct drm_crtc *crtc = filep->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ char buf[MAX_LINE_LEN];
+ int ret, i;
+
+ spin_lock_irq(&crc->lock);
+
+ if (!crc->source) {
+ spin_unlock_irq(&crc->lock);
+ return 0;
+ }
+
+ /* Nothing to read? */
+ while (crtc_crc_data_count(crc) == 0) {
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(crc->wq,
+ crtc_crc_data_count(crc),
+ crc->lock);
+ if (ret) {
+ spin_unlock_irq(&crc->lock);
+ return ret;
+ }
+ }
+
+ /* We know we have an entry to be read */
+ entry = &crc->entries[crc->tail];
+
+ if (count < LINE_LEN(crc->values_cnt)) {
+ spin_unlock_irq(&crc->lock);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(DRM_CRC_ENTRIES_NR);
+ crc->tail = (crc->tail + 1) & (DRM_CRC_ENTRIES_NR - 1);
+
+ spin_unlock_irq(&crc->lock);
+
+ if (entry->has_frame_counter)
+ sprintf(buf, "0x%08x", entry->frame);
+ else
+ sprintf(buf, "XXXXXXXXXX");
+
+ for (i = 0; i < crc->values_cnt; i++)
+ sprintf(buf + 10 + i * 11, " 0x%08x", entry->crcs[i]);
+ sprintf(buf + 10 + crc->values_cnt * 11, "\n");
+
+ if (copy_to_user(user_buf, buf, LINE_LEN(crc->values_cnt)))
+ return -EFAULT;
+
+ return LINE_LEN(crc->values_cnt);
+}
+
+static const struct file_operations drm_crtc_crc_data_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_crc_open,
+ .read = crtc_crc_read,
+ .release = crtc_crc_release,
+};
+
+/**
+ * drm_debugfs_crtc_crc_add - Add files to debugfs for capture of frame CRCs
+ * @crtc: CRTC to which the frames will belong
+ *
+ * Adds files to the debugfs directory that allow userspace to control the
+ * generation of frame CRCs and to read them.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+ struct dentry *crc_ent, *ent;
+
+ if (!crtc->funcs->set_crc_source)
+ return 0;
+
+ crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
+ if (!crc_ent)
+ return -ENOMEM;
+
+ ent = debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_control_fops);
+ if (!ent)
+ goto error;
+
+ ent = debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_data_fops);
+ if (!ent)
+ goto error;
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(crc_ent);
+
+ return -ENOMEM;
+}
+
+/**
+ * drm_crtc_add_crc_entry - Add entry with CRC information for a frame
+ * @crtc: CRTC to which the frame belongs
+ * @has_frame: whether this entry has a frame number to go with
+ * @frame: number of the frame these CRCs are about
+ * @crcs: array of CRC values, with length matching #drm_crtc_crc.values_cnt
+ *
+ * For each frame, the driver polls the source of CRCs for new data and calls
+ * this function to add them to the buffer from where userspace reads.
+ */
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs)
+{
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ int head, tail;
+
+ assert_spin_locked(&crc->lock);
+
+ /* Caller may not have noticed yet that userspace has stopped reading */
+ if (!crc->opened)
+ return -EINVAL;
+
+ head = crc->head;
+ tail = crc->tail;
+
+ if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+ return -ENOBUFS;
+ }
+
+ entry = &crc->entries[head];
+ entry->frame = frame;
+ entry->has_frame_counter = has_frame;
+ memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);
+
+ head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
+ crc->head = head;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
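
A driver would typically feed this from its per-frame CRC interrupt handler, roughly as sketched below; foo_read_hw_crc(), foo_read_frame_counter() and the single CRC value per frame are purely illustrative, and waking &crc->wq (which the blocked reader in crtc_crc_read() waits on) is assumed here to be the driver's responsibility:

static irqreturn_t foo_crc_irq_handler(int irq, void *arg)
{
	struct drm_crtc *crtc = arg;
	struct drm_crtc_crc *crc = &crtc->crc;
	uint32_t crcs[DRM_MAX_CRC_NR] = { 0 };

	crcs[0] = foo_read_hw_crc(crtc);	/* hypothetical register read */

	spin_lock(&crc->lock);			/* drm_crtc_add_crc_entry() asserts this lock */
	drm_crtc_add_crc_entry(crtc, true, foo_read_frame_counter(crtc), crcs);
	wake_up_interruptible(&crc->wq);	/* unblock readers of the data file */
	spin_unlock(&crc->lock);

	return IRQ_HANDLED;
}
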
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a751f6fe..e02563966271 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -142,12 +142,25 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
sizeof(dp_dual_mode_hdmi_id)) == 0;
}
+static bool is_type1_adaptor(uint8_t adaptor_id)
+{
+ return adaptor_id == 0 || adaptor_id == 0xff;
+}
+
static bool is_type2_adaptor(uint8_t adaptor_id)
{
return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_REV_TYPE2);
}
+static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
+ const uint8_t adaptor_id)
+{
+ return is_hdmi_adaptor(hdmi_id) &&
+ (adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
+ DP_DUAL_MODE_TYPE_HAS_DPCD));
+}
+
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @adapter: I2C adapter for the DDC bus
@@ -185,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
+ DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
+ ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
@@ -202,13 +217,26 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
+ DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
+ adaptor_id, ret);
if (ret == 0) {
+ if (is_lspcon_adaptor(hdmi_id, adaptor_id))
+ return DRM_DP_DUAL_MODE_LSPCON;
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
+ /*
+ * If this is neither a proper type 1 ID nor a broken type 1 adaptor
+ * as described above, assume type 1, but let the user know that we
+ * may have misdetected the type.
+ */
+ if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+ DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
+ adaptor_id);
+
}
if (is_hdmi_adaptor(hdmi_id))
@@ -364,3 +392,96 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
+
+/**
+ * drm_lspcon_get_mode: Get LSPCON's current mode of operation by
+ * reading offset (0x80, 0x41)
+ * @adapter: I2C-over-aux adapter
+ * @mode: pointer through which the current LSPCON mode of operation is returned
+ *
+ * Returns:
+ * 0 on success, with *mode set to the current mode of operation;
+ * negative error code on failure
+ */
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode *mode)
+{
+ u8 data;
+ int ret = 0;
+
+ if (!mode) {
+ DRM_ERROR("NULL input\n");
+ return -EINVAL;
+ }
+
+ /* Read Status: i2c over aux */
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+ &data, sizeof(data));
+ if (ret < 0) {
+ DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+ return -EFAULT;
+ }
+
+ if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
+ *mode = DRM_LSPCON_MODE_PCON;
+ else
+ *mode = DRM_LSPCON_MODE_LS;
+ return 0;
+}
+EXPORT_SYMBOL(drm_lspcon_get_mode);
+
+/**
+ * drm_lspcon_set_mode: Change LSPCON's mode of operation by
+ * writing offset (0x80, 0x40)
+ * @adapter: I2C-over-aux adapter
+ * @mode: required mode of operation
+ *
+ * Returns:
+ * 0 on success, negative error code on failure or timeout
+ */
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode mode)
+{
+ u8 data = 0;
+ int ret;
+ int time_out = 200;
+ enum drm_lspcon_mode current_mode;
+
+ if (mode == DRM_LSPCON_MODE_PCON)
+ data = DP_DUAL_MODE_LSPCON_MODE_PCON;
+
+ /* Change mode */
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
+ &data, sizeof(data));
+ if (ret < 0) {
+ DRM_ERROR("LSPCON mode change failed\n");
+ return ret;
+ }
+
+ /*
+ * Confirm mode change by reading the status bit.
+ * Sometimes, it takes a while to change the mode,
+ * so wait and retry until time out or done.
+ */
+ do {
+ ret = drm_lspcon_get_mode(adapter, &current_mode);
+ if (ret) {
+ DRM_ERROR("can't confirm LSPCON mode change\n");
+ return ret;
+ } else {
+ if (current_mode != mode) {
+ msleep(10);
+ time_out -= 10;
+ } else {
+ DRM_DEBUG_KMS("LSPCON mode changed to %s\n",
+ mode == DRM_LSPCON_MODE_LS ?
+ "LS" : "PCON");
+ return 0;
+ }
+ }
+ } while (time_out);
+
+ DRM_ERROR("LSPCON mode change timed out\n");
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(drm_lspcon_set_mode);
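
For illustration, a driver that wants to run an LSPCON in PCON mode could combine these two helpers roughly as follows (the foo_lspcon_init() wrapper and its adapter are hypothetical):

static int foo_lspcon_init(struct i2c_adapter *adapter)
{
	enum drm_lspcon_mode mode;
	int ret;

	ret = drm_lspcon_get_mode(adapter, &mode);
	if (ret)
		return ret;

	if (mode != DRM_LSPCON_MODE_PCON)
		ret = drm_lspcon_set_mode(adapter, DRM_LSPCON_MODE_PCON);

	return ret;
}
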
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 04e457117980..aa644487749c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
}
kfree(port);
}
@@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_put_port(port);
goto out;
}
- if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
@@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
if (!port->input && port->vcpi.vcpi > 0) {
drm_dp_mst_reset_vcpi_slots(mgr, port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6efdba4993fc..f74b7d06ec01 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -32,7 +32,10 @@
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
+
+#include <drm/drm_drv.h>
#include <drm/drmP.h>
+
#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"
@@ -257,10 +260,7 @@ static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
drm_debugfs_cleanup(minor);
}
-/**
- * drm_minor_acquire - Acquire a DRM minor
- * @minor_id: Minor ID of the DRM-minor
- *
+/*
* Looks up the given minor-ID and returns the respective DRM-minor object. The
* reference-count of the underlying device is increased so you must release this
* object with drm_minor_release().
@@ -268,10 +268,6 @@ static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
* As long as you hold this minor, it is guaranteed that the object and the
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
- *
- * Returns:
- * Pointer to minor-object with increased device-refcount, or PTR_ERR on
- * failure.
*/
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
@@ -294,12 +290,6 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
return minor;
}
-/**
- * drm_minor_release - Release DRM minor
- * @minor: Pointer to DRM minor object
- *
- * Release a minor that was previously acquired via drm_minor_acquire().
- */
void drm_minor_release(struct drm_minor *minor)
{
drm_dev_unref(minor->dev);
@@ -313,9 +303,10 @@ void drm_minor_release(struct drm_minor *minor)
* callbacks implemented by the driver. The driver then needs to initialize all
* the various subsystems for the drm device like memory management, vblank
* handling, modesetting support and initial output configuration plus obviously
- * initialize all the corresponding hardware bits. Finally when everything is up
- * and running and ready for userspace the device instance can be published
- * using drm_dev_register().
+ * initialize all the corresponding hardware bits. An important part of this is
+ * also calling drm_dev_set_unique() to set the userspace-visible unique name of
+ * this device instance. Finally when everything is up and running and ready for
+ * userspace the device instance can be published using drm_dev_register().
*
* There is also deprecated support for initializing device instances using
* bus-specific helpers and the ->load() callback. But due to
@@ -337,17 +328,6 @@ void drm_minor_release(struct drm_minor *minor)
* dev_priv field of &drm_device.
*/
-static int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
- if (!name)
- return -EINVAL;
-
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-
/**
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
@@ -517,12 +497,6 @@ int drm_dev_init(struct drm_device *dev,
goto err_free;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
- }
-
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
@@ -568,6 +542,9 @@ err_minors:
drm_fs_inode_free(dev->anon_inode);
err_free:
mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
@@ -630,6 +607,9 @@ static void drm_dev_release(struct kref *ref)
drm_minor_free(dev, DRM_MINOR_CONTROL);
mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
kfree(dev->unique);
kfree(dev);
}
@@ -764,6 +744,26 @@ void drm_dev_unregister(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unregister);
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @name: unique name
+ *
+ * Sets the unique name of a DRM device using the specified string. Drivers
+ * can use this at driver probe time if the unique name of the devices they
+ * drive is static.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_dev_set_unique(struct drm_device *dev, const char *name)
+{
+ kfree(dev->unique);
+ dev->unique = kstrdup(name, GFP_KERNEL);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
+
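
A minimal sketch of the probe-time usage mentioned in the DOC comment above; the platform driver, foo_drm_driver and the error handling are illustrative only:

static int foo_drm_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = drm_dev_set_unique(drm, dev_name(&pdev->dev));
	if (ret)
		goto err_unref;

	/* ... hardware, memory management and modeset initialization ... */

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unref;

	return 0;

err_unref:
	drm_dev_unref(drm);
	return ret;
}
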
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
new file mode 100644
index 000000000000..8ac5a1c1d811
--- /dev/null
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The KMS API doesn't standardize backing storage object creation and leaves it
+ * to driver-specific ioctls. Furthermore actually creating a buffer object even
+ * for GEM-based drivers is done through a driver-specific ioctl - GEM only has
+ * a common userspace interface for sharing and destroying objects. While not an
+ * issue for full-fledged graphics stacks that include device-specific userspace
+ * components (in libdrm for instance), this limit makes DRM-based early boot
+ * graphics unnecessarily complex.
+ *
+ * Dumb objects partly alleviate the problem by providing a standard API to
+ * create dumb buffers suitable for scanout, which can then be used to create
+ * KMS frame buffers.
+ *
+ * To support dumb objects drivers must implement the dumb_create,
+ * dumb_destroy and dumb_map_offset operations from struct &drm_driver. See
+ * there for further details.
+ *
+ * Note that dumb objects may not be used for gpu acceleration, as has been
+ * attempted on some ARM embedded platforms. Such drivers really must have
+ * a hardware-specific ioctl to allocate suitable buffer objects.
+ */
+
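
For context, the userspace side of the dumb-buffer ioctls wired up in this file looks roughly like the sketch below (error handling omitted; fd is an already opened KMS device node and the 32 bpp format is just an example):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>	/* uapi header, pulls in the drm_mode_* structs */

static void *create_and_map_dumb(int fd, uint32_t width, uint32_t height,
				 uint32_t *pitch, uint32_t *handle)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	/* DRM_IOCTL_MODE_CREATE_DUMB fills in handle, pitch and size */
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	/* DRM_IOCTL_MODE_MAP_DUMB returns a fake offset for mmap() */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	*pitch = create.pitch;
	*handle = create.handle;
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}
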
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+ u32 cpp, stride, size;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ if (!args->width || !args->height || !args->bpp)
+ return -EINVAL;
+
+ /* overflow checks for 32bit size calculations */
+ /* NOTE: DIV_ROUND_UP() can overflow */
+ cpp = DIV_ROUND_UP(args->bpp, 8);
+ if (!cpp || cpp > 0xffffffffU / args->width)
+ return -EINVAL;
+ stride = cpp * args->width;
+ if (args->height > 0xffffffffU / stride)
+ return -EINVAL;
+
+ /* test for wrap-around */
+ size = args->height * stride;
+ if (PAGE_ALIGN(size) == 0)
+ return -EINVAL;
+
+ /*
+ * handle, pitch and size are output parameters. Zero them out to
+ * prevent drivers from accidentally using uninitialized data. Since
+ * not all existing userspace is clearing these fields properly we
+ * cannot reject IOCTLs with garbage in them.
+ */
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+/**
+ * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
+
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ec77bd3e1f08..336be31ff3de 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -957,13 +957,13 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 58 - 720(1440)x480i@240 */
+ /* 58 - 720(1440)x480i@240Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 59 - 720(1440)x480i@240 */
+ /* 59 - 720(1440)x480i@240Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
return ret == xfers ? 0 : -1;
}
+static void connector_bad_edid(struct drm_connector *connector,
+ u8 *edid, int num_blocks)
+{
+ int i;
+
+ if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ return;
+
+ dev_warn(connector->dev->dev,
+ "%s: EDID is invalid:\n",
+ connector->name);
+ for (i = 0; i < num_blocks; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
+ char prefix[20];
+
+ if (drm_edid_is_zero(block, EDID_LENGTH))
+ sprintf(prefix, "\t[%02x] ZERO ", i);
+ else if (!drm_edid_block_valid(block, i, false, NULL))
+ sprintf(prefix, "\t[%02x] BAD ", i);
+ else
+ sprintf(prefix, "\t[%02x] GOOD ", i);
+
+ print_hex_dump(KERN_WARNING,
+ prefix, DUMP_PREFIX_NONE, 16, 1,
+ block, EDID_LENGTH, false);
+ }
+}
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1282,20 +1310,19 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data)
{
int i, j = 0, valid_extensions = 0;
- u8 *block, *new;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+ u8 *edid, *new;
- if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
/* base block fetch */
for (i = 0; i < 4; i++) {
- if (get_edid_block(data, block, 0, EDID_LENGTH))
+ if (get_edid_block(data, edid, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block, 0, print_bad_edid,
+ if (drm_edid_block_valid(edid, 0, false,
&connector->edid_corrupt))
break;
- if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
connector->null_edid_counter++;
goto carp;
}
@@ -1304,58 +1331,62 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
goto carp;
/* if there's no extensions, we're done */
- if (block[0x7e] == 0)
- return (struct edid *)block;
+ valid_extensions = edid[0x7e];
+ if (valid_extensions == 0)
+ return (struct edid *)edid;
- new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
- block = new;
+ edid = new;
+
+ for (j = 1; j <= edid[0x7e]; j++) {
+ u8 *block = edid + j * EDID_LENGTH;
- for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
- if (get_edid_block(data,
- block + (valid_extensions + 1) * EDID_LENGTH,
- j, EDID_LENGTH))
+ if (get_edid_block(data, block, j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1)
- * EDID_LENGTH, j,
- print_bad_edid,
- NULL)) {
- valid_extensions++;
+ if (drm_edid_block_valid(block, j, false, NULL))
break;
- }
}
- if (i == 4 && print_bad_edid) {
- dev_warn(connector->dev->dev,
- "%s: Ignoring invalid EDID block %d.\n",
- connector->name, j);
-
- connector->bad_edid_counter++;
- }
+ if (i == 4)
+ valid_extensions--;
}
- if (valid_extensions != block[0x7e]) {
- block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
- block[0x7e] = valid_extensions;
- new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (valid_extensions != edid[0x7e]) {
+ u8 *base;
+
+ connector_bad_edid(connector, edid, edid[0x7e] + 1);
+
+ edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
+ edid[0x7e] = valid_extensions;
+
+ new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
- block = new;
- }
- return (struct edid *)block;
+ base = new;
+ for (i = 0; i <= edid[0x7e]; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
-carp:
- if (print_bad_edid) {
- dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- connector->name, j);
+ if (!drm_edid_block_valid(block, i, false, NULL))
+ continue;
+
+ memcpy(base, block, EDID_LENGTH);
+ base += EDID_LENGTH;
+ }
+
+ kfree(edid);
+ edid = new;
}
- connector->bad_edid_counter++;
+ return (struct edid *)edid;
+
+carp:
+ connector_bad_edid(connector, edid, 1);
out:
- kfree(block);
+ kfree(edid);
return NULL;
}
EXPORT_SYMBOL_GPL(drm_do_get_edid);
@@ -2582,6 +2613,41 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
return clock;
}
+static bool
+cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
+{
+ /*
+ * For certain VICs the spec allows the vertical
+ * front porch to vary by one or two lines.
+ *
+ * cea_modes[] stores the variant with the shortest
+ * vertical front porch. We can adjust the mode to
+ * get the other variants by simply increasing the
+ * vertical front porch length.
+ */
+ BUILD_BUG_ON(edid_cea_modes[8].vtotal != 262 ||
+ edid_cea_modes[9].vtotal != 262 ||
+ edid_cea_modes[12].vtotal != 262 ||
+ edid_cea_modes[13].vtotal != 262 ||
+ edid_cea_modes[23].vtotal != 312 ||
+ edid_cea_modes[24].vtotal != 312 ||
+ edid_cea_modes[27].vtotal != 312 ||
+ edid_cea_modes[28].vtotal != 312);
+
+ if (((vic == 8 || vic == 9 ||
+ vic == 12 || vic == 13) && mode->vtotal < 263) ||
+ ((vic == 23 || vic == 24 ||
+ vic == 27 || vic == 28) && mode->vtotal < 314)) {
+ mode->vsync_start++;
+ mode->vsync_end++;
+ mode->vtotal++;
+
+ return true;
+ }
+
+ return false;
+}
+
static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
@@ -2591,19 +2657,21 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
return 0;
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
+ struct drm_display_mode cea_mode = edid_cea_modes[vic];
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
- clock1 = cea_mode->clock;
- clock2 = cea_mode_alternate_clock(cea_mode);
+ clock1 = cea_mode.clock;
+ clock2 = cea_mode_alternate_clock(&cea_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
- if (drm_mode_equal_no_clocks(to_match, cea_mode))
- return vic;
+ do {
+ if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+ return vic;
+ } while (cea_mode_alternate_timings(vic, &cea_mode));
}
return 0;
@@ -2624,18 +2692,23 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
return 0;
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
+ struct drm_display_mode cea_mode = edid_cea_modes[vic];
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
- clock1 = cea_mode->clock;
- clock2 = cea_mode_alternate_clock(cea_mode);
+ clock1 = cea_mode.clock;
+ clock2 = cea_mode_alternate_clock(&cea_mode);
- if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
- KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
- drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
- return vic;
+ if (KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock1) &&
+ KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock2))
+ continue;
+
+ do {
+ if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+ return vic;
+ } while (cea_mode_alternate_timings(vic, &cea_mode));
}
+
return 0;
}
EXPORT_SYMBOL(drm_match_cea_mode);
@@ -3580,32 +3653,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
EXPORT_SYMBOL(drm_av_sync_delay);
/**
- * drm_select_eld - select one ELD from multiple HDMI/DP sinks
- * @encoder: the encoder just changed display mode
- *
- * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
- * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
- *
- * Return: The connector associated with the first HDMI/DP sink that has ELD
- * attached to it.
- */
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
-{
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
-
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
- drm_for_each_connector(connector, dev)
- if (connector->encoder == encoder && connector->eld[0])
- return connector;
-
- return NULL;
-}
-EXPORT_SYMBOL(drm_select_eld);
-
-/**
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 1fd6eac1400c..81b3558302b5 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,13 +18,16 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/reservation.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -176,20 +179,20 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
+ const struct drm_format_info *info;
struct drm_fb_cma *fb_cma;
struct drm_gem_cma_object *objs[4];
struct drm_gem_object *obj;
- unsigned int hsub;
- unsigned int vsub;
int ret;
int i;
- hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info)
+ return ERR_PTR(-EINVAL);
- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
@@ -200,7 +203,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
}
min_size = (height - 1) * mode_cmd->pitches[i]
- + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + width * info->cpp[i]
+ mode_cmd->offsets[i];
if (obj->size < min_size) {
@@ -265,16 +268,51 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+/**
+ * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
+ * @plane: Which plane
+ * @state: Plane state to attach the fence to
+ *
+ * This should be put into the prepare_fb hook of struct &drm_plane_helper_funcs.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to the plane state for the atomic
+ * helper to wait on.
+ *
+ * There is no need for cleanup_fb for CMA based framebuffer drivers.
+ */
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dma_buf *dma_buf;
+ struct dma_fence *fence;
+
+ if ((plane->state->fb == state->fb) || !state->fb)
+ return 0;
+
+ dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
+ if (dma_buf) {
+ fence = reservation_object_get_excl_rcu(dma_buf->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
+
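
As the kerneldoc above suggests, a CMA-based driver would hook this helper into its plane helper functions, roughly along these lines (foo_plane_atomic_check() and foo_plane_atomic_update() are placeholders for the driver's own callbacks):

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.prepare_fb	= drm_fb_cma_prepare_fb,
	.atomic_check	= foo_plane_atomic_check,
	.atomic_update	= foo_plane_atomic_update,
};

/* at plane init time: drm_plane_helper_add(plane, &foo_plane_helper_funcs); */
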
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ const struct drm_format_info *info;
+ int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->pixel_format);
- for (i = 0; i < n; i++) {
+ info = drm_format_info(fb->pixel_format);
+
+ for (i = 0; i < info->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
drm_gem_cma_describe(fb_cma->obj[i], m);
@@ -311,14 +349,10 @@ static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
.fb_mmap = drm_fb_cma_mmap,
};
@@ -557,7 +591,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
- drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
+ if (fbdev_cma->fb_helper.fbdev)
+ drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
if (fbdev_cma->fb) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 03414bde1f15..1f26634f53d8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -131,7 +131,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
- kfree(fb_helper->connector_info[i]);
+ struct drm_fb_helper_connector *fb_helper_connector =
+ fb_helper->connector_info[i];
+
+ drm_connector_unreference(fb_helper_connector->connector);
+
+ kfree(fb_helper_connector);
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
@@ -251,6 +256,9 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
continue;
funcs = mode_set->crtc->helper_private;
+ if (funcs->mode_set_base_atomic == NULL)
+ continue;
+
drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
@@ -304,6 +312,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
continue;
}
+ if (funcs->mode_set_base_atomic == NULL)
+ continue;
+
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
@@ -367,9 +378,7 @@ fail:
if (ret == -EDEADLK)
goto backoff;
- if (ret != 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
backoff:
@@ -394,11 +403,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
- if (dev->mode_config.rotation_property) {
+ if (plane->rotation_property)
drm_mode_plane_set_obj_prop(plane,
- dev->mode_config.rotation_property,
+ plane->rotation_property,
DRM_ROTATE_0);
- }
}
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -603,6 +611,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_blank);
+static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
+ struct drm_mode_set *modeset)
+{
+ int i;
+
+ for (i = 0; i < modeset->num_connectors; i++) {
+ drm_connector_unreference(modeset->connectors[i]);
+ modeset->connectors[i] = NULL;
+ }
+ modeset->num_connectors = 0;
+
+ drm_mode_destroy(helper->dev, modeset->mode);
+ modeset->mode = NULL;
+
+ /* FIXME should hold a ref? */
+ modeset->fb = NULL;
+}
+
static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
@@ -612,10 +638,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->connector_info[i]);
}
kfree(helper->connector_info);
+
for (i = 0; i < helper->crtc_count; i++) {
- kfree(helper->crtc_info[i].mode_set.connectors);
- if (helper->crtc_info[i].mode_set.mode)
- drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
+
+ drm_fb_helper_modeset_release(helper, modeset);
+ kfree(modeset->connectors);
}
kfree(helper->crtc_info);
}
@@ -644,7 +672,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->dirty_lock, flags);
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ /* call dirty callback only when it has been really touched */
+ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
}
/**
@@ -1211,11 +1241,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
- /* Need to resize the fb object !!! */
- if (var->bits_per_pixel > fb->bits_per_pixel ||
- var->xres > fb->width || var->yres > fb->height ||
- var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
- DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ /*
+ * Changes to struct fb_var_screeninfo are currently not pushed back
+ * to KMS, hence fail if different settings are requested.
+ */
+ if (var->bits_per_pixel != fb->bits_per_pixel ||
+ var->xres != fb->width || var->yres != fb->height ||
+ var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
@@ -1361,16 +1394,13 @@ retry:
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
-
fail:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK)
goto backoff;
- if (ret != 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
backoff:
@@ -1929,19 +1959,20 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_fb_helper_connector *fb_helper_conn;
- int i;
- uint64_t conn_configured = 0, mask;
+ const u64 mask = BIT_ULL(fb_helper->connector_count) - 1;
+ u64 conn_configured = 0;
int tile_pass = 0;
- mask = (1 << fb_helper->connector_count) - 1;
+ int i;
+
retry:
for (i = 0; i < fb_helper->connector_count; i++) {
fb_helper_conn = fb_helper->connector_info[i];
- if (conn_configured & (1 << i))
+ if (conn_configured & BIT_ULL(i))
continue;
if (enabled[i] == false) {
- conn_configured |= (1 << i);
+ conn_configured |= BIT_ULL(i);
continue;
}
@@ -1982,7 +2013,7 @@ retry:
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
- conn_configured |= (1 << i);
+ conn_configured |= BIT_ULL(i);
}
if ((conn_configured & mask) != mask) {
@@ -2088,7 +2119,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_fb_offset *offsets;
- struct drm_mode_set *modeset;
bool *enabled;
int width, height;
int i;
@@ -2136,45 +2166,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- modeset->num_connectors = 0;
- modeset->fb = NULL;
- }
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ drm_fb_helper_modeset_release(fb_helper,
+ &fb_helper->crtc_info[i].mode_set);
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
- modeset = &fb_crtc->mode_set;
+ struct drm_mode_set *modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
+ struct drm_connector *connector =
+ fb_helper->connector_info[i]->connector;
+
DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
+
fb_crtc->desired_mode = mode;
fb_crtc->x = offset->x;
fb_crtc->y = offset->y;
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
- modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ drm_connector_reference(connector);
+ modeset->connectors[modeset->num_connectors++] = connector;
modeset->fb = fb_helper->fb;
modeset->x = offset->x;
modeset->y = offset->y;
}
}
-
- /* Clear out any old modes if there are no more connected outputs. */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- if (modeset->num_connectors == 0) {
- BUG_ON(modeset->fb);
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = NULL;
- }
- }
out:
kfree(crtcs);
kfree(modes);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e84faecf5225..5d96de40b63f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -51,10 +51,11 @@ DEFINE_MUTEX(drm_global_mutex);
* Drivers must define the file operations structure that forms the DRM
* userspace API entry point, even though most of those operations are
* implemented in the DRM core. The mandatory functions are drm_open(),
- * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled.
- * Drivers which implement private ioctls that require 32/64 bit compatibility
- * support must provided their onw .compat_ioctl() handler that processes
- * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled
+ * (note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n). Drivers which
+ * implement private ioctls that require 32/64 bit compatibility support must
+ * provide their own .compat_ioctl() handler that processes private ioctls and
+ * calls drm_compat_ioctl() for core ioctls.
*
* In addition drm_read() and drm_poll() provide support for DRM events. DRM
* events are a generic and extensible means to send asynchronous events to
@@ -75,9 +76,7 @@ DEFINE_MUTEX(drm_global_mutex);
* .open = drm_open,
* .release = drm_release,
* .unlocked_ioctl = drm_ioctl,
- * #ifdef CONFIG_COMPAT
- * .compat_ioctl = drm_compat_ioctl,
- * #endif
+ * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
* .poll = drm_poll,
* .read = drm_read,
* .llseek = no_llseek,
@@ -663,6 +662,10 @@ void drm_event_cancel_free(struct drm_device *dev,
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (p->fence)
+ dma_fence_put(p->fence);
+
kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
@@ -692,8 +695,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
- fence_signal(e->fence);
- fence_put(e->fence);
+ dma_fence_signal(e->fence);
+ dma_fence_put(e->fence);
}
if (!e->file_priv) {
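
With the drm_event_cancel_free() change above, a pending event's fence is now dropped by the core. A hedged sketch of the resulting driver error path (example_hw_queue_flip() is a hypothetical hardware call):

static int example_queue_flip(struct drm_device *dev, struct drm_file *file,
			      struct drm_pending_vblank_event *e)
{
	int ret;

	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
	e->event.base.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file, &e->base, &e->event.base);
	if (ret)
		return ret;

	ret = example_hw_queue_flip(e);	/* hypothetical hardware call */
	if (ret)
		/* also puts e->base.fence now, if the driver attached one */
		drm_event_cancel_free(dev, &e->base);

	return ret;
}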
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 29c56b4331e0..90d2cc8da8eb 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -79,17 +79,13 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
- * drm_get_format_name - return a string for drm fourcc format
+ * drm_get_format_name - fill a string with a drm fourcc format's name
* @format: format to compute name of
- *
- * Note that the buffer returned by this function is owned by the caller
- * and will need to be freed using kfree().
+ * @buf: caller-supplied buffer
*/
-char *drm_get_format_name(uint32_t format)
+const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf)
{
- char *buf = kmalloc(32, GFP_KERNEL);
-
- snprintf(buf, 32,
+ snprintf(buf->str, sizeof(buf->str),
"%c%c%c%c %s-endian (0x%08x)",
printable_char(format & 0xff),
printable_char((format >> 8) & 0xff),
@@ -98,87 +94,109 @@ char *drm_get_format_name(uint32_t format)
format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
format);
- return buf;
+ return buf->str;
}
EXPORT_SYMBOL(drm_get_format_name);
+/*
+ * Internal function to query information for a given format. See
+ * drm_format_info() for the public API.
+ */
+const struct drm_format_info *__drm_format_info(u32 format)
+{
+ static const struct drm_format_info formats[] = {
+ { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+ { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+ { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ };
+
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
/**
- * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * drm_format_info - query information for a given format
* @format: pixel format (DRM_FORMAT_*)
- * @depth: storage for the depth value
- * @bpp: storage for the bpp value
*
- * This only supports RGB formats here for compat with code that doesn't use
- * pixel formats directly yet.
+ * The caller should only pass a supported pixel format to this function.
+ * Unsupported pixel formats will generate a warning in the kernel log.
+ *
+ * Returns:
+ * The instance of struct drm_format_info that describes the pixel format, or
+ * NULL if the format is unsupported.
*/
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
- int *bpp)
+const struct drm_format_info *drm_format_info(u32 format)
{
- char *format_name;
+ const struct drm_format_info *info;
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- *depth = 8;
- *bpp = 8;
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- *depth = 15;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- *depth = 16;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- *depth = 24;
- *bpp = 24;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- *depth = 24;
- *bpp = 32;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- *depth = 30;
- *bpp = 32;
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- *depth = 32;
- *bpp = 32;
- break;
- default:
- format_name = drm_get_format_name(format);
- DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
- kfree(format_name);
- *depth = 0;
- *bpp = 0;
- break;
- }
+ info = __drm_format_info(format);
+ WARN_ON(!info);
+ return info;
}
-EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+EXPORT_SYMBOL(drm_format_info);
/**
* drm_format_num_planes - get the number of planes for format
@@ -189,28 +207,10 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
*/
int drm_format_num_planes(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->num_planes : 1;
}
EXPORT_SYMBOL(drm_format_num_planes);
@@ -224,40 +224,13 @@ EXPORT_SYMBOL(drm_format_num_planes);
*/
int drm_format_plane_cpp(uint32_t format, int plane)
{
- unsigned int depth;
- int bpp;
+ const struct drm_format_info *info;
- if (plane >= drm_format_num_planes(format))
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return 2;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return plane ? 2 : 1;
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 1;
- default:
- drm_fb_get_bpp_depth(format, &depth, &bpp);
- return bpp >> 3;
- }
+ return info->cpp[plane];
}
EXPORT_SYMBOL(drm_format_plane_cpp);
@@ -271,28 +244,10 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
*/
int drm_format_horz_chroma_subsampling(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->hsub : 1;
}
EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
@@ -306,18 +261,10 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
*/
int drm_format_vert_chroma_subsampling(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->vsub : 1;
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
@@ -332,13 +279,16 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
*/
int drm_format_plane_width(int width, uint32_t format, int plane)
{
- if (plane >= drm_format_num_planes(format))
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return width;
- return width / drm_format_horz_chroma_subsampling(format);
+ return width / info->hsub;
}
EXPORT_SYMBOL(drm_format_plane_width);
@@ -353,12 +303,15 @@ EXPORT_SYMBOL(drm_format_plane_width);
*/
int drm_format_plane_height(int height, uint32_t format, int plane)
{
- if (plane >= drm_format_num_planes(format))
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return height;
- return height / drm_format_vert_chroma_subsampling(format);
+ return height / info->vsub;
}
EXPORT_SYMBOL(drm_format_plane_height);
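
A usage sketch of the new format-info API introduced above, combining the caller-supplied name buffer with the per-plane data from drm_format_info() (assumed caller code, not part of this patch):

static void example_dump_format(u32 format, unsigned int width,
				unsigned int height)
{
	const struct drm_format_info *info = drm_format_info(format);
	struct drm_format_name_buf name;
	int i;

	if (!info)
		return;

	DRM_DEBUG_KMS("%s: %d plane(s)\n",
		      drm_get_format_name(format, &name), info->num_planes);

	for (i = 0; i < info->num_planes; i++) {
		unsigned int w = i ? width / info->hsub : width;
		unsigned int h = i ? height / info->vsub : height;

		DRM_DEBUG_KMS("  plane %d: %ux%u, cpp %d\n", i, w, h, info->cpp[i]);
	}
}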
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 398efd67cb93..cbf0c893f426 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -126,111 +126,34 @@ int drm_mode_addfb(struct drm_device *dev,
return 0;
}
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
- uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
- char *format_name;
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_XBGR4444:
- case DRM_FORMAT_RGBX4444:
- case DRM_FORMAT_BGRX4444:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ABGR4444:
- case DRM_FORMAT_RGBA4444:
- case DRM_FORMAT_BGRA4444:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_AYUV:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 0;
- default:
- format_name = drm_get_format_name(r->pixel_format);
- DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
- kfree(format_name);
- return -EINVAL;
- }
-}
-
static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
- int ret, hsub, vsub, num_planes, i;
-
- ret = format_check(r);
- if (ret) {
- char *format_name = drm_get_format_name(r->pixel_format);
- DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
- kfree(format_name);
- return ret;
+ const struct drm_format_info *info;
+ int i;
+
+ info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
+ if (!info) {
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("bad framebuffer format %s\n",
+ drm_get_format_name(r->pixel_format,
+ &format_name));
+ return -EINVAL;
}
- hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
- num_planes = drm_format_num_planes(r->pixel_format);
-
- if (r->width == 0 || r->width % hsub) {
+ if (r->width == 0 || r->width % info->hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
return -EINVAL;
}
- if (r->height == 0 || r->height % vsub) {
+ if (r->height == 0 || r->height % info->vsub) {
DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
return -EINVAL;
}
- for (i = 0; i < num_planes; i++) {
- unsigned int width = r->width / (i != 0 ? hsub : 1);
- unsigned int height = r->height / (i != 0 ? vsub : 1);
- unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? info->hsub : 1);
+ unsigned int height = r->height / (i != 0 ? info->vsub : 1);
+ unsigned int cpp = info->cpp[i];
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
@@ -254,6 +177,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
return -EINVAL;
}
+ if (r->flags & DRM_MODE_FB_MODIFIERS &&
+ r->modifier[i] != r->modifier[0]) {
+ DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
+ return -EINVAL;
+ }
+
/* modifier specific checks: */
switch (r->modifier[i]) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
@@ -273,7 +203,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
}
}
- for (i = num_planes; i < 4; i++) {
+ for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
return -EINVAL;
@@ -751,6 +681,11 @@ EXPORT_SYMBOL(drm_framebuffer_lookup);
* those used for fbdev. Note that the caller must hold a reference of its own,
* i.e. the object may not be destroyed through this call (since it'll lead to a
* locking inversion).
+ *
+ * NOTE: This function is deprecated. For driver-private framebuffers it is not
+ * recommended to embed a framebuffer struct into the fbdev struct; instead, a
+ * framebuffer pointer is preferred and drm_framebuffer_unreference() should be
+ * called when the framebuffer is to be cleaned up.
*/
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
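
The modifier check added to framebuffer_check() requires every used plane to carry the same modifier once DRM_MODE_FB_MODIFIERS is set, and zero modifiers on unused planes. A userspace-side sketch that satisfies it, assuming libdrm's drmModeAddFB2WithModifiers() and caller-provided fd, bo_handle, width and height:

uint32_t handles[4] = { bo_handle, bo_handle, 0, 0 };	/* NV12: two planes */
uint32_t pitches[4] = { width, width, 0, 0 };
uint32_t offsets[4] = { 0, width * height, 0, 0 };
uint64_t modifiers[4] = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
			  DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 };
uint32_t fb_id;
int ret;

ret = drmModeAddFB2WithModifiers(fd, width, height, DRM_FORMAT_NV12,
				 handles, pitches, offsets, modifiers,
				 &fb_id, DRM_MODE_FB_MODIFIERS);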
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e66af289a016..db80ec860e33 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -24,9 +24,6 @@
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
-/* drm_irq.c */
-extern unsigned int drm_timestamp_monotonic;
-
/* drm_fops.c */
extern struct mutex drm_global_mutex;
void drm_lastclose(struct drm_device *dev);
@@ -46,12 +43,21 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf);
+/* drm_drv.c */
+struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
/* drm_info.c */
int drm_name_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_irq.c */
+extern unsigned int drm_timestamp_monotonic;
+
+/* IOCTLS */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_modeset_ctl(struct drm_device *dev, void *data,
@@ -100,6 +106,9 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
int drm_debugfs_cleanup(struct drm_minor *minor);
int drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
+int drm_debugfs_crtc_add(struct drm_crtc *crtc);
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
#else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
@@ -119,4 +128,17 @@ static inline int drm_debugfs_connector_add(struct drm_connector *connector)
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}
+
+static inline int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+ return 0;
+}
+static inline void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+}
+
+static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+ return 0;
+}
#endif
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0ad2c47f808f..71c3473476c7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -254,10 +254,12 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
req->value = dev->mode_config.async_page_flip;
break;
case DRM_CAP_PAGE_FLIP_TARGET:
- req->value = 1;
- drm_for_each_crtc(crtc, dev) {
- if (!crtc->funcs->page_flip_target)
- req->value = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ req->value = 1;
+ drm_for_each_crtc(crtc, dev) {
+ if (!crtc->funcs->page_flip_target)
+ req->value = 0;
+ }
}
break;
case DRM_CAP_CURSOR_WIDTH:
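
After this change DRM_CAP_PAGE_FLIP_TARGET only reports 1 on modeset-capable drivers whose CRTCs all implement page_flip_target. A userspace sketch (assumes libdrm's drmGetCap(); flip_flags is a caller-owned variable):

uint64_t has_target = 0;

if (drmGetCap(fd, DRM_CAP_PAGE_FLIP_TARGET, &has_target) == 0 && has_target)
	flip_flags |= DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE;	/* opt in to target flips */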
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b969a64a1514..273625a85036 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -93,7 +93,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
* Reset the stored timestamp for the current vblank count to correspond
* to the last vblank occurred.
*
- * Only to be called from drm_vblank_on().
+ * Only to be called from drm_crtc_vblank_on().
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
@@ -234,6 +234,16 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
+static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return 0;
+
+ return vblank->count;
+}
+
/**
* drm_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
@@ -296,7 +306,7 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
* Always update the count and timestamp to maintain the
* appearance that the counter has been ticking all along until
* this time. This makes the count account for the entire time
- * between drm_vblank_on() and drm_vblank_off().
+ * between drm_crtc_vblank_on() and drm_crtc_vblank_off().
*/
drm_update_vblank_count(dev, pipe, 0);
@@ -888,31 +898,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
}
/**
- * drm_vblank_count - retrieve "cooked" vblank counter value
- * @dev: DRM device
- * @pipe: index of CRTC for which to retrieve the counter
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity.
- *
- * This is the legacy version of drm_crtc_vblank_count().
- *
- * Returns:
- * The software vblank counter.
- */
-u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return 0;
-
- return vblank->count;
-}
-EXPORT_SYMBOL(drm_vblank_count);
-
-/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
*
@@ -920,8 +905,6 @@ EXPORT_SYMBOL(drm_vblank_count);
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
- * This is the native KMS version of drm_vblank_count().
- *
* Returns:
* The software vblank counter.
*/
@@ -952,8 +935,10 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
u32 vblank_count;
unsigned int seq;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs)) {
+ *vblanktime = (struct timeval) { 0 };
return 0;
+ }
do {
seq = read_seqbegin(&vblank->seqlock);
@@ -1270,21 +1255,20 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
/**
- * drm_vblank_off - disable vblank events on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_vblank_off - disable vblank events on a CRTC
+ * @crtc: CRTC in question
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_vblank_on() can restore it again.
+ * stored so that drm_crtc_vblank_on() can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending.
- *
- * This is the legacy version of drm_crtc_vblank_off().
*/
-void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
+void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
@@ -1300,7 +1284,8 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
- /* Avoid redundant vblank disables without previous drm_vblank_on(). */
+ /* Avoid redundant vblank disables without previous
+ * drm_crtc_vblank_on(). */
if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
vblank_disable_and_save(dev, pipe);
@@ -1331,25 +1316,6 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
-EXPORT_SYMBOL(drm_vblank_off);
-
-/**
- * drm_crtc_vblank_off - disable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * Drivers can use this function to shut down the vblank interrupt handling when
- * disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_vblank_on can restore it again.
- *
- * Drivers must use this function when the hardware vblank counter can get
- * reset, e.g. when suspending.
- *
- * This is the native kms version of drm_vblank_off().
- */
-void drm_crtc_vblank_off(struct drm_crtc *crtc)
-{
- drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
-}
EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
@@ -1385,19 +1351,18 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_reset);
/**
- * drm_vblank_on - enable vblank events on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
*
* This function restores the vblank interrupt state captured with
- * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and
+ * drm_crtc_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
- *
- * This is the legacy version of drm_crtc_vblank_on().
*/
-void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1424,49 +1389,10 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-EXPORT_SYMBOL(drm_vblank_on);
-
-/**
- * drm_crtc_vblank_on - enable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * This functions restores the vblank interrupt state captured with
- * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionally called
- * in driver load code to reflect the current hardware state of the crtc.
- *
- * This is the native kms version of drm_vblank_on().
- */
-void drm_crtc_vblank_on(struct drm_crtc *crtc)
-{
- drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
-}
EXPORT_SYMBOL(drm_crtc_vblank_on);
-/**
- * drm_vblank_pre_modeset - account for vblanks across mode sets
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * Account for vblank events across mode setting events, which will likely
- * reset the hardware frame counter.
- *
- * This is done by grabbing a temporary vblank reference to ensure that the
- * vblank interrupt keeps running across the modeset sequence. With this the
- * software-side vblank frame counting will ensure that there are no jumps or
- * discontinuities.
- *
- * Unfortunately this approach is racy and also doesn't work when the vblank
- * interrupt stops running, e.g. across system suspend resume. It is therefore
- * highly recommended that drivers use the newer drm_vblank_off() and
- * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
- * using "cooked" software vblank frame counters and not relying on any hardware
- * counters.
- *
- * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
- * again.
- */
-void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
+static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
+ unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1490,17 +1416,9 @@ void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
vblank->inmodeset |= 0x2;
}
}
-EXPORT_SYMBOL(drm_vblank_pre_modeset);
-/**
- * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * This function again drops the temporary vblank reference acquired in
- * drm_vblank_pre_modeset.
- */
-void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
+static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
+ unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1523,7 +1441,6 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
vblank->inmodeset = 0;
}
}
-EXPORT_SYMBOL(drm_vblank_post_modeset);
/*
* drm_modeset_ctl - handle vblank event counter changes across mode switch
@@ -1556,10 +1473,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, pipe);
+ drm_legacy_vblank_pre_modeset(dev, pipe);
break;
case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, pipe);
+ drm_legacy_vblank_post_modeset(dev, pipe);
break;
default:
return -EINVAL;
@@ -1594,11 +1511,10 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
/*
- * drm_vblank_off() might have been called after we called
- * drm_vblank_get(). drm_vblank_off() holds event_lock
- * around the vblank disable, so no need for further locking.
- * The reference from drm_vblank_get() protects against
- * vblank disable from another source.
+ * drm_crtc_vblank_off() might have been called after we called
+ * drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
+ * vblank disable, so no need for further locking. The reference from
+ * drm_vblank_get() protects against vblank disable from another source.
*/
if (!vblank->enabled) {
ret = -EINVAL;
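
With the legacy drm_vblank_on/off() and pre/post-modeset helpers unexported above, drivers are expected to call the CRTC variants directly. A minimal sketch of the expected pattern (hook and hardware-call names are illustrative):

static void example_crtc_enable(struct drm_crtc *crtc)
{
	example_hw_enable(crtc);	/* hypothetical hardware programming */
	drm_crtc_vblank_on(crtc);	/* restores the saved frame count */
}

static void example_crtc_disable(struct drm_crtc *crtc)
{
	drm_crtc_vblank_off(crtc);	/* saves count, flushes pending events */
	example_hw_disable(crtc);
}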
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index c901f3c5b269..32d43f86a8f2 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -176,7 +176,8 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, task_pid_nr(current),
- master->lock.hw_lock->lock, lock->flags);
+ master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
+ lock->flags);
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 11d44a1e0ab3..025dcd8cadcb 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -104,6 +104,68 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
u64 end,
enum drm_mm_search_flags flags);
+#ifdef CONFIG_DRM_DEBUG_MM
+#include <linux/stackdepot.h>
+
+#define STACKDEPTH 32
+#define BUFSZ 4096
+
+static noinline void save_stack(struct drm_mm_node *node)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = STACKDEPTH,
+ .skip = 1
+ };
+
+ save_stack_trace(&trace);
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries-1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ /* May be called under spinlock, so avoid sleeping */
+ node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+}
+
+static void show_leaks(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+ unsigned long entries[STACKDEPTH];
+ char *buf;
+
+ buf = kmalloc(BUFSZ, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ list_for_each_entry(node, &mm->head_node.node_list, node_list) {
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = STACKDEPTH
+ };
+
+ if (!node->stack) {
+ DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
+ node->start, node->size);
+ continue;
+ }
+
+ depot_fetch_stack(node->stack, &trace);
+ snprint_stack_trace(buf, BUFSZ, &trace, 0);
+ DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
+ node->start, node->size, buf);
+ }
+
+ kfree(buf);
+}
+
+#undef STACKDEPTH
+#undef BUFSZ
+#else
+static void save_stack(struct drm_mm_node *node) { }
+static void show_leaks(struct drm_mm *mm) { }
+#endif
+
#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)
@@ -112,19 +174,12 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
START, LAST, static inline, drm_mm_interval_tree)
struct drm_mm_node *
-drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
return drm_mm_interval_tree_iter_first(&mm->interval_tree,
start, last);
}
-EXPORT_SYMBOL(drm_mm_interval_first);
-
-struct drm_mm_node *
-drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
-{
- return drm_mm_interval_tree_iter_next(node, start, last);
-}
-EXPORT_SYMBOL(drm_mm_interval_next);
+EXPORT_SYMBOL(__drm_mm_interval_first);
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
struct drm_mm_node *node)
@@ -228,6 +283,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
+
+ save_stack(node);
}
/**
@@ -249,6 +306,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
u64 end = node->start + node->size;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
+ u64 adj_start, adj_end;
if (WARN_ON(node->size == 0))
return -EINVAL;
@@ -270,9 +328,13 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
if (!hole->hole_follows)
return -ENOSPC;
- hole_start = __drm_mm_hole_node_start(hole);
- hole_end = __drm_mm_hole_node_end(hole);
- if (hole_start > node->start || hole_end < end)
+ adj_start = hole_start = __drm_mm_hole_node_start(hole);
+ adj_end = hole_end = __drm_mm_hole_node_end(hole);
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole, node->color, &adj_start, &adj_end);
+
+ if (adj_start > node->start || adj_end < end)
return -ENOSPC;
node->mm = mm;
@@ -293,6 +355,8 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->hole_follows = 1;
}
+ save_stack(node);
+
return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
@@ -397,6 +461,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
+
+ save_stack(node);
}
/**
@@ -861,10 +927,12 @@ EXPORT_SYMBOL(drm_mm_init);
* Note that it is a bug to call this function on an allocator which is not
* clean.
*/
-void drm_mm_takedown(struct drm_mm * mm)
+void drm_mm_takedown(struct drm_mm *mm)
{
- WARN(!list_empty(&mm->head_node.node_list),
- "Memory manager not clean during takedown.\n");
+ if (WARN(!list_empty(&mm->head_node.node_list),
+ "Memory manager not clean during takedown.\n"))
+ show_leaks(mm);
+
}
EXPORT_SYMBOL(drm_mm_takedown);
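
With CONFIG_DRM_DEBUG_MM=y the stack-depot hooks added above record who inserted every node, and drm_mm_takedown() dumps those stacks for anything still allocated. A leak would look roughly like this (illustrative driver code):

struct drm_mm mm;
struct drm_mm_node node = {};

drm_mm_init(&mm, 0, 4096);
drm_mm_insert_node(&mm, &node, 1024, 0, DRM_MM_SEARCH_DEFAULT);

/* ... missing drm_mm_remove_node(&node) ... */

drm_mm_takedown(&mm);	/* WARNs and prints the saved insertion stack */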
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
new file mode 100644
index 000000000000..2735a5847ffa
--- /dev/null
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drm_mode_config.h>
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+int drm_modeset_register_all(struct drm_device *dev)
+{
+ int ret;
+
+ ret = drm_plane_register_all(dev);
+ if (ret)
+ goto err_plane;
+
+ ret = drm_crtc_register_all(dev);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_encoder_register_all(dev);
+ if (ret)
+ goto err_encoder;
+
+ ret = drm_connector_register_all(dev);
+ if (ret)
+ goto err_connector;
+
+ return 0;
+
+err_connector:
+ drm_encoder_unregister_all(dev);
+err_encoder:
+ drm_crtc_unregister_all(dev);
+err_crtc:
+ drm_plane_unregister_all(dev);
+err_plane:
+ return ret;
+}
+
+void drm_modeset_unregister_all(struct drm_device *dev)
+{
+ drm_connector_unregister_all(dev);
+ drm_encoder_unregister_all(dev);
+ drm_crtc_unregister_all(dev);
+ drm_plane_unregister_all(dev);
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0;
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+
+ mutex_lock(&file_priv->fbs_lock);
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file_priv->fbs)
+ fb_count++;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+ /* mode_config.mutex protects the connector list against e.g. DP MST
+ * connector hot-adding. CRTC/Plane lists are invariant. */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_crtc(crtc, dev)
+ crtc_count++;
+
+ drm_for_each_connector(connector, dev)
+ connector_count++;
+
+ drm_for_each_encoder(encoder, dev)
+ encoder_count++;
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+ drm_for_each_crtc(crtc, dev) {
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+ drm_for_each_encoder(encoder, dev) {
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+ drm_for_each_connector(connector, dev) {
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_connectors = connector_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_config_reset - call ->reset callbacks
+ * @dev: drm device
+ *
+ * This functions calls all the crtc's, encoder's and connector's ->reset
+ * callback. Drivers can use this in e.g. their driver load or resume code to
+ * reset hardware and software state.
+ */
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ drm_for_each_plane(plane, dev)
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
+
+ drm_for_each_crtc(crtc, dev)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
+
+/*
+ * Global properties
+ */
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
+ { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
+ { DRM_PLANE_TYPE_PRIMARY, "Primary" },
+ { DRM_PLANE_TYPE_CURSOR, "Cursor" },
+};
+
+static int drm_mode_create_standard_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_connector_create_standard_properties(dev);
+ if (ret)
+ return ret;
+
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "type", drm_plane_type_enum_list,
+ ARRAY_SIZE(drm_plane_type_enum_list));
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.plane_type_property = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_X", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_x = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_Y", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_W", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_H", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_h = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_X", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_x = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_Y", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_W", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_H", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_h = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "FB_ID", DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_id = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "IN_FENCE_FD", -1, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_in_fence_fd = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "OUT_FENCE_PTR", 0, U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_out_fence_ptr = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_ID", DRM_MODE_OBJECT_CRTC);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_id = prop;
+
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
+ "ACTIVE");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_active = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "MODE_ID", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_mode_id = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "DEGAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_size_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "CTM", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.ctm_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "GAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "GAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_size_property = prop;
+
+ return 0;
+}
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible. This is no
+ * problem, because it should happen single-threaded at init time. It is the
+ * driver's job to ensure this guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex);
+ drm_modeset_lock_init(&dev->mode_config.connection_mutex);
+ mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
+ mutex_init(&dev->mode_config.blob_lock);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
+ idr_init(&dev->mode_config.tile_idr);
+ ida_init(&dev->mode_config.connector_ida);
+
+ drm_modeset_lock_all(dev);
+ drm_mode_create_standard_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+ dev->mode_config.num_overlay_plane = 0;
+ dev->mode_config.num_total_plane = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_property_blob *blob, *bt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+ list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+ head_global) {
+ drm_property_unreference_blob(blob);
+ }
+
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+ * fb_lock to protect against concurrent fb_list access. Contrary, it
+ * would actually deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_free(&fb->base.refcount);
+ }
+
+ ida_destroy(&dev->mode_config.connector_ida);
+ idr_destroy(&dev->mode_config.tile_idr);
+ idr_destroy(&dev->mode_config.crtc_idr);
+ drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
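
A sketch of the usual driver load path around the helpers now collected in drm_mode_config.c (the example_* names are placeholders):

static int example_modeset_init(struct drm_device *dev)
{
	int ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;
	dev->mode_config.funcs = &example_mode_config_funcs;	/* placeholder */

	ret = example_create_crtcs_and_connectors(dev);		/* placeholder */
	if (ret) {
		drm_mode_config_cleanup(dev);
		return ret;
	}

	drm_mode_config_reset(dev);
	return 0;
}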
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 53f07ac7c174..ac6a35212501 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -49,13 +49,7 @@
*/
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
- DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
- "0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+ DRM_DEBUG_KMS("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
@@ -165,6 +159,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
unsigned int vfieldrate, hperiod;
int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
int interlace;
+ u64 tmp;
/* allocate the drm_display_mode structure. If failure, we will
* return directly
@@ -322,8 +317,11 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
- drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
- drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+ tmp = drm_mode->htotal; /* perform intermediate calcs in u64 */
+ tmp *= HV_FACTOR * 1000;
+ do_div(tmp, hperiod);
+ drm_mode->clock = tmp;
+ drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
if (interlaced) {
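
Why the u64 intermediate is needed: with HV_FACTOR defined as 1000 in this file, the old 32-bit expression already overflows for large CVT modes, for example:

/*
 * htotal = 5120:  5120 * 1000 * 1000 = 5,120,000,000 > INT_MAX (2,147,483,647)
 *
 * so "htotal * HV_FACTOR * 1000 / hperiod" wrapped in 32 bits, while the
 * do_div() form keeps the full 64-bit product before dividing by hperiod.
 */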
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 1d45738f8f98..cc232ac6c950 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -38,7 +38,7 @@
* Some userspace presumes that the first connected connector is the main
* display, where it's supposed to display e.g. the login screen. For
* laptops, this should be the main panel. Use this function to sort all
- * (eDP/LVDS) panels to the front of the connector list, instead of
+ * (eDP/LVDS/DSI) panels to the front of the connector list, instead of
* painstakingly trying to initialize them in the right order.
*/
void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
@@ -51,7 +51,8 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DSI)
list_move_tail(&connector->head, &panel_list);
}
@@ -70,17 +71,31 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info;
int i;
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info || !info->depth) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("non-RGB pixel format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name));
+
+ fb->depth = 0;
+ fb->bits_per_pixel = 0;
+ } else {
+ fb->depth = info->depth;
+ fb->bits_per_pixel = info->cpp[0] * 8;
+ }
+
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
- fb->modifier[i] = mode_cmd->modifier[i];
}
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
- &fb->bits_per_pixel);
+ fb->modifier = mode_cmd->modifier[0];
fb->pixel_format = mode_cmd->pixel_format;
fb->flags = mode_cmd->flags;
}
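
A sketch of a driver .fb_create path using the helper; after this change fb->modifier is a single value taken from plane 0 and depth/bpp come from the format-info table (example_fb_funcs is a placeholder):

static struct drm_framebuffer *
example_fb_create(struct drm_device *dev, struct drm_file *file,
		  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	ret = drm_framebuffer_init(dev, fb, &example_fb_funcs);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}

	return fb;
}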
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 61146f5b4f56..9059fe3145a1 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -60,6 +60,8 @@
* lists and lookup data structures.
*/
+static DEFINE_WW_CLASS(crtc_ww_class);
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: DRM device
@@ -398,6 +400,17 @@ int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
/**
+ * drm_modeset_lock_init - initialize lock
+ * @lock: lock to init
+ */
+void drm_modeset_lock_init(struct drm_modeset_lock *lock)
+{
+ ww_mutex_init(&lock->mutex, &crtc_ww_class);
+ INIT_LIST_HEAD(&lock->head);
+}
+EXPORT_SYMBOL(drm_modeset_lock_init);
+
+/**
* drm_modeset_lock - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
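Note: with crtc_ww_class now static to drm_modeset_lock.c, drivers initialize private modeset locks through the newly exported drm_modeset_lock_init() rather than touching the ww class directly. A rough usage sketch; the driver structure and function names are made up:

#include <drm/drm_modeset_lock.h>

struct example_priv {
        struct drm_modeset_lock obj_lock;       /* protects some driver state */
};

static void example_init(struct example_priv *priv)
{
        drm_modeset_lock_init(&priv->obj_lock);
}

static int example_locked_op(struct example_priv *priv,
                             struct drm_modeset_acquire_ctx *ctx)
{
        int ret;

        ret = drm_modeset_lock(&priv->obj_lock, ctx);   /* may return -EDEADLK */
        if (ret)
                return ret;
        /* ... touch the protected state ... */
        return 0;
}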
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index bc98bb94264d..47848ed8ca48 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -6,6 +6,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
+static void drm_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
/**
* drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
@@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
EXPORT_SYMBOL(drm_of_find_possible_crtcs);
/**
+ * drm_of_component_match_add - Add a component helper OF node match rule
+ * @master: master device
+ * @matchptr: component match pointer
+ * @compare: compare function used for matching component
+ * @node: of_node
+ */
+void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node)
+{
+ of_node_get(node);
+ component_match_add_release(master, matchptr, drm_release_of,
+ compare, node);
+}
+EXPORT_SYMBOL_GPL(drm_of_component_match_add);
+
+/**
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, port);
+ drm_of_component_match_add(dev, &match, compare_of, port);
of_node_put(port);
}
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of,
+ remote);
of_node_put(remote);
}
of_node_put(port);
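Note: drm_of_component_match_add() takes an of_node_get() reference and registers drm_release_of() as the release callback, so the match table keeps the node alive until the component helper tears it down; callers still drop their own reference as before. A sketch of a probe path using it; the "ports" property name and the compare function are placeholders:

#include <linux/component.h>
#include <linux/of.h>
#include <drm/drm_of.h>

static int example_compare_of(struct device *dev, void *data)
{
        return dev->of_node == data;
}

static void example_add_matches(struct device *master,
                                struct component_match **match)
{
        struct device_node *node;
        int i;

        for (i = 0; ; i++) {
                node = of_parse_phandle(master->of_node, "ports", i);
                if (!node)
                        break;

                drm_of_component_match_add(master, match,
                                           example_compare_of, node);
                of_node_put(node);      /* the match table holds its own ref */
        }
}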
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 249c0ae52c6d..419ac313c36f 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -137,6 +137,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1);
drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
@@ -479,9 +480,10 @@ static int __setplane_internal(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(fb->pixel_format);
- DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format,
+ &format_name));
goto out;
}
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7899fc1dcdb0..7a7dddf604d7 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -130,15 +130,8 @@ int drm_plane_helper_check_state(struct drm_plane_state *state,
unsigned int rotation = state->rotation;
int hscale, vscale;
- src->x1 = state->src_x;
- src->y1 = state->src_y;
- src->x2 = state->src_x + state->src_w;
- src->y2 = state->src_y + state->src_h;
-
- dst->x1 = state->crtc_x;
- dst->y1 = state->crtc_y;
- dst->x2 = state->crtc_x + state->crtc_w;
- dst->y2 = state->crtc_y + state->crtc_h;
+ *src = drm_plane_state_src(state);
+ *dst = drm_plane_state_dest(state);
if (!fb) {
state->visible = false;
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
new file mode 100644
index 000000000000..ad3caaa1f48b
--- /dev/null
+++ b/drivers/gpu/drm/drm_print.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ */
+
+#include <stdarg.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include <drm/drm_print.h>
+
+void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
+{
+ seq_printf(p->arg, "%pV", vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_seq_file);
+
+void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ dev_printk(KERN_INFO, p->arg, "[" DRM_NAME "] %pV", vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_info);
+
+/**
+ * drm_printf - print to a &drm_printer stream
+ * @p: the &drm_printer
+ * @f: format string
+ */
+void drm_printf(struct drm_printer *p, const char *f, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, f);
+ vaf.fmt = f;
+ vaf.va = &args;
+ p->printfn(p, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_printf);
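Note: the new drm_printer lets one dump routine print either to a debugfs seq_file or to the kernel log, with the destination chosen by the constructor. A minimal usage sketch, assuming a drm_seq_file_printer() constructor pairing with __drm_printfn_seq_file() is declared in drm_print.h (the header is not shown in this diff):

#include <linux/seq_file.h>
#include <drm/drm_print.h>

/* One dump routine, reusable from debugfs and from the log. */
static void example_dump(struct drm_printer *p, int value)
{
        drm_printf(p, "current value: %d\n", value);
}

static int example_debugfs_show(struct seq_file *m, void *unused)
{
        struct drm_printer p = drm_seq_file_printer(m); /* assumed helper */

        example_dump(&p, 42);
        return 0;
}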
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index a4d81cf4ffa0..24be69d29964 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -65,9 +65,9 @@ static bool drm_property_type_valid(struct drm_property *property)
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
@@ -125,9 +125,9 @@ EXPORT_SYMBOL(drm_property_create);
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is only allowed to set one of the predefined values for enumeration
* properties.
@@ -173,9 +173,9 @@ EXPORT_SYMBOL(drm_property_create_enum);
* @supported_bits: bitmask of all supported enumeration values
*
* This creates a new bitmask drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Compared to plain enumeration properties userspace is allowed to set any
* or'ed together combination of the predefined property bitflag values
@@ -245,9 +245,9 @@ static struct drm_property *property_create_range(struct drm_device *dev,
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is allowed to set any unsigned integer value in the (min, max)
* range inclusive.
@@ -273,9 +273,9 @@ EXPORT_SYMBOL(drm_property_create_range);
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is allowed to set any signed integer value in the (min, max)
* range inclusive.
@@ -300,9 +300,9 @@ EXPORT_SYMBOL(drm_property_create_signed_range);
* @type: object type from DRM_MODE_OBJECT_* defines
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is only allowed to set this to any property value of the given
* @type. Only useful for atomic properties, which is enforced.
@@ -338,9 +338,9 @@ EXPORT_SYMBOL(drm_property_create_object);
* @name: name of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* This is implemented as a ranged property with only {0, 1} as valid values.
*
@@ -729,7 +729,6 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
- void __user *blob_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -739,8 +738,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
return -ENOENT;
if (out_resp->length == blob->length) {
- blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)) {
+ if (copy_to_user(u64_to_user_ptr(out_resp->data),
+ blob->data,
+ blob->length)) {
ret = -EFAULT;
goto unref;
}
@@ -757,7 +757,6 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
{
struct drm_mode_create_blob *out_resp = data;
struct drm_property_blob *blob;
- void __user *blob_ptr;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -767,8 +766,9 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
if (IS_ERR(blob))
return PTR_ERR(blob);
- blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
+ if (copy_from_user(blob->data,
+ u64_to_user_ptr(out_resp->data),
+ out_resp->length)) {
ret = -EFAULT;
goto out_blob;
}
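Note: the kerneldoc fixes above all describe the same create/attach/cleanup pattern, sketched once below; the property name, range and target plane are purely illustrative:

#include <drm/drm_crtc.h>

/* Create a range property and attach it to a plane with initial value 0. */
static int example_attach_prop(struct drm_device *dev, struct drm_plane *plane)
{
        struct drm_property *prop;

        prop = drm_property_create_range(dev, 0, "example-alpha", 0, 255);
        if (!prop)
                return -ENOMEM;

        drm_object_attach_property(&plane->base, prop, 0);
        /* freed via drm_property_destroy() from drm_mode_config_cleanup() */
        return 0;
}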
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 73e53a8d1b37..e6057d8cdcd5 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -281,17 +281,10 @@ EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
*/
void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point)
{
- int w = drm_rect_width(r);
- int h = drm_rect_height(r);
-
if (fixed_point)
- DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix,
- w >> 16, ((w & 0xffff) * 15625) >> 10,
- h >> 16, ((h & 0xffff) * 15625) >> 10,
- r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
- r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+ DRM_DEBUG_KMS("%s" DRM_RECT_FP_FMT "\n", prefix, DRM_RECT_FP_ARG(r));
else
- DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1);
+ DRM_DEBUG_KMS("%s" DRM_RECT_FMT "\n", prefix, DRM_RECT_ARG(r));
}
EXPORT_SYMBOL(drm_rect_debug_print);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 38720adfc62f..00368b14d08d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -16,6 +16,7 @@
#include <linux/component.h>
#include <linux/of_platform.h>
+#include <drm/drm_of.h>
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -478,9 +479,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -630,8 +629,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!core_node)
break;
- component_match_add(&pdev->dev, &match, compare_of,
- core_node);
+ drm_of_component_match_add(&pdev->dev, &match,
+ compare_of, core_node);
of_node_put(core_node);
}
} else if (dev->platform_data) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 0370b842d9cc..7d066a91d778 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -409,20 +409,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
bool write = !!(op & ETNA_PREP_WRITE);
- int ret;
-
- if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
- write))
- return -EBUSY;
- } else {
- unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+ unsigned long remain =
+ op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
+ long lret;
- ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
- write, true, remain);
- if (ret <= 0)
- return ret == 0 ? -ETIMEDOUT : ret;
- }
+ lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ write, true, remain);
+ if (lret < 0)
+ return lret;
+ else if (lret == 0)
+ return remain == 0 ? -EBUSY : -ETIMEDOUT;
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
if (!etnaviv_obj->sgt) {
@@ -470,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
}
#ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
const char *type, struct seq_file *m)
{
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
seq_printf(m, "\t%9s: %s %s seq %u\n",
type,
fence->ops->get_driver_name(fence),
@@ -486,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
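Note: the rewritten etnaviv_gem_cpu_prep() above relies on reservation_object_wait_timeout_rcu() returning a negative errno on error, 0 on timeout (including the NOSYNC case, where the timeout is 0 and the fences are simply not signaled yet), and the remaining jiffies otherwise. A condensed sketch of that return-value mapping:

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: map the long result of a fence wait onto an errno. */
static int example_wait_to_errno(long lret, bool nosync)
{
        if (lret < 0)
                return lret;                    /* error from the wait itself */
        if (lret == 0)
                return nosync ? -EBUSY : -ETIMEDOUT;
        return 0;                               /* signaled; lret jiffies left */
}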
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 4b697ad8bd64..0a67124bb2a4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -15,7 +15,7 @@
*/
#include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
@@ -886,7 +886,7 @@ static void recover_worker(struct work_struct *work)
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
if (!gpu->event[i].used)
continue;
- fence_signal(gpu->event[i].fence);
+ dma_fence_signal(gpu->event[i].fence);
gpu->event[i].fence = NULL;
gpu->event[i].used = false;
complete(&gpu->event_free);
@@ -956,55 +956,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
/* fence object management */
struct etnaviv_fence {
struct etnaviv_gpu *gpu;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
return container_of(fence, struct etnaviv_fence, base);
}
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
return "etnaviv";
}
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return dev_name(f->gpu->dev);
}
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return fence_completed(f->gpu, f->base.seqno);
}
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
.get_driver_name = etnaviv_fence_get_driver_name,
.get_timeline_name = etnaviv_fence_get_timeline_name,
.enable_signaling = etnaviv_fence_enable_signaling,
.signaled = etnaviv_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = etnaviv_fence_release,
};
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_fence *f;
@@ -1014,8 +1014,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
f->gpu = gpu;
- fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
- gpu->fence_context, ++gpu->next_fence);
+ dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+ gpu->fence_context, ++gpu->next_fence);
return &f->base;
}
@@ -1025,7 +1025,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
{
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -1043,7 +1043,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
/* Wait on any existing exclusive fence which isn't our own */
fence = reservation_object_get_excl(robj);
if (fence && fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1056,7 +1056,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(robj));
if (fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1162,11 +1162,11 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&gpu->lock);
list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
- if (!fence_is_signaled(cmdbuf->fence))
+ if (!dma_fence_is_signaled(cmdbuf->fence))
break;
list_del(&cmdbuf->node);
- fence_put(cmdbuf->fence);
+ dma_fence_put(cmdbuf->fence);
for (i = 0; i < cmdbuf->nr_bos; i++) {
struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1279,7 +1279,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned int event, i;
int ret;
@@ -1395,7 +1395,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
while ((event = ffs(intr)) != 0) {
- struct fence *fence;
+ struct dma_fence *fence;
event -= 1;
@@ -1405,7 +1405,7 @@ static irqreturn_t irq_handler(int irq, void *data)
fence = gpu->event[event].fence;
gpu->event[event].fence = NULL;
- fence_signal(fence);
+ dma_fence_signal(fence);
/*
* Events can be processed out of order. Eg,
@@ -1557,7 +1557,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
return ret;
gpu->drm = drm;
- gpu->fence_context = fence_context_alloc(1);
+ gpu->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&gpu->fence_spinlock);
INIT_LIST_HEAD(&gpu->active_cmd_list);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278dc3706..8c6b824e9d0a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
struct etnaviv_event {
bool used;
- struct fence *fence;
+ struct dma_fence *fence;
};
struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
/* vram node used if the cmdbuf is mapped through the MMUv2 */
struct drm_mm_node vram_node;
/* fence after which this buffer is to be disposed */
- struct fence *fence;
+ struct dma_fence *fence;
/* target exec state */
u32 exec_state;
/* per GPU in-flight list */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index def78c8c1780..739180ac3da5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -69,7 +69,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
spin_lock(&priv->lock);
priv->pending &= ~commit->crtcs;
@@ -254,6 +254,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -262,6 +263,26 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
+int exynos_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
@@ -345,9 +366,7 @@ static const struct file_operations exynos_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d215149e737b..80c4d5b81689 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -301,6 +301,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
bool nonblock);
+int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 40ce841eb952..23cce0a3f5fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -190,7 +190,7 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = exynos_atomic_check,
.atomic_commit = exynos_atomic_commit,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4cfb39d543b4..9f35deb56170 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -63,15 +63,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
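Note: the fb_ops conversions in this series collapse the per-driver list of drm_fb_helper_* callbacks into the DRM_FB_HELPER_DEFAULT_OPS macro; judging from the lines removed here it covers at least the check_var, set_par, blank, pan_display and setcmap defaults. A converted fb_ops then reduces to roughly the following sketch:

#include <linux/fb.h>
#include <linux/module.h>
#include <drm/drm_fb_helper.h>

static struct fb_ops example_fb_ops = {
        .owner          = THIS_MODULE,
        DRM_FB_HELPER_DEFAULT_OPS,      /* default drm_fb_helper_* callbacks */
        .fb_fillrect    = drm_fb_helper_cfb_fillrect,   /* driver ops stay explicit */
        .fb_copyarea    = drm_fb_helper_cfb_copyarea,
        .fb_imageblit   = drm_fb_helper_cfb_imageblit,
};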
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e8fb6ef947ee..38eaa63afb31 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1907,6 +1907,8 @@ err_disable_pm_runtime:
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->regs_hdmiphy)
+ iounmap(hdata->regs_hdmiphy);
err_ddc:
put_device(&hdata->ddc_adpt->dev);
@@ -1929,6 +1931,9 @@ static int hdmi_remove(struct platform_device *pdev)
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->regs_hdmiphy)
+ iounmap(hdata->regs_hdmiphy);
+
put_device(&hdata->ddc_adpt->dev);
return 0;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index b2d5e188b1b8..deb57435cc89 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -25,8 +25,13 @@
static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_device *dev = crtc->dev;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
struct drm_pending_vblank_event *event = crtc->state->event;
+ regmap_write(fsl_dev->regmap,
+ DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+
if (event) {
crtc->state->event = NULL;
@@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
+static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ /* always disable planes on the CRTC */
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
+
drm_crtc_vblank_off(crtc);
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
@@ -122,8 +131,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
+ .atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
- .disable = fsl_dcu_drm_disable_crtc,
.enable = fsl_dcu_drm_crtc_enable,
.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index e04efbed1a54..320e4728c9b9 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -59,8 +59,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev)
regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return ret;
}
@@ -139,8 +137,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
drm_handle_vblank(dev, 0);
regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return IRQ_HANDLED;
}
@@ -180,9 +176,7 @@ static const struct file_operations fsl_dcu_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 9e6f7d8112b3..a99f48847420 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -160,11 +160,6 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
DCU_LAYER_POST_SKIP(0) |
DCU_LAYER_PRE_SKIP(0));
}
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
- regmap_write(fsl_dev->regmap,
- DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
return;
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 3a44e705db53..4071b2d1e8cf 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -124,7 +124,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
psbfb->gtt->offset;
- page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ page_num = vma_pages(vma);
address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -185,9 +185,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static struct fb_ops psbfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = psbfb_copyarea,
@@ -198,9 +196,7 @@ static struct fb_ops psbfb_ops = {
static struct fb_ops psbfb_roll_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
@@ -211,9 +207,7 @@ static struct fb_ops psbfb_roll_ops = {
static struct fb_ops psbfb_unaccel_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
@@ -236,22 +230,20 @@ static int psb_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
- u32 bpp, depth;
+ const struct drm_format_info *info;
int ret;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /*
+ * Reject unknown formats, YUV formats, and formats with more than
+ * 4 bytes per pixel.
+ */
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info || !info->depth || info->cpp[0] > 4)
+ return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (bpp) {
- case 8:
- case 16:
- case 24:
- case 32:
- break;
- default:
- return -EINVAL;
- }
+
drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
@@ -298,7 +290,6 @@ static struct drm_framebuffer *psb_framebuffer_create
* psbfb_alloc - allocate frame buffer memory
* @dev: the DRM device
* @aligned_size: space needed
- * @force: fall back to GEM buffers if need be
*
* Allocate the frame buffer. In the usual case we get a GTT range that
* is stolen memory backed and life is simple. If there isn't sufficient
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 8f69225ce2b4..3f4f424196b2 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -76,6 +76,7 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
* psb_gtt_insert - put an object into the GTT
* @dev: our DRM device
* @r: our GTT range
+ * @resume: on resume
*
* Take our preallocated GTT range and insert the GEM object into
* the GTT. This is protected via the gtt mutex which the caller
@@ -130,7 +131,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
* page table entries with the dummy page. This is protected via the gtt
* mutex which the caller must hold.
*/
-void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 __iomem *gtt_slot;
@@ -321,6 +322,7 @@ out:
* @len: length (bytes) of address space required
* @name: resource name
* @backed: resource should be backed by stolen pages
+ * @align: requested alignment
*
* Ask the kernel core to find us a suitable range of addresses
* to use for a GTT mapping.
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 50eb944fb78a..ff37ea585664 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -473,6 +473,7 @@ static const struct file_operations psb_gem_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = psb_unlocked_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b74372760d7f..05d7aaf47eea 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -753,10 +753,6 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig
index 558c61b1b8e8..2fd2724b7a7d 100644
--- a/drivers/gpu/drm/hisilicon/Kconfig
+++ b/drivers/gpu/drm/hisilicon/Kconfig
@@ -2,4 +2,5 @@
# hisilicon drm device configuration.
# Please keep this list sorted alphabetically
+source "drivers/gpu/drm/hisilicon/hibmc/Kconfig"
source "drivers/gpu/drm/hisilicon/kirin/Kconfig"
diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile
index e3f6d493c996..c8155bfb1ff1 100644
--- a/drivers/gpu/drm/hisilicon/Makefile
+++ b/drivers/gpu/drm/hisilicon/Makefile
@@ -2,4 +2,5 @@
# Makefile for hisilicon drm drivers.
# Please keep this list sorted alphabetically
+obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc/
obj-$(CONFIG_DRM_HISI_KIRIN) += kirin/
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
new file mode 100644
index 000000000000..380622a0da35
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -0,0 +1,9 @@
+config DRM_HISI_HIBMC
+ tristate "DRM Support for Hisilicon Hibmc"
+ depends on DRM && PCI
+ select DRM_KMS_HELPER
+ select DRM_TTM
+
+ help
+ Choose this option if you have a Hisilicon Hibmc SoC chipset.
+ If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
new file mode 100644
index 000000000000..f2e04c035673
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o
+
+obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
new file mode 100644
index 000000000000..2a1386e33126
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -0,0 +1,477 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+struct hibmc_display_panel_pll {
+ unsigned long M;
+ unsigned long N;
+ unsigned long OD;
+ unsigned long POD;
+};
+
+struct hibmc_display_pll_config {
+ unsigned long hdisplay;
+ unsigned long vdisplay;
+ u32 pll1_config_value;
+ u32 pll2_config_value;
+};
+
+static const struct hibmc_display_pll_config hibmc_pll_table[] = {
+ {800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
+ {1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
+ {1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
+ {1280, 768, CRT_PLL1_HS_80MHZ, CRT_PLL2_HS_80MHZ},
+ {1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
+ {1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
+ {1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
+ {1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
+};
+
+#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1)))
+
+static int hibmc_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (src_w != state->crtc_w || src_h != state->crtc_h) {
+ DRM_DEBUG_ATOMIC("scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_x < 0 || state->crtc_y < 0) {
+ DRM_DEBUG_ATOMIC("crtc_x/y of drm_plane state is invalid\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_x + state->crtc_w >
+ crtc_state->adjusted_mode.hdisplay ||
+ state->crtc_y + state->crtc_h >
+ crtc_state->adjusted_mode.vdisplay) {
+ DRM_DEBUG_ATOMIC("visible portion of plane is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hibmc_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ u32 reg;
+ int ret;
+ u64 gpu_addr = 0;
+ unsigned int line_l;
+ struct hibmc_drm_private *priv = plane->dev->dev_private;
+ struct hibmc_framebuffer *hibmc_fb;
+ struct hibmc_bo *bo;
+
+ if (!state->fb)
+ return;
+
+ hibmc_fb = to_hibmc_framebuffer(state->fb);
+ bo = gem_to_hibmc_bo(hibmc_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret) {
+ DRM_ERROR("failed to reserve ttm_bo: %d", ret);
+ return;
+ }
+
+ ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ ttm_bo_unreserve(&bo->bo);
+ if (ret) {
+ DRM_ERROR("failed to pin hibmc_bo: %d", ret);
+ return;
+ }
+
+ writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
+
+ reg = state->fb->width * (state->fb->bits_per_pixel / 8);
+ /* now line_pad is 16 */
+ reg = PADDING(16, reg);
+
+ line_l = state->fb->width * state->fb->bits_per_pixel / 8;
+ line_l = PADDING(16, line_l);
+ writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
+ HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
+ priv->mmio + HIBMC_CRT_FB_WIDTH);
+
+ /* SET PIXEL FORMAT */
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK;
+ reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT,
+ state->fb->bits_per_pixel / 16);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
+static const u32 channel_formats1[] = {
+ DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888
+};
+
+static struct drm_plane_funcs hibmc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ .atomic_check = hibmc_plane_atomic_check,
+ .atomic_update = hibmc_plane_atomic_update,
+};
+
+static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_plane *plane;
+ int ret = 0;
+
+ plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane) {
+ DRM_ERROR("failed to alloc memory when init plane\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ /*
+ * plane init
+ * TODO: Only the primary plane is supported for now; overlay
+ * planes still need to be added.
+ */
+ ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
+ channel_formats1,
+ ARRAY_SIZE(channel_formats1),
+ DRM_PLANE_TYPE_PRIMARY,
+ NULL);
+ if (ret) {
+ DRM_ERROR("failed to init plane: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
+ return plane;
+}
+
+static void hibmc_crtc_enable(struct drm_crtc *crtc)
+{
+ unsigned int reg;
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate*/
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ hibmc_set_current_gate(priv, reg);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void hibmc_crtc_disable(struct drm_crtc *crtc)
+{
+ unsigned int reg;
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+
+ drm_crtc_vblank_off(crtc);
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
+
+ /* Disable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg |= HIBMC_CURR_GATE_LOCALMEM(0);
+ reg |= HIBMC_CURR_GATE_DISPLAY(0);
+ hibmc_set_current_gate(priv, reg);
+}
+
+static unsigned int format_pll_reg(void)
+{
+ unsigned int pllreg = 0;
+ struct hibmc_display_panel_pll pll = {0};
+
+ /*
+ * Note that all PLLs have the same format. Here, we just use the
+ * panel PLL parameters to work out the bit fields in the register.
+ * The returned 32-bit value can then be applied to any PLL in the
+ * calling function.
+ */
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_BYPASS, 0);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POWER, 1);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_INPUT, 0);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POD, pll.POD);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_OD, pll.OD);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_N, pll.N);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_M, pll.M);
+
+ return pllreg;
+}
+
+static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
+{
+ u32 val;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ val = readl(priv->mmio + CRT_PLL1_HS);
+ val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ val = CRT_PLL1_HS_INTER_BYPASS(1) | CRT_PLL1_HS_POWERON(1);
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ writel(pll, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val = pll & ~(CRT_PLL1_HS_POWERON(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val &= ~(CRT_PLL1_HS_INTER_BYPASS(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val |= CRT_PLL1_HS_OUTER_BYPASS(1);
+ writel(val, priv->mmio + CRT_PLL1_HS);
+}
+
+static void get_pll_config(unsigned long x, unsigned long y,
+ u32 *pll1, u32 *pll2)
+{
+ int i;
+ int count = ARRAY_SIZE(hibmc_pll_table);
+
+ for (i = 0; i < count; i++) {
+ if (hibmc_pll_table[i].hdisplay == x &&
+ hibmc_pll_table[i].vdisplay == y) {
+ *pll1 = hibmc_pll_table[i].pll1_config_value;
+ *pll2 = hibmc_pll_table[i].pll2_config_value;
+ return;
+ }
+ }
+
+ /* if no match is found, fall back to the default values */
+ *pll1 = CRT_PLL1_HS_25MHZ;
+ *pll2 = CRT_PLL2_HS_25MHZ;
+}
+
+/*
+ * This function takes care of the extra registers and bit fields
+ * required to set up a mode on the board.
+ * Explanation of the Display Control register:
+ * the FPGA only supports 7 predefined pixel clocks, and the clock
+ * select is in bits 4:0 of the new register 0x802a8.
+ */
+static unsigned int display_ctrl_adjust(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ unsigned int ctrl)
+{
+ unsigned long x, y;
+ u32 pll1; /* bit[31:0] of PLL */
+ u32 pll2; /* bit[63:32] of PLL */
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ x = mode->hdisplay;
+ y = mode->vdisplay;
+
+ get_pll_config(x, y, &pll1, &pll2);
+ writel(pll2, priv->mmio + CRT_PLL2_HS);
+ set_vclock_hisilicon(dev, pll1);
+
+ /*
+ * Hisilicon has to set up the top-left and bottom-right
+ * registers as well.
+ * Note that the normal chip only uses those two registers for
+ * auto-centering mode.
+ */
+ writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_TOP, 0) |
+ HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_LEFT, 0),
+ priv->mmio + HIBMC_CRT_AUTO_CENTERING_TL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM, y - 1) |
+ HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_RIGHT, x - 1),
+ priv->mmio + HIBMC_CRT_AUTO_CENTERING_BR);
+
+ /*
+ * Assume common fields in ctrl have been properly set before
+ * calling this function.
+ * This function only sets the extra fields in ctrl.
+ */
+
+ /* Set bit 25 of display controller: Select CRT or VGA clock */
+ ctrl &= ~HIBMC_CRT_DISP_CTL_CRTSELECT_MASK;
+ ctrl &= ~HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK;
+
+ ctrl |= HIBMC_CRT_DISP_CTL_CRTSELECT(HIBMC_CRTSELECT_CRT);
+
+ /* clock_phase_polarity is 0 */
+ ctrl |= HIBMC_CRT_DISP_CTL_CLOCK_PHASE(0);
+
+ writel(ctrl, priv->mmio + HIBMC_CRT_DISP_CTL);
+
+ return ctrl;
+}
+
+static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ unsigned int val;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ struct drm_device *dev = crtc->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+ int width = mode->hsync_end - mode->hsync_start;
+ int height = mode->vsync_end - mode->vsync_start;
+
+ writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL);
+ writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) |
+ HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_DISP_END, mode->hdisplay - 1),
+ priv->mmio + HIBMC_CRT_HORZ_TOTAL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_WIDTH, width) |
+ HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_START, mode->hsync_start - 1),
+ priv->mmio + HIBMC_CRT_HORZ_SYNC);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_TOTAL, mode->vtotal - 1) |
+ HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_DISP_END, mode->vdisplay - 1),
+ priv->mmio + HIBMC_CRT_VERT_TOTAL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_HEIGHT, height) |
+ HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_START, mode->vsync_start - 1),
+ priv->mmio + HIBMC_CRT_VERT_SYNC);
+
+ val = HIBMC_FIELD(HIBMC_CRT_DISP_CTL_VSYNC_PHASE, 0);
+ val |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_HSYNC_PHASE, 0);
+ val |= HIBMC_CRT_DISP_CTL_TIMING(1);
+ val |= HIBMC_CRT_DISP_CTL_PLANE(1);
+
+ display_ctrl_adjust(dev, mode, val);
+}
+
+static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ unsigned int reg;
+ struct drm_device *dev = crtc->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate*/
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+ hibmc_set_current_gate(priv, reg);
+
+ /* We can add more initialization as needed. */
+}
+
+static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (crtc->state->event)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+static const struct drm_crtc_funcs hibmc_crtc_funcs = {
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
+ .enable = hibmc_crtc_enable,
+ .disable = hibmc_crtc_disable,
+ .mode_set_nofb = hibmc_crtc_mode_set_nofb,
+ .atomic_begin = hibmc_crtc_atomic_begin,
+ .atomic_flush = hibmc_crtc_atomic_flush,
+};
+
+int hibmc_de_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ int ret;
+
+ plane = hibmc_plane_init(priv);
+ if (IS_ERR(plane)) {
+ DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane));
+ return PTR_ERR(plane);
+ }
+
+ crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc) {
+ DRM_ERROR("failed to alloc memory when init crtc\n");
+ return -ENOMEM;
+ }
+
+ ret = drm_crtc_init_with_planes(dev, crtc, plane,
+ NULL, &hibmc_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init crtc: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_mode_crtc_set_gamma_size(crtc, 256);
+ if (ret) {
+ DRM_ERROR("failed to set gamma size: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &hibmc_crtc_helper_funcs);
+
+ return 0;
+}
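Note: hibmc_plane_atomic_update() above rounds the line length up to a 16-byte boundary with the PADDING() macro; because the alignment is a power of two, the usual add-then-mask trick applies. Worked examples for values the driver could see:

/* Same macro as in hibmc_drm_de.c; align must be a power of two. */
#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1)))

/*
 * 1024 px at 32 bpp: 1024 * 4 = 4096 bytes, PADDING(16, 4096) = 4096 (aligned)
 * 1366 px at 16 bpp: 1366 * 2 = 2732 bytes, PADDING(16, 2732) = 2736 (rounded up)
 */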
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
new file mode 100644
index 000000000000..7e2043f4348c
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -0,0 +1,456 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+static const struct file_operations hibmc_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .mmap = hibmc_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+};
+
+static int hibmc_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+
+ writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1),
+ priv->mmio + HIBMC_RAW_INTERRUPT_EN);
+
+ return 0;
+}
+
+static void hibmc_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+
+ writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0),
+ priv->mmio + HIBMC_RAW_INTERRUPT_EN);
+}
+
+irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+ u32 status;
+
+ status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
+
+ if (status & HIBMC_RAW_INTERRUPT_VBLANK(1)) {
+ writel(HIBMC_RAW_INTERRUPT_VBLANK(1),
+ priv->mmio + HIBMC_RAW_INTERRUPT);
+ drm_handle_vblank(dev, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct drm_driver hibmc_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+ .fops = &hibmc_fops,
+ .name = "hibmc",
+ .date = "20160828",
+ .desc = "hibmc drm driver",
+ .major = 1,
+ .minor = 0,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = hibmc_enable_vblank,
+ .disable_vblank = hibmc_disable_vblank,
+ .gem_free_object_unlocked = hibmc_gem_free_object,
+ .dumb_create = hibmc_dumb_create,
+ .dumb_map_offset = hibmc_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .irq_handler = hibmc_drm_interrupt,
+};
+
+static int __maybe_unused hibmc_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct hibmc_drm_private *priv = drm_dev->dev_private;
+
+ drm_kms_helper_poll_disable(drm_dev);
+ priv->suspend_state = drm_atomic_helper_suspend(drm_dev);
+ if (IS_ERR(priv->suspend_state)) {
+ DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n",
+ PTR_ERR(priv->suspend_state));
+ drm_kms_helper_poll_enable(drm_dev);
+ return PTR_ERR(priv->suspend_state);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused hibmc_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct hibmc_drm_private *priv = drm_dev->dev_private;
+
+ drm_atomic_helper_resume(drm_dev, priv->suspend_state);
+ drm_kms_helper_poll_enable(drm_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hibmc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hibmc_pm_suspend,
+ hibmc_pm_resume)
+};
+
+static int hibmc_kms_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+
+ drm_mode_config_init(priv->dev);
+ priv->mode_config_initialized = true;
+
+ priv->dev->mode_config.min_width = 0;
+ priv->dev->mode_config.min_height = 0;
+ priv->dev->mode_config.max_width = 1920;
+ priv->dev->mode_config.max_height = 1440;
+
+ priv->dev->mode_config.fb_base = priv->fb_base;
+ priv->dev->mode_config.preferred_depth = 24;
+ priv->dev->mode_config.prefer_shadow = 0;
+
+ priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
+
+ ret = hibmc_de_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to init de: %d\n", ret);
+ return ret;
+ }
+
+ ret = hibmc_vdac_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to init vdac: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hibmc_kms_fini(struct hibmc_drm_private *priv)
+{
+ if (priv->mode_config_initialized) {
+ drm_mode_config_cleanup(priv->dev);
+ priv->mode_config_initialized = false;
+ }
+}
+
+/*
+ * The chip can operate in one of three power modes: 0, 1 or Sleep.
+ */
+void hibmc_set_power_mode(struct hibmc_drm_private *priv,
+ unsigned int power_mode)
+{
+ unsigned int control_value = 0;
+ void __iomem *mmio = priv->mmio;
+ unsigned int input = 1;
+
+ if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
+ return;
+
+ if (power_mode == HIBMC_PW_MODE_CTL_MODE_SLEEP)
+ input = 0;
+
+ control_value = readl(mmio + HIBMC_POWER_MODE_CTRL);
+ control_value &= ~(HIBMC_PW_MODE_CTL_MODE_MASK |
+ HIBMC_PW_MODE_CTL_OSC_INPUT_MASK);
+ control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_MODE, power_mode);
+ control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_OSC_INPUT, input);
+ writel(control_value, mmio + HIBMC_POWER_MODE_CTRL);
+}
+
+void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
+{
+ unsigned int gate_reg;
+ unsigned int mode;
+ void __iomem *mmio = priv->mmio;
+
+ /* Get current power mode. */
+ mode = (readl(mmio + HIBMC_POWER_MODE_CTRL) &
+ HIBMC_PW_MODE_CTL_MODE_MASK) >> HIBMC_PW_MODE_CTL_MODE_SHIFT;
+
+ switch (mode) {
+ case HIBMC_PW_MODE_CTL_MODE_MODE0:
+ gate_reg = HIBMC_MODE0_GATE;
+ break;
+
+ case HIBMC_PW_MODE_CTL_MODE_MODE1:
+ gate_reg = HIBMC_MODE1_GATE;
+ break;
+
+ default:
+ gate_reg = HIBMC_MODE0_GATE;
+ break;
+ }
+ writel(gate, mmio + gate_reg);
+}
+
+static void hibmc_hw_config(struct hibmc_drm_private *priv)
+{
+ unsigned int reg;
+
+ /* On hardware reset, power mode 0 is default. */
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+
+ hibmc_set_current_gate(priv, reg);
+
+ /*
+ * Reset the memory controller. If the memory controller
+ * is not reset in the chip, the system might hang when software
+ * accesses the memory. The memory should be reset after
+ * changing the MXCLK.
+ */
+ reg = readl(priv->mmio + HIBMC_MISC_CTRL);
+ reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
+ reg |= HIBMC_MSCCTL_LOCALMEM_RESET(0);
+ writel(reg, priv->mmio + HIBMC_MISC_CTRL);
+
+ reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
+ reg |= HIBMC_MSCCTL_LOCALMEM_RESET(1);
+
+ writel(reg, priv->mmio + HIBMC_MISC_CTRL);
+}
+
+static int hibmc_hw_map(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct pci_dev *pdev = dev->pdev;
+ resource_size_t addr, size, ioaddr, iosize;
+
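+ /* BAR 1 holds the register space, BAR 0 the video memory. */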
+ ioaddr = pci_resource_start(pdev, 1);
+ iosize = pci_resource_len(pdev, 1);
+ priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+ if (!priv->mmio) {
+ DRM_ERROR("Cannot map mmio region\n");
+ return -ENOMEM;
+ }
+
+ addr = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ priv->fb_map = devm_ioremap(dev->dev, addr, size);
+ if (!priv->fb_map) {
+ DRM_ERROR("Cannot map framebuffer\n");
+ return -ENOMEM;
+ }
+ priv->fb_base = addr;
+ priv->fb_size = size;
+
+ return 0;
+}
+
+static int hibmc_hw_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+
+ ret = hibmc_hw_map(priv);
+ if (ret)
+ return ret;
+
+ hibmc_hw_config(priv);
+
+ return 0;
+}
+
+static int hibmc_unload(struct drm_device *dev)
+{
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ hibmc_fbdev_fini(priv);
+
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+ if (priv->msi_enabled)
+ pci_disable_msi(dev->pdev);
+ drm_vblank_cleanup(dev);
+
+ hibmc_kms_fini(priv);
+ hibmc_mm_fini(priv);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+static int hibmc_load(struct drm_device *dev)
+{
+ struct hibmc_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("no memory to allocate for hibmc_drm_private\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = priv;
+ priv->dev = dev;
+
+ ret = hibmc_hw_init(priv);
+ if (ret)
+ goto err;
+
+ ret = hibmc_mm_init(priv);
+ if (ret)
+ goto err;
+
+ ret = hibmc_kms_init(priv);
+ if (ret)
+ goto err;
+
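+ /* dev->mode_config.num_crtc was populated by hibmc_kms_init() above. */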
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret) {
+ DRM_ERROR("failed to initialize vblank: %d\n", ret);
+ goto err;
+ }
+
+ priv->msi_enabled = 0;
+ ret = pci_enable_msi(dev->pdev);
+ if (ret) {
+ DRM_WARN("enabling MSI failed: %d\n", ret);
+ } else {
+ priv->msi_enabled = 1;
+ ret = drm_irq_install(dev, dev->pdev->irq);
+ if (ret)
+ DRM_WARN("install irq failed: %d\n", ret);
+ }
+
+ /* reset all the states of crtc/plane/encoder/connector */
+ drm_mode_config_reset(dev);
+
+ ret = hibmc_fbdev_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to initialize fbdev: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ hibmc_unload(dev);
+ DRM_ERROR("failed to initialize drm driver: %d\n", ret);
+ return ret;
+}
+
+static int hibmc_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct drm_device *dev;
+ int ret;
+
+ dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ DRM_ERROR("failed to allocate drm_device\n");
+ return PTR_ERR(dev);
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ DRM_ERROR("failed to enable pci device: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = hibmc_load(dev);
+ if (ret) {
+ DRM_ERROR("failed to load hibmc: %d\n", ret);
+ goto err_disable;
+ }
+
+ ret = drm_dev_register(dev, 0);
+ if (ret) {
+ DRM_ERROR("failed to register drv for userspace access: %d\n",
+ ret);
+ goto err_unload;
+ }
+ return 0;
+
+err_unload:
+ hibmc_unload(dev);
+err_disable:
+ pci_disable_device(pdev);
+err_free:
+ drm_dev_unref(dev);
+
+ return ret;
+}
+
+static void hibmc_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(dev);
+ hibmc_unload(dev);
+ drm_dev_unref(dev);
+}
+
+static struct pci_device_id hibmc_pci_table[] = {
+ {0x19e5, 0x1711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+
+static struct pci_driver hibmc_pci_driver = {
+ .name = "hibmc-drm",
+ .id_table = hibmc_pci_table,
+ .probe = hibmc_pci_probe,
+ .remove = hibmc_pci_remove,
+ .driver.pm = &hibmc_pm_ops,
+};
+
+static int __init hibmc_init(void)
+{
+ return pci_register_driver(&hibmc_pci_driver);
+}
+
+static void __exit hibmc_exit(void)
+{
+ pci_unregister_driver(&hibmc_pci_driver);
+}
+
+module_init(hibmc_init);
+module_exit(hibmc_exit);
+
+MODULE_DEVICE_TABLE(pci, hibmc_pci_table);
+MODULE_AUTHOR("RongrongZou <zourongrong@huawei.com>");
+MODULE_DESCRIPTION("DRM Driver for Hisilicon Hibmc");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
new file mode 100644
index 000000000000..e195521eb41e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -0,0 +1,114 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef HIBMC_DRM_DRV_H
+#define HIBMC_DRM_DRV_H
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo_driver.h>
+
+struct hibmc_framebuffer {
+ struct drm_framebuffer fb;
+ struct drm_gem_object *obj;
+};
+
+struct hibmc_fbdev {
+ struct drm_fb_helper helper;
+ struct hibmc_framebuffer *fb;
+ int size;
+};
+
+struct hibmc_drm_private {
+ /* hw */
+ void __iomem *mmio;
+ void __iomem *fb_map;
+ unsigned long fb_base;
+ unsigned long fb_size;
+ bool msi_enabled;
+
+ /* drm */
+ struct drm_device *dev;
+ bool mode_config_initialized;
+ struct drm_atomic_state *suspend_state;
+
+ /* ttm */
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ bool initialized;
+
+ /* fbdev */
+ struct hibmc_fbdev *fbdev;
+ bool mm_inited;
+};
+
+#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb)
+
+struct hibmc_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ struct ttm_place placements[3];
+ int pin_count;
+};
+
+static inline struct hibmc_bo *hibmc_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct hibmc_bo, bo);
+}
+
+static inline struct hibmc_bo *gem_to_hibmc_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct hibmc_bo, gem);
+}
+
+void hibmc_set_power_mode(struct hibmc_drm_private *priv,
+ unsigned int power_mode);
+void hibmc_set_current_gate(struct hibmc_drm_private *priv,
+ unsigned int gate);
+
+int hibmc_de_init(struct hibmc_drm_private *priv);
+int hibmc_vdac_init(struct hibmc_drm_private *priv);
+int hibmc_fbdev_init(struct hibmc_drm_private *priv);
+void hibmc_fbdev_fini(struct hibmc_drm_private *priv);
+
+int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+struct hibmc_framebuffer *
+hibmc_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int hibmc_mm_init(struct hibmc_drm_private *hibmc);
+void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
+int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int hibmc_bo_unpin(struct hibmc_bo *bo);
+void hibmc_gem_free_object(struct drm_gem_object *obj);
+int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset);
+int hibmc_mmap(struct file *filp, struct vm_area_struct *vma);
+
+extern const struct drm_mode_config_funcs hibmc_mode_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
new file mode 100644
index 000000000000..9b0696735ba1
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -0,0 +1,267 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include "hibmc_drm_drv.h"
+
+static int hibmcfb_create_object(
+ struct hibmc_drm_private *priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_gem_object *gobj;
+ struct drm_device *dev = priv->dev;
+ u32 size;
+ int ret = 0;
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = hibmc_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static struct fb_ops hibmc_drm_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct hibmc_fbdev *hi_fbdev =
+ container_of(helper, struct hibmc_fbdev, helper);
+ struct hibmc_drm_private *priv = helper->dev->dev_private;
+ struct fb_info *info;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ int ret = 0;
+ int ret1;
+ size_t size;
+ unsigned int bytes_per_pixel;
+ struct hibmc_bo *bo = NULL;
+
+ DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+ sizes->surface_depth = 32;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
+
+ ret = hibmcfb_create_object(priv, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object: %d\n", ret);
+ return -ENOMEM;
+ }
+
+ bo = gem_to_hibmc_bo(gobj);
+
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret) {
+ DRM_ERROR("failed to reserve ttm_bo: %d\n", ret);
+ goto out_unref_gem;
+ }
+
+ ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin fbcon: %d\n", ret);
+ goto out_unreserve_ttm_bo;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon: %d\n", ret);
+ goto out_unpin_bo;
+ }
+ ttm_bo_unreserve(&bo->bo);
+
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ DRM_ERROR("failed to allocate fbi: %d\n", ret);
+ goto out_release_fbi;
+ }
+
+ info->par = hi_fbdev;
+
+ hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
+ if (IS_ERR(hi_fbdev->fb)) {
+ ret = PTR_ERR(hi_fbdev->fb);
+ DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+ goto out_release_fbi;
+ }
+
+ priv->fbdev->size = size;
+ hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
+
+ strcpy(info->fix.id, "hibmcdrmfb");
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &hibmc_drm_fb_ops;
+
+ drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
+ hi_fbdev->fb->fb.depth);
+ drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
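+ /* Expose the bus address and size of the pinned VRAM buffer to fbdev. */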
+ info->fix.smem_start = bo->bo.mem.bus.offset + bo->bo.mem.bus.base;
+ info->fix.smem_len = size;
+ return 0;
+
+out_release_fbi:
+ drm_fb_helper_release_fbi(helper);
+ ret1 = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret1) {
+ DRM_ERROR("failed to rsv ttm_bo when release fbi: %d\n", ret1);
+ goto out_unref_gem;
+ }
+ ttm_bo_kunmap(&bo->kmap);
+out_unpin_bo:
+ hibmc_bo_unpin(bo);
+out_unreserve_ttm_bo:
+ ttm_bo_unreserve(&bo->bo);
+out_unref_gem:
+ drm_gem_object_unreference_unlocked(gobj);
+
+ return ret;
+}
+
+static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
+{
+ struct hibmc_framebuffer *gfb = fbdev->fb;
+ struct drm_fb_helper *fbh = &fbdev->helper;
+
+ drm_fb_helper_unregister_fbi(fbh);
+ drm_fb_helper_release_fbi(fbh);
+
+ drm_fb_helper_fini(fbh);
+
+ if (gfb)
+ drm_framebuffer_unreference(&gfb->fb);
+}
+
+static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
+ .fb_probe = hibmc_drm_fb_create,
+};
+
+int hibmc_fbdev_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+ struct fb_var_screeninfo *var;
+ struct fb_fix_screeninfo *fix;
+ struct hibmc_fbdev *hifbdev;
+
+ hifbdev = devm_kzalloc(priv->dev->dev, sizeof(*hifbdev), GFP_KERNEL);
+ if (!hifbdev) {
+ DRM_ERROR("failed to allocate hibmc_fbdev\n");
+ return -ENOMEM;
+ }
+
+ priv->fbdev = hifbdev;
+ drm_fb_helper_prepare(priv->dev, &hifbdev->helper,
+ &hibmc_fbdev_helper_funcs);
+
+ /* For now there is just one CRTC and one connector */
+ ret = drm_fb_helper_init(priv->dev,
+ &hifbdev->helper, 1, 1);
+ if (ret) {
+ DRM_ERROR("failed to initialize fb helper: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(&hifbdev->helper);
+ if (ret) {
+ DRM_ERROR("failed to add all connectors: %d\n", ret);
+ goto fini;
+ }
+
+ ret = drm_fb_helper_initial_config(&hifbdev->helper, 16);
+ if (ret) {
+ DRM_ERROR("failed to setup initial conn config: %d\n", ret);
+ goto fini;
+ }
+
+ var = &hifbdev->helper.fbdev->var;
+ fix = &hifbdev->helper.fbdev->fix;
+
+ DRM_DEBUG_DRIVER("Member of info->var is :\n"
+ "xres=%d\n"
+ "yres=%d\n"
+ "xres_virtual=%d\n"
+ "yres_virtual=%d\n"
+ "xoffset=%d\n"
+ "yoffset=%d\n"
+ "bits_per_pixel=%d\n"
+ "...\n", var->xres, var->yres, var->xres_virtual,
+ var->yres_virtual, var->xoffset, var->yoffset,
+ var->bits_per_pixel);
+ DRM_DEBUG_DRIVER("Member of info->fix is :\n"
+ "smem_start=%lx\n"
+ "smem_len=%d\n"
+ "type=%d\n"
+ "type_aux=%d\n"
+ "visual=%d\n"
+ "xpanstep=%d\n"
+ "ypanstep=%d\n"
+ "ywrapstep=%d\n"
+ "line_length=%d\n"
+ "accel=%d\n"
+ "capabilities=%d\n"
+ "...\n", fix->smem_start, fix->smem_len, fix->type,
+ fix->type_aux, fix->visual, fix->xpanstep,
+ fix->ypanstep, fix->ywrapstep, fix->line_length,
+ fix->accel, fix->capabilities);
+
+ return 0;
+
+fini:
+ drm_fb_helper_fini(&hifbdev->helper);
+ return ret;
+}
+
+void hibmc_fbdev_fini(struct hibmc_drm_private *priv)
+{
+ if (!priv->fbdev)
+ return;
+
+ hibmc_fbdev_destroy(priv->fbdev);
+ priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
new file mode 100644
index 000000000000..f7035bf3ec1f
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
@@ -0,0 +1,196 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef HIBMC_DRM_HW_H
+#define HIBMC_DRM_HW_H
+
+/* register definition */
+#define HIBMC_MISC_CTRL 0x4
+
+#define HIBMC_MSCCTL_LOCALMEM_RESET(x) ((x) << 6)
+#define HIBMC_MSCCTL_LOCALMEM_RESET_MASK 0x40
+
+#define HIBMC_CURRENT_GATE 0x000040
+#define HIBMC_CURR_GATE_DISPLAY(x) ((x) << 2)
+#define HIBMC_CURR_GATE_DISPLAY_MASK 0x4
+
+#define HIBMC_CURR_GATE_LOCALMEM(x) ((x) << 1)
+#define HIBMC_CURR_GATE_LOCALMEM_MASK 0x2
+
+#define HIBMC_MODE0_GATE 0x000044
+#define HIBMC_MODE1_GATE 0x000048
+#define HIBMC_POWER_MODE_CTRL 0x00004C
+
+#define HIBMC_PW_MODE_CTL_OSC_INPUT(x) ((x) << 3)
+#define HIBMC_PW_MODE_CTL_OSC_INPUT_MASK 0x8
+
+#define HIBMC_PW_MODE_CTL_MODE(x) ((x) << 0)
+#define HIBMC_PW_MODE_CTL_MODE_MASK 0x03
+#define HIBMC_PW_MODE_CTL_MODE_SHIFT 0
+
+#define HIBMC_PW_MODE_CTL_MODE_MODE0 0
+#define HIBMC_PW_MODE_CTL_MODE_MODE1 1
+#define HIBMC_PW_MODE_CTL_MODE_SLEEP 2
+
+#define HIBMC_PANEL_PLL_CTRL 0x00005C
+#define HIBMC_CRT_PLL_CTRL 0x000060
+
+#define HIBMC_PLL_CTRL_BYPASS(x) ((x) << 18)
+#define HIBMC_PLL_CTRL_BYPASS_MASK 0x40000
+
+#define HIBMC_PLL_CTRL_POWER(x) ((x) << 17)
+#define HIBMC_PLL_CTRL_POWER_MASK 0x20000
+
+#define HIBMC_PLL_CTRL_INPUT(x) ((x) << 16)
+#define HIBMC_PLL_CTRL_INPUT_MASK 0x10000
+
+#define HIBMC_PLL_CTRL_POD(x) ((x) << 14)
+#define HIBMC_PLL_CTRL_POD_MASK 0xC000
+
+#define HIBMC_PLL_CTRL_OD(x) ((x) << 12)
+#define HIBMC_PLL_CTRL_OD_MASK 0x3000
+
+#define HIBMC_PLL_CTRL_N(x) ((x) << 8)
+#define HIBMC_PLL_CTRL_N_MASK 0xF00
+
+#define HIBMC_PLL_CTRL_M(x) ((x) << 0)
+#define HIBMC_PLL_CTRL_M_MASK 0xFF
+
+#define HIBMC_CRT_DISP_CTL 0x80200
+
+#define HIBMC_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25)
+#define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000
+
+#define HIBMC_CRTSELECT_CRT 1
+
+#define HIBMC_CRT_DISP_CTL_CLOCK_PHASE(x) ((x) << 14)
+#define HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK 0x4000
+
+#define HIBMC_CRT_DISP_CTL_VSYNC_PHASE(x) ((x) << 13)
+#define HIBMC_CRT_DISP_CTL_VSYNC_PHASE_MASK 0x2000
+
+#define HIBMC_CRT_DISP_CTL_HSYNC_PHASE(x) ((x) << 12)
+#define HIBMC_CRT_DISP_CTL_HSYNC_PHASE_MASK 0x1000
+
+#define HIBMC_CRT_DISP_CTL_TIMING(x) ((x) << 8)
+#define HIBMC_CRT_DISP_CTL_TIMING_MASK 0x100
+
+#define HIBMC_CRT_DISP_CTL_PLANE(x) ((x) << 2)
+#define HIBMC_CRT_DISP_CTL_PLANE_MASK 4
+
+#define HIBMC_CRT_DISP_CTL_FORMAT(x) ((x) << 0)
+#define HIBMC_CRT_DISP_CTL_FORMAT_MASK 0x03
+
+#define HIBMC_CRT_FB_ADDRESS 0x080204
+
+#define HIBMC_CRT_FB_WIDTH 0x080208
+#define HIBMC_CRT_FB_WIDTH_WIDTH(x) ((x) << 16)
+#define HIBMC_CRT_FB_WIDTH_WIDTH_MASK 0x3FFF0000
+#define HIBMC_CRT_FB_WIDTH_OFFS(x) ((x) << 0)
+#define HIBMC_CRT_FB_WIDTH_OFFS_MASK 0x3FFF
+
+#define HIBMC_CRT_HORZ_TOTAL 0x08020C
+#define HIBMC_CRT_HORZ_TOTAL_TOTAL(x) ((x) << 16)
+#define HIBMC_CRT_HORZ_TOTAL_TOTAL_MASK 0xFFF0000
+
+#define HIBMC_CRT_HORZ_TOTAL_DISP_END(x) ((x) << 0)
+#define HIBMC_CRT_HORZ_TOTAL_DISP_END_MASK 0xFFF
+
+#define HIBMC_CRT_HORZ_SYNC 0x080210
+#define HIBMC_CRT_HORZ_SYNC_WIDTH(x) ((x) << 16)
+#define HIBMC_CRT_HORZ_SYNC_WIDTH_MASK 0xFF0000
+
+#define HIBMC_CRT_HORZ_SYNC_START(x) ((x) << 0)
+#define HIBMC_CRT_HORZ_SYNC_START_MASK 0xFFF
+
+#define HIBMC_CRT_VERT_TOTAL 0x080214
+#define HIBMC_CRT_VERT_TOTAL_TOTAL(x) ((x) << 16)
+#define HIBMC_CRT_VERT_TOTAL_TOTAL_MASK 0x7FFF0000
+
+#define HIBMC_CRT_VERT_TOTAL_DISP_END(x) ((x) << 0)
+#define HIBMC_CRT_VERT_TOTAL_DISP_END_MASK 0x7FF
+
+#define HIBMC_CRT_VERT_SYNC 0x080218
+#define HIBMC_CRT_VERT_SYNC_HEIGHT(x) ((x) << 16)
+#define HIBMC_CRT_VERT_SYNC_HEIGHT_MASK 0x3F0000
+
+#define HIBMC_CRT_VERT_SYNC_START(x) ((x) << 0)
+#define HIBMC_CRT_VERT_SYNC_START_MASK 0x7FF
+
+/* Auto Centering */
+#define HIBMC_CRT_AUTO_CENTERING_TL 0x080280
+#define HIBMC_CRT_AUTO_CENTERING_TL_TOP(x) ((x) << 16)
+#define HIBMC_CRT_AUTO_CENTERING_TL_TOP_MASK 0x7FF0000
+
+#define HIBMC_CRT_AUTO_CENTERING_TL_LEFT(x) ((x) << 0)
+#define HIBMC_CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7FF
+
+#define HIBMC_CRT_AUTO_CENTERING_BR 0x080284
+#define HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM(x) ((x) << 16)
+#define HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM_MASK 0x7FF0000
+
+#define HIBMC_CRT_AUTO_CENTERING_BR_RIGHT(x) ((x) << 0)
+#define HIBMC_CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7FF
+
+/* register to control panel output */
+#define HIBMC_DISPLAY_CONTROL_HISILE 0x80288
+#define HIBMC_DISPLAY_CONTROL_FPVDDEN(x) ((x) << 0)
+#define HIBMC_DISPLAY_CONTROL_PANELDATE(x) ((x) << 1)
+#define HIBMC_DISPLAY_CONTROL_FPEN(x) ((x) << 2)
+#define HIBMC_DISPLAY_CONTROL_VBIASEN(x) ((x) << 3)
+
+#define HIBMC_RAW_INTERRUPT 0x80290
+#define HIBMC_RAW_INTERRUPT_VBLANK(x) ((x) << 2)
+#define HIBMC_RAW_INTERRUPT_VBLANK_MASK 0x4
+
+#define HIBMC_RAW_INTERRUPT_EN 0x80298
+#define HIBMC_RAW_INTERRUPT_EN_VBLANK(x) ((x) << 2)
+#define HIBMC_RAW_INTERRUPT_EN_VBLANK_MASK 0x4
+
+/* register and values for PLL control */
+#define CRT_PLL1_HS 0x802a8
+#define CRT_PLL1_HS_OUTER_BYPASS(x) ((x) << 30)
+#define CRT_PLL1_HS_INTER_BYPASS(x) ((x) << 29)
+#define CRT_PLL1_HS_POWERON(x) ((x) << 24)
+
+#define CRT_PLL1_HS_25MHZ 0x23d40f02
+#define CRT_PLL1_HS_40MHZ 0x23940801
+#define CRT_PLL1_HS_65MHZ 0x23940d01
+#define CRT_PLL1_HS_78MHZ 0x23540F82
+#define CRT_PLL1_HS_74MHZ 0x23941dc2
+#define CRT_PLL1_HS_80MHZ 0x23941001
+#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2
+#define CRT_PLL1_HS_108MHZ 0x23b41b01
+#define CRT_PLL1_HS_162MHZ 0x23480681
+#define CRT_PLL1_HS_148MHZ 0x23541dc2
+#define CRT_PLL1_HS_193MHZ 0x234807c1
+
+#define CRT_PLL2_HS 0x802ac
+#define CRT_PLL2_HS_25MHZ 0x206B851E
+#define CRT_PLL2_HS_40MHZ 0x30000000
+#define CRT_PLL2_HS_65MHZ 0x40000000
+#define CRT_PLL2_HS_78MHZ 0x50E147AE
+#define CRT_PLL2_HS_74MHZ 0x602B6AE7
+#define CRT_PLL2_HS_80MHZ 0x70000000
+#define CRT_PLL2_HS_108MHZ 0x80000000
+#define CRT_PLL2_HS_162MHZ 0xA0000000
+#define CRT_PLL2_HS_148MHZ 0xB0CCCCCD
+#define CRT_PLL2_HS_193MHZ 0xC0872B02
+
+#define HIBMC_FIELD(field, value) (field(value) & field##_MASK)
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
new file mode 100644
index 000000000000..d1f67a9d4d86
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -0,0 +1,147 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+static int hibmc_connector_get_modes(struct drm_connector *connector)
+{
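+ /* There is no EDID support; report the standard modes up to 800x600. */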
+ return drm_add_modes_noedid(connector, 800, 600);
+}
+
+static int hibmc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+hibmc_connector_best_encoder(struct drm_connector *connector)
+{
+ return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+}
+
+static enum drm_connector_status hibmc_connector_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs
+ hibmc_connector_helper_funcs = {
+ .get_modes = hibmc_connector_get_modes,
+ .mode_valid = hibmc_connector_mode_valid,
+ .best_encoder = hibmc_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs hibmc_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = hibmc_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_connector *
+hibmc_connector_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_connector *connector;
+ int ret;
+
+ connector = devm_kzalloc(dev->dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector) {
+ DRM_ERROR("failed to alloc memory when init connector\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = drm_connector_init(dev, connector,
+ &hibmc_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ if (ret) {
+ DRM_ERROR("failed to init connector: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ drm_connector_helper_add(connector,
+ &hibmc_connector_helper_funcs);
+
+ return connector;
+}
+
+static void hibmc_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ u32 reg;
+ struct drm_device *dev = encoder->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
+ reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1);
+ reg |= HIBMC_DISPLAY_CONTROL_PANELDATE(1);
+ reg |= HIBMC_DISPLAY_CONTROL_FPEN(1);
+ reg |= HIBMC_DISPLAY_CONTROL_VBIASEN(1);
+ writel(reg, priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
+}
+
+static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
+ .mode_set = hibmc_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs hibmc_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int hibmc_vdac_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ connector = hibmc_connector_init(priv);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("failed to create connector: %ld\n",
+ PTR_ERR(connector));
+ return PTR_ERR(connector);
+ }
+
+ encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder) {
+ DRM_ERROR("failed to alloc memory when init encoder\n");
+ return -ENOMEM;
+ }
+
+ encoder->possible_crtcs = 0x1;
+ ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init encoder: %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
new file mode 100644
index 000000000000..e76abf61edae
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -0,0 +1,558 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <ttm/ttm_page_alloc.h>
+
+#include "hibmc_drm_drv.h"
+
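+/* GEM mmap offsets handed out for TTM objects start at this page offset. */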
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline struct hibmc_drm_private *
+hibmc_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct hibmc_drm_private, bdev);
+}
+
+static int
+hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
+{
+ int ret;
+
+ hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
+ hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
+ hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
+ hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
+ ret = drm_global_item_ref(&hibmc->mem_global_ref);
+ if (ret) {
+ DRM_ERROR("could not get ref on ttm global: %d\n", ret);
+ return ret;
+ }
+
+ hibmc->bo_global_ref.mem_glob =
+ hibmc->mem_global_ref.object;
+ hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
+ hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
+ hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
+ hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
+ ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
+ if (ret) {
+ DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
+ drm_global_item_unref(&hibmc->mem_global_ref);
+ return ret;
+ }
+ return 0;
+}
+
+static void
+hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
+{
+ drm_global_item_unref(&hibmc->bo_global_ref.ref);
+ drm_global_item_unref(&hibmc->mem_global_ref);
+ hibmc->mem_global_ref.release = NULL;
+}
+
+static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool hibmc_ttm_bo_is_hibmc_bo(struct ttm_buffer_object *bo)
+{
+ return bo->destroy == &hibmc_bo_ttm_destroy;
+}
+
+static int
+hibmc_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("unsupported memory type %u\n", type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void hibmc_ttm_placement(struct hibmc_bo *bo, int domain)
+{
+ u32 count = 0;
+ u32 i;
+
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[count++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[count++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+ if (!count)
+ bo->placements[count++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+
+ bo->placement.num_placement = count;
+ bo->placement.num_busy_placement = count;
+ for (i = 0; i < count; i++) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
+}
+
+static void
+hibmc_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct hibmc_bo *hibmcbo = hibmc_bo(bo);
+
+ if (!hibmc_ttm_bo_is_hibmc_bo(bo))
+ return;
+
+ hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_SYSTEM);
+ *pl = hibmcbo->placement;
+}
+
+static int hibmc_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ struct hibmc_bo *hibmcbo = hibmc_bo(bo);
+
+ return drm_vma_node_verify_access(&hibmcbo->gem.vma_node,
+ filp->private_data);
+}
+
+static int hibmc_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct hibmc_drm_private *hibmc = hibmc_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(hibmc->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void hibmc_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func hibmc_tt_backend_func = {
+ .destroy = &hibmc_ttm_backend_destroy,
+};
+
+static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ u32 page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt) {
+ DRM_ERROR("failed to allocate ttm_tt\n");
+ return NULL;
+ }
+ tt->func = &hibmc_tt_backend_func;
+ ret = ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page);
+ if (ret) {
+ DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver hibmc_bo_driver = {
+ .ttm_tt_create = hibmc_ttm_tt_create,
+ .ttm_tt_populate = hibmc_ttm_tt_populate,
+ .ttm_tt_unpopulate = hibmc_ttm_tt_unpopulate,
+ .init_mem_type = hibmc_bo_init_mem_type,
+ .evict_flags = hibmc_bo_evict_flags,
+ .move = NULL,
+ .verify_access = hibmc_bo_verify_access,
+ .io_mem_reserve = &hibmc_ttm_io_mem_reserve,
+ .io_mem_free = NULL,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
+};
+
+int hibmc_mm_init(struct hibmc_drm_private *hibmc)
+{
+ int ret;
+ struct drm_device *dev = hibmc->dev;
+ struct ttm_bo_device *bdev = &hibmc->bdev;
+
+ ret = hibmc_ttm_global_init(hibmc);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&hibmc->bdev,
+ hibmc->bo_global_ref.ref.object,
+ &hibmc_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ hibmc_ttm_global_release(hibmc);
+ DRM_ERROR("error initializing bo driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ hibmc->fb_size >> PAGE_SHIFT);
+ if (ret) {
+ hibmc_ttm_global_release(hibmc);
+ DRM_ERROR("failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ hibmc->mm_inited = true;
+ return 0;
+}
+
+void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
+{
+ if (!hibmc->mm_inited)
+ return;
+
+ ttm_bo_device_release(&hibmc->bdev);
+ hibmc_ttm_global_release(hibmc);
+ hibmc->mm_inited = false;
+}
+
+static void hibmc_bo_unref(struct hibmc_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ *bo = NULL;
+}
+
+int hibmc_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct hibmc_bo **phibmcbo)
+{
+ struct hibmc_drm_private *hibmc = dev->dev_private;
+ struct hibmc_bo *hibmcbo;
+ size_t acc_size;
+ int ret;
+
+ hibmcbo = kzalloc(sizeof(*hibmcbo), GFP_KERNEL);
+ if (!hibmcbo) {
+ DRM_ERROR("failed to allocate hibmcbo\n");
+ return -ENOMEM;
+ }
+ ret = drm_gem_object_init(dev, &hibmcbo->gem, size);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm gem object: %d\n", ret);
+ kfree(hibmcbo);
+ return ret;
+ }
+
+ hibmcbo->bo.bdev = &hibmc->bdev;
+
+ hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&hibmc->bdev, size,
+ sizeof(struct hibmc_bo));
+
+ ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
+ ttm_bo_type_device, &hibmcbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, NULL, hibmc_bo_ttm_destroy);
+ if (ret) {
+ hibmc_bo_unref(&hibmcbo);
+ DRM_ERROR("failed to initialize ttm_bo: %d\n", ret);
+ return ret;
+ }
+
+ *phibmcbo = hibmcbo;
+ return 0;
+}
+
+int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
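+ /* Already pinned: just bump the pin count and report the current offset. */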
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = bo->bo.offset;
+ return 0;
+ }
+
+ hibmc_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = bo->bo.offset;
+ return 0;
+}
+
+int hibmc_bo_unpin(struct hibmc_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+ DRM_ERROR("validate failed for unpin: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct hibmc_drm_private *hibmc;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return -EINVAL;
+
+ file_priv = filp->private_data;
+ hibmc = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &hibmc->bdev);
+}
+
+int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct hibmc_bo *hibmcbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = PAGE_ALIGN(size);
+ if (size == 0) {
+ DRM_ERROR("error: zero size\n");
+ return -EINVAL;
+ }
+
+ ret = hibmc_bo_create(dev, size, 0, 0, &hibmcbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object: %d\n", ret);
+ return ret;
+ }
+ *obj = &hibmcbo->gem;
+ return 0;
+}
+
+int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gobj;
+ u32 handle;
+ int ret;
+
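+ /* Align the pitch to 16 bytes; it also determines the dumb buffer size. */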
+ args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
+ args->size = args->pitch * args->height;
+
+ ret = hibmc_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create GEM object: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret) {
+ DRM_ERROR("failed to unreference GEM object: %d\n", ret);
+ return ret;
+ }
+
+ args->handle = handle;
+ return 0;
+}
+
+void hibmc_gem_free_object(struct drm_gem_object *obj)
+{
+ struct hibmc_bo *hibmcbo = gem_to_hibmc_bo(obj);
+
+ hibmc_bo_unref(&hibmcbo);
+}
+
+static u64 hibmc_bo_mmap_offset(struct hibmc_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ struct hibmc_bo *bo;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = gem_to_hibmc_bo(obj);
+ *offset = hibmc_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
+}
+
+static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
+
+ drm_gem_object_unreference_unlocked(hibmc_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(hibmc_fb);
+}
+
+static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
+ .destroy = hibmc_user_framebuffer_destroy,
+};
+
+struct hibmc_framebuffer *
+hibmc_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct hibmc_framebuffer *hibmc_fb;
+ int ret;
+
+ hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
+ if (!hibmc_fb) {
+ DRM_ERROR("failed to allocate hibmc_fb\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_helper_mode_fill_fb_struct(&hibmc_fb->fb, mode_cmd);
+ hibmc_fb->obj = obj;
+ ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ kfree(hibmc_fb);
+ return ERR_PTR(ret);
+ }
+
+ return hibmc_fb;
+}
+
+static struct drm_framebuffer *
+hibmc_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct hibmc_framebuffer *hibmc_fb;
+
+ DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+ mode_cmd->width, mode_cmd->height,
+ (mode_cmd->pixel_format) & 0xff,
+ (mode_cmd->pixel_format >> 8) & 0xff,
+ (mode_cmd->pixel_format >> 16) & 0xff,
+ (mode_cmd->pixel_format >> 24) & 0xff);
+
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
+ if (IS_ERR(hibmc_fb)) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_CAST(hibmc_fb);
+ }
+ return &hibmc_fb->fb;
+}
+
+const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = hibmc_user_framebuffer_create,
+};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 7e7a4d43d6b6..afc2b5d2d5f0 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -608,17 +608,16 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
- char *format_name;
+ struct drm_format_name_buf format_name;
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32)obj->paddr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
ch + 1, y, in_h, stride, (u32)obj->paddr);
- format_name = drm_get_format_name(fb->pixel_format);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
- addr, fb->width, fb->height, fmt, format_name);
- kfree(format_name);
+ addr, fb->width, fb->height, fmt,
+ drm_get_format_name(fb->pixel_format, &format_name));
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 90377a609c98..ebd5f4fe4c23 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include "kirin_drm_drv.h"
@@ -151,9 +152,7 @@ static const struct file_operations kirin_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -260,14 +259,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np)
DRM_ERROR("no valid endpoint node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(endpoint);
remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
if (!remote) {
DRM_ERROR("no valid remote node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(remote);
if (!of_device_is_available(remote)) {
DRM_ERROR("not available for remote node\n");
@@ -294,7 +292,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
if (IS_ERR(remote))
return PTR_ERR(remote);
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of, remote);
+ of_node_put(remote);
return component_master_add_with_match(dev, &kirin_drm_ops, match);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 9798d400d817..86f47e190309 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -41,12 +41,15 @@ struct tda998x_priv {
struct i2c_client *hdmi;
struct mutex mutex;
u16 rev;
+ u8 cec_addr;
u8 current_page;
- int dpms;
- bool is_hdmi_sink;
+ bool is_on;
+ bool supports_infoframes;
+ bool sink_has_audio;
u8 vip_cntrl_0;
u8 vip_cntrl_1;
u8 vip_cntrl_2;
+ unsigned long tmds_clock;
struct tda998x_audio_params audio_params;
struct platform_device *audio_pdev;
@@ -105,6 +108,8 @@ struct tda998x_priv {
# define I2C_MASTER_DIS_FILT (1 << 1)
# define I2C_MASTER_APP_STRT_LAT (1 << 2)
#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
+# define FEAT_POWERDOWN_PREFILT BIT(0)
+# define FEAT_POWERDOWN_CSC BIT(1)
# define FEAT_POWERDOWN_SPDIF (1 << 3)
#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
@@ -370,35 +375,46 @@ struct tda998x_priv {
static void
cec_write(struct tda998x_priv *priv, u16 addr, u8 val)
{
- struct i2c_client *client = priv->cec;
u8 buf[] = {addr, val};
+ struct i2c_msg msg = {
+ .addr = priv->cec_addr,
+ .len = 2,
+ .buf = buf,
+ };
int ret;
- ret = i2c_master_send(client, buf, sizeof(buf));
+ ret = i2c_transfer(priv->hdmi->adapter, &msg, 1);
if (ret < 0)
- dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+ dev_err(&priv->hdmi->dev, "Error %d writing to cec:0x%x\n",
+ ret, addr);
}
static u8
cec_read(struct tda998x_priv *priv, u8 addr)
{
- struct i2c_client *client = priv->cec;
u8 val;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cec_addr,
+ .len = 1,
+ .buf = &addr,
+ }, {
+ .addr = priv->cec_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = &val,
+ },
+ };
int ret;
- ret = i2c_master_send(client, &addr, sizeof(addr));
- if (ret < 0)
- goto fail;
-
- ret = i2c_master_recv(client, &val, sizeof(val));
- if (ret < 0)
- goto fail;
+ ret = i2c_transfer(priv->hdmi->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < 0) {
+ dev_err(&priv->hdmi->dev, "Error %d reading from cec:0x%x\n",
+ ret, addr);
+ val = 0;
+ }
return val;
-
-fail:
- dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
- return 0;
}
static int
@@ -579,9 +595,9 @@ tda998x_reset(struct tda998x_priv *priv)
* HPD assertion: it needs a delay of 100ms to avoid timing out while
* trying to read EDID data.
*
- * However, tda998x_encoder_get_modes() may be called at any moment
+ * However, tda998x_connector_get_modes() may be called at any moment
* after tda998x_connector_detect() indicates that we are connected, so
- * we need to delay probing modes in tda998x_encoder_get_modes() after
+ * we need to delay probing modes in tda998x_connector_get_modes() after
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
@@ -630,28 +646,30 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
bool handled = false;
sta = cec_read(priv, REG_CEC_INTSTATUS);
- cec = cec_read(priv, REG_CEC_RXSHPDINT);
- lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
- flag0 = reg_read(priv, REG_INT_FLAGS_0);
- flag1 = reg_read(priv, REG_INT_FLAGS_1);
- flag2 = reg_read(priv, REG_INT_FLAGS_2);
- DRM_DEBUG_DRIVER(
- "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
- sta, cec, lvl, flag0, flag1, flag2);
-
- if (cec & CEC_RXSHPDINT_HPD) {
- if (lvl & CEC_RXSHPDLEV_HPD)
- tda998x_edid_delay_start(priv);
- else
- schedule_work(&priv->detect_work);
-
- handled = true;
- }
+ if (sta & CEC_INTSTATUS_HDMI) {
+ cec = cec_read(priv, REG_CEC_RXSHPDINT);
+ lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
+ flag0 = reg_read(priv, REG_INT_FLAGS_0);
+ flag1 = reg_read(priv, REG_INT_FLAGS_1);
+ flag2 = reg_read(priv, REG_INT_FLAGS_2);
+ DRM_DEBUG_DRIVER(
+ "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
+ sta, cec, lvl, flag0, flag1, flag2);
+
+ if (cec & CEC_RXSHPDINT_HPD) {
+ if (lvl & CEC_RXSHPDLEV_HPD)
+ tda998x_edid_delay_start(priv);
+ else
+ schedule_work(&priv->detect_work);
+
+ handled = true;
+ }
- if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
- priv->wq_edid_wait = 0;
- wake_up(&priv->wq_edid);
- handled = true;
+ if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
+ priv->wq_edid_wait = 0;
+ wake_up(&priv->wq_edid);
+ handled = true;
+ }
}
return IRQ_RETVAL(handled);
@@ -700,6 +718,8 @@ tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
}
+/* Audio support */
+
static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
{
if (on) {
@@ -713,8 +733,7 @@ static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
static int
tda998x_configure_audio(struct tda998x_priv *priv,
- struct tda998x_audio_params *params,
- unsigned mode_clock)
+ struct tda998x_audio_params *params)
{
u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
u32 n;
@@ -771,7 +790,7 @@ tda998x_configure_audio(struct tda998x_priv *priv,
* assume 100MHz requires larger divider.
*/
adiv = AUDIO_DIV_SERCLK_8;
- if (mode_clock > 100000)
+ if (priv->tmds_clock > 100000)
adiv++; /* AUDIO_DIV_SERCLK_16 */
/* S/PDIF asks for a larger divider */
@@ -819,58 +838,281 @@ tda998x_configure_audio(struct tda998x_priv *priv,
return tda998x_write_aif(priv, &params->cea);
}
-/* DRM encoder functions */
+static int tda998x_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+ int i, ret;
+ struct tda998x_audio_params audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .cea = params->cea,
+ };
+
+ memcpy(audio.status, params->iec.status,
+ min(sizeof(audio.status), sizeof(params->iec.status)));
-static void tda998x_encoder_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
+ daifmt->bit_clk_master || daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_I2S)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_I2S;
+ break;
+ case HDMI_SPDIF:
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_SPDIF)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_SPDIF;
+ break;
+ default:
+ dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
+ return -EINVAL;
+ }
+
+ if (audio.config == 0) {
+ dev_err(dev, "%s: No audio configuration found\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->audio_mutex);
+ if (priv->supports_infoframes && priv->sink_has_audio)
+ ret = tda998x_configure_audio(priv, &audio);
+ else
+ ret = 0;
+
+ if (ret == 0)
+ priv->audio_params = audio;
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static void tda998x_audio_shutdown(struct device *dev, void *data)
{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
- priv->audio_params = p->audio_params;
+ mutex_lock(&priv->audio_mutex);
+
+ reg_write(priv, REG_ENA_AP, 0);
+
+ priv->audio_params.format = AFMT_UNUSED;
+
+ mutex_unlock(&priv->audio_mutex);
}
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
- /* we only care about on or off: */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ mutex_lock(&priv->audio_mutex);
- if (mode == priv->dpms)
- return;
+ tda998x_audio_mute(priv, enable);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /* enable video ports, audio will be enabled later */
- reg_write(priv, REG_ENA_VP_0, 0xff);
- reg_write(priv, REG_ENA_VP_1, 0xff);
- reg_write(priv, REG_ENA_VP_2, 0xff);
- /* set muxing after enabling ports: */
- reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
- reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
- reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
- break;
- case DRM_MODE_DPMS_OFF:
- /* disable video ports */
- reg_write(priv, REG_ENA_VP_0, 0x00);
- reg_write(priv, REG_ENA_VP_1, 0x00);
- reg_write(priv, REG_ENA_VP_2, 0x00);
- break;
+ mutex_unlock(&priv->audio_mutex);
+ return 0;
+}
+
+static int tda998x_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->audio_mutex);
+ memcpy(buf, priv->connector.eld,
+ min(sizeof(priv->connector.eld), len));
+ mutex_unlock(&priv->audio_mutex);
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = tda998x_audio_hw_params,
+ .audio_shutdown = tda998x_audio_shutdown,
+ .digital_mute = tda998x_audio_digital_mute,
+ .get_eld = tda998x_audio_get_eld,
+};
+
+static int tda998x_audio_codec_init(struct tda998x_priv *priv,
+ struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 2,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
+ if (priv->audio_port[i].format == AFMT_I2S &&
+ priv->audio_port[i].config != 0)
+ codec_data.i2s = 1;
+ if (priv->audio_port[i].format == AFMT_SPDIF &&
+ priv->audio_port[i].config != 0)
+ codec_data.spdif = 1;
}
- priv->dpms = mode;
+ priv->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(priv->audio_pdev);
+}
+
+/* DRM connector functions */
+
+static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ else
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static int tda998x_connector_fill_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ int ret;
+
+ mutex_lock(&priv->audio_mutex);
+ ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+
+ if (connector->edid_blob_ptr) {
+ struct edid *edid = (void *)connector->edid_blob_ptr->data;
+
+ priv->sink_has_audio = drm_detect_monitor_audio(edid);
+ } else {
+ priv->sink_has_audio = false;
+ }
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static enum drm_connector_status
+tda998x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
+
+ return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void tda998x_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tda998x_connector_funcs = {
+ .dpms = tda998x_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = tda998x_connector_fill_modes,
+ .detect = tda998x_connector_detect,
+ .destroy = tda998x_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
+{
+ struct tda998x_priv *priv = data;
+ u8 offset, segptr;
+ int ret, i;
+
+ offset = (blk & 1) ? 128 : 0;
+ segptr = blk / 2;
+
+ reg_write(priv, REG_DDC_ADDR, 0xa0);
+ reg_write(priv, REG_DDC_OFFS, offset);
+ reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(priv, REG_DDC_SEGM, segptr);
+
+ /* enable reading EDID: */
+ priv->wq_edid_wait = 1;
+ reg_write(priv, REG_EDID_CTRL, 0x1);
+
+ /* flag must be cleared by sw: */
+ reg_write(priv, REG_EDID_CTRL, 0x0);
+
+ /* wait for block read to complete: */
+ if (priv->hdmi->irq) {
+ i = wait_event_timeout(priv->wq_edid,
+ !priv->wq_edid_wait,
+ msecs_to_jiffies(100));
+ if (i < 0) {
+ dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
+ return i;
+ }
+ } else {
+ for (i = 100; i > 0; i--) {
+ msleep(1);
+ ret = reg_read(priv, REG_INT_FLAGS_2);
+ if (ret < 0)
+ return ret;
+ if (ret & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ }
+ }
+
+ if (i == 0) {
+ dev_err(&priv->hdmi->dev, "read edid timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
+ if (ret != length) {
+ dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
+ blk, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tda998x_connector_get_modes(struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ struct edid *edid;
+ int n;
+
+ /*
+ * If we get killed while waiting for the HPD timeout, return
+ * no modes found: we are not in a restartable path, so we
+ * can't handle signals gracefully.
+ */
+ if (tda998x_edid_delay_wait(priv))
+ return 0;
+
+ if (priv->rev == TDA19988)
+ reg_clear(priv, REG_TX4, TX4_PD_RAM);
+
+ edid = drm_do_get_edid(connector, read_edid_block, priv);
+
+ if (priv->rev == TDA19988)
+ reg_set(priv, REG_TX4, TX4_PD_RAM);
+
+ if (!edid) {
+ dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
+ return 0;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ kfree(edid);
+
+ return n;
}
static int tda998x_connector_mode_valid(struct drm_connector *connector,
@@ -888,6 +1130,80 @@ static int tda998x_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static struct drm_encoder *
+tda998x_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+
+ return &priv->encoder;
+}
+
+static
+const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
+ .get_modes = tda998x_connector_get_modes,
+ .mode_valid = tda998x_connector_mode_valid,
+ .best_encoder = tda998x_connector_best_encoder,
+};
+
+static int tda998x_connector_init(struct tda998x_priv *priv,
+ struct drm_device *drm)
+{
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ connector->interlace_allowed = 1;
+
+ if (priv->hdmi->irq)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ drm_connector_helper_add(connector, &tda998x_connector_helper_funcs);
+ ret = drm_connector_init(drm, connector, &tda998x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ return ret;
+
+ drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+
+ return 0;
+}
+
+/* DRM encoder functions */
+
+static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ bool on;
+
+ /* we only care about on or off: */
+ on = mode == DRM_MODE_DPMS_ON;
+
+ if (on == priv->is_on)
+ return;
+
+ if (on) {
+ /* enable video ports, audio will be enabled later */
+ reg_write(priv, REG_ENA_VP_0, 0xff);
+ reg_write(priv, REG_ENA_VP_1, 0xff);
+ reg_write(priv, REG_ENA_VP_2, 0xff);
+ /* set muxing after enabling ports: */
+ reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
+
+ priv->is_on = true;
+ } else {
+ /* disable video ports */
+ reg_write(priv, REG_ENA_VP_0, 0x00);
+ reg_write(priv, REG_ENA_VP_1, 0x00);
+ reg_write(priv, REG_ENA_VP_2, 0x00);
+
+ priv->is_on = false;
+ }
+}
+
static void
tda998x_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -971,6 +1287,8 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
div = 3;
}
+ mutex_lock(&priv->audio_mutex);
+
/* mute the audio FIFO: */
reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -982,6 +1300,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* no pre-filter or interpolator: */
reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
HVF_CNTRL_0_INTPOL(0));
+ reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_PREFILT);
reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
VIP_CNTRL_4_BLC(0));
@@ -1004,6 +1323,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* set color matrix bypass flag: */
reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP |
MAT_CONTRL_MAT_SC(1));
+ reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_CSC);
/* set BIAS tmds value: */
reg_write(priv, REG_ANA_GENERAL, 0x09);
@@ -1064,8 +1384,22 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* must be last register set: */
reg_write(priv, REG_TBG_CNTRL_0, 0);
- /* Only setup the info frames if the sink is HDMI */
- if (priv->is_hdmi_sink) {
+ priv->tmds_clock = adjusted_mode->clock;
+
+ /* CEA-861B section 6 says that:
+ * CEA version 1 (CEA-861) has no support for infoframes.
+ * CEA version 2 (CEA-861A) supports version 1 AVI infoframes,
+ * and optional basic audio.
+ * CEA version 3 (CEA-861B) supports version 1 and 2 AVI infoframes,
+ * and optional digital audio, with audio infoframes.
+ *
+ * Since we only support generation of version 2 AVI infoframes,
+ * ignore CEA version 2 and below (iow, behave as if we're a
+ * CEA-861 source.)
+ */
+ priv->supports_infoframes = priv->connector.display_info.cea_rev >= 3;
+
+ if (priv->supports_infoframes) {
/* We need to turn HDMI HDCP stuff on to get audio through */
reg &= ~TBG_CNTRL_1_DWIN_DIS;
reg_write(priv, REG_TBG_CNTRL_1, reg);
@@ -1074,127 +1408,12 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
tda998x_write_avi(priv, adjusted_mode);
- if (priv->audio_params.format != AFMT_UNUSED) {
- mutex_lock(&priv->audio_mutex);
- tda998x_configure_audio(priv,
- &priv->audio_params,
- adjusted_mode->clock);
- mutex_unlock(&priv->audio_mutex);
- }
- }
-}
-
-static enum drm_connector_status
-tda998x_connector_detect(struct drm_connector *connector, bool force)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
-
- return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
-{
- struct tda998x_priv *priv = data;
- u8 offset, segptr;
- int ret, i;
-
- offset = (blk & 1) ? 128 : 0;
- segptr = blk / 2;
-
- reg_write(priv, REG_DDC_ADDR, 0xa0);
- reg_write(priv, REG_DDC_OFFS, offset);
- reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
- reg_write(priv, REG_DDC_SEGM, segptr);
-
- /* enable reading EDID: */
- priv->wq_edid_wait = 1;
- reg_write(priv, REG_EDID_CTRL, 0x1);
-
- /* flag must be cleared by sw: */
- reg_write(priv, REG_EDID_CTRL, 0x0);
-
- /* wait for block read to complete: */
- if (priv->hdmi->irq) {
- i = wait_event_timeout(priv->wq_edid,
- !priv->wq_edid_wait,
- msecs_to_jiffies(100));
- if (i < 0) {
- dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
- return i;
- }
- } else {
- for (i = 100; i > 0; i--) {
- msleep(1);
- ret = reg_read(priv, REG_INT_FLAGS_2);
- if (ret < 0)
- return ret;
- if (ret & INT_FLAGS_2_EDID_BLK_RD)
- break;
- }
- }
-
- if (i == 0) {
- dev_err(&priv->hdmi->dev, "read edid timeout\n");
- return -ETIMEDOUT;
+ if (priv->audio_params.format != AFMT_UNUSED &&
+ priv->sink_has_audio)
+ tda998x_configure_audio(priv, &priv->audio_params);
}
- ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
- if (ret != length) {
- dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
- blk, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int tda998x_connector_get_modes(struct drm_connector *connector)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- struct edid *edid;
- int n;
-
- /*
- * If we get killed while waiting for the HPD timeout, return
- * no modes found: we are not in a restartable path, so we
- * can't handle signals gracefully.
- */
- if (tda998x_edid_delay_wait(priv))
- return 0;
-
- if (priv->rev == TDA19988)
- reg_clear(priv, REG_TX4, TX4_PD_RAM);
-
- edid = drm_do_get_edid(connector, read_edid_block, priv);
-
- if (priv->rev == TDA19988)
- reg_set(priv, REG_TX4, TX4_PD_RAM);
-
- if (!edid) {
- dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
- return 0;
- }
-
- drm_mode_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
- priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
- drm_edid_to_eld(connector, edid);
-
- kfree(edid);
-
- return n;
-}
-
-static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
- struct drm_connector *connector)
-{
- if (priv->hdmi->irq)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ mutex_unlock(&priv->audio_mutex);
}
static void tda998x_destroy(struct tda998x_priv *priv)
@@ -1215,145 +1434,6 @@ static void tda998x_destroy(struct tda998x_priv *priv)
i2c_unregister_device(priv->cec);
}
-static int tda998x_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
- int i, ret;
- struct tda998x_audio_params audio = {
- .sample_width = params->sample_width,
- .sample_rate = params->sample_rate,
- .cea = params->cea,
- };
-
- if (!priv->encoder.crtc)
- return -ENODEV;
-
- memcpy(audio.status, params->iec.status,
- min(sizeof(audio.status), sizeof(params->iec.status)));
-
- switch (daifmt->fmt) {
- case HDMI_I2S:
- if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
- daifmt->bit_clk_master || daifmt->frame_clk_master) {
- dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
- daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
- return -EINVAL;
- }
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
- if (priv->audio_port[i].format == AFMT_I2S)
- audio.config = priv->audio_port[i].config;
- audio.format = AFMT_I2S;
- break;
- case HDMI_SPDIF:
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
- if (priv->audio_port[i].format == AFMT_SPDIF)
- audio.config = priv->audio_port[i].config;
- audio.format = AFMT_SPDIF;
- break;
- default:
- dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
- return -EINVAL;
- }
-
- if (audio.config == 0) {
- dev_err(dev, "%s: No audio configutation found\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&priv->audio_mutex);
- ret = tda998x_configure_audio(priv,
- &audio,
- priv->encoder.crtc->hwmode.clock);
-
- if (ret == 0)
- priv->audio_params = audio;
- mutex_unlock(&priv->audio_mutex);
-
- return ret;
-}
-
-static void tda998x_audio_shutdown(struct device *dev, void *data)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
-
- mutex_lock(&priv->audio_mutex);
-
- reg_write(priv, REG_ENA_AP, 0);
-
- priv->audio_params.format = AFMT_UNUSED;
-
- mutex_unlock(&priv->audio_mutex);
-}
-
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
-
- mutex_lock(&priv->audio_mutex);
-
- tda998x_audio_mute(priv, enable);
-
- mutex_unlock(&priv->audio_mutex);
- return 0;
-}
-
-static int tda998x_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
- struct drm_mode_config *config = &priv->encoder.dev->mode_config;
- struct drm_connector *connector;
- int ret = -ENODEV;
-
- mutex_lock(&config->mutex);
- list_for_each_entry(connector, &config->connector_list, head) {
- if (&priv->encoder == connector->encoder) {
- memcpy(buf, connector->eld,
- min(sizeof(connector->eld), len));
- ret = 0;
- }
- }
- mutex_unlock(&config->mutex);
-
- return ret;
-}
-
-static const struct hdmi_codec_ops audio_codec_ops = {
- .hw_params = tda998x_audio_hw_params,
- .audio_shutdown = tda998x_audio_shutdown,
- .digital_mute = tda998x_audio_digital_mute,
- .get_eld = tda998x_audio_get_eld,
-};
-
-static int tda998x_audio_codec_init(struct tda998x_priv *priv,
- struct device *dev)
-{
- struct hdmi_codec_pdata codec_data = {
- .ops = &audio_codec_ops,
- .max_i2s_channels = 2,
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
- if (priv->audio_port[i].format == AFMT_I2S &&
- priv->audio_port[i].config != 0)
- codec_data.i2s = 1;
- if (priv->audio_port[i].format == AFMT_SPDIF &&
- priv->audio_port[i].config != 0)
- codec_data.spdif = 1;
- }
-
- priv->audio_pdev = platform_device_register_data(
- dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(priv->audio_pdev);
-}
-
/* I2C driver functions */
static int tda998x_get_audio_ports(struct tda998x_priv *priv,
@@ -1403,22 +1483,21 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
- unsigned short cec_addr;
+
+ mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ priv->cec_addr = 0x34 + (client->addr & 0x03);
priv->current_page = 0xff;
priv->hdmi = client;
- /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
- cec_addr = 0x34 + (client->addr & 0x03);
- priv->cec = i2c_new_dummy(client->adapter, cec_addr);
+ priv->cec = i2c_new_dummy(client->adapter, priv->cec_addr);
if (!priv->cec)
return -ENODEV;
- priv->dpms = DRM_MODE_DPMS_OFF;
-
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
@@ -1478,7 +1557,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* initialize the optional IRQ */
if (client->irq) {
- int irqf_trigger;
+ unsigned long irq_flags;
/* init read EDID waitqueue and HDP work */
init_waitqueue_head(&priv->wq_edid);
@@ -1488,11 +1567,11 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
reg_read(priv, REG_INT_FLAGS_1);
reg_read(priv, REG_INT_FLAGS_2);
- irqf_trigger =
+ irq_flags =
irqd_get_trigger_type(irq_get_irq_data(client->irq));
+ irq_flags |= IRQF_SHARED | IRQF_ONESHOT;
ret = request_threaded_irq(client->irq, NULL,
- tda998x_irq_thread,
- irqf_trigger | IRQF_ONESHOT,
+ tda998x_irq_thread, irq_flags,
"tda998x", priv);
if (ret) {
dev_err(&client->dev,
@@ -1519,8 +1598,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->vip_cntrl_2 = video;
}
- mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
-
ret = tda998x_get_audio_ports(priv, np);
if (ret)
goto fail;
@@ -1567,45 +1644,25 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
.destroy = tda998x_encoder_destroy,
};
-static struct drm_encoder *
-tda998x_connector_best_encoder(struct drm_connector *connector)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
- return &priv->encoder;
-}
-
-static
-const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
- .get_modes = tda998x_connector_get_modes,
- .mode_valid = tda998x_connector_mode_valid,
- .best_encoder = tda998x_connector_best_encoder,
-};
-
-static void tda998x_connector_destroy(struct drm_connector *connector)
+static void tda998x_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
- return drm_atomic_helper_connector_dpms(connector, mode);
- else
- return drm_helper_connector_dpms(connector, mode);
+ priv->audio_params = p->audio_params;
}
-static const struct drm_connector_funcs tda998x_connector_funcs = {
- .dpms = tda998x_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = tda998x_connector_detect,
- .destroy = tda998x_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int tda998x_bind(struct device *dev, struct device *master, void *data)
{
struct tda998x_encoder_params *params = dev->platform_data;
@@ -1630,7 +1687,6 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
crtcs = 1 << 0;
}
- priv->connector.interlace_allowed = 1;
priv->encoder.possible_crtcs = crtcs;
ret = tda998x_create(client, priv);
@@ -1638,9 +1694,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
return ret;
if (!dev->of_node && params)
- tda998x_encoder_set_config(priv, params);
-
- tda998x_encoder_set_polling(priv, &priv->connector);
+ tda998x_set_config(priv, params);
drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
@@ -1648,24 +1702,12 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_encoder;
- drm_connector_helper_add(&priv->connector,
- &tda998x_connector_helper_funcs);
- ret = drm_connector_init(drm, &priv->connector,
- &tda998x_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ ret = tda998x_connector_init(priv, drm);
if (ret)
goto err_connector;
- ret = drm_connector_register(&priv->connector);
- if (ret)
- goto err_sysfs;
-
- drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
-
return 0;
-err_sysfs:
- drm_connector_cleanup(&priv->connector);
err_connector:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
@@ -1678,7 +1720,6 @@ static void tda998x_unbind(struct device *dev, struct device *master,
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
- drm_connector_unregister(&priv->connector);
drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
tda998x_destroy(priv);
@@ -1692,6 +1733,10 @@ static const struct component_ops tda998x_ops = {
static int
tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_warn(&client->dev, "adapter does not support I2C\n");
+ return -EIO;
+ }
return component_add(&client->dev, &tda998x_ops);
}
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index d91856779beb..ab4e6cbe1f8b 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,9 +113,7 @@ static const struct file_operations i810_buffer_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 0be55dc1ef4b..02504a7cfaf2 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,9 +49,7 @@ static const struct file_operations i810_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 7769e469118f..5ddde7349fbd 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -11,6 +11,7 @@ config DRM_I915
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
+ select RELAY
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
@@ -24,28 +25,59 @@ config DRM_I915
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
- If M is selected, the module will be called i915. AGP support
- is required for this driver to work. This driver is used by
- the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
- replaces the older i830 module that supported a subset of the
- hardware in older X.org releases.
+
+ This driver is used by the Intel driver in X.org 6.8 and
+ XFree86 4.4 and above. It replaces the older i830 module that
+ supported a subset of the hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
-config DRM_I915_PRELIMINARY_HW_SUPPORT
- bool "Enable preliminary support for prerelease Intel hardware by default"
+ If "M" is selected, the module will be called i915.
+
+config DRM_I915_ALPHA_SUPPORT
+ bool "Enable alpha quality support for new Intel hardware by default"
depends on DRM_I915
default n
help
- Choose this option if you have prerelease Intel hardware and want the
- i915 driver to support it by default. You can enable such support at
- runtime with the module option i915.preliminary_hw_support=1; this
- option changes the default for that module option.
+ Choose this option if you have new Intel hardware and want to enable
+ the alpha quality i915 driver support for the hardware in this kernel
+ version. You can also enable the support at runtime using the module
+ parameter i915.alpha_support=1; this option changes the default for
+ that module parameter.
+
+ It is recommended to upgrade to a kernel version with proper support
+ as soon as it is available. Generally fixes for platforms with alpha
+ support are not backported to older kernels.
If in doubt, say "N".
+config DRM_I915_CAPTURE_ERROR
+ bool "Enable capturing GPU state following a hang"
+ depends on DRM_I915
+ default y
+ help
+ This option enables capturing the GPU state when a hang is detected.
+ This information is vital for triaging hangs and assists in debugging.
+ Please report any hang to
+ https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+ for triaging.
+
+ If in doubt, say "Y".
+
+config DRM_I915_COMPRESS_ERROR
+ bool "Compress GPU error state"
+ depends on DRM_I915_CAPTURE_ERROR
+ select ZLIB_DEFLATE
+ default y
+ help
+ This option selects ZLIB_DEFLATE if it isn't already
+ selected and causes any error state captured upon a GPU hang
+ to be compressed using zlib.
+
+ If in doubt, say "Y".
+
config DRM_I915_USERPTR
bool "Always enable userptr support"
depends on DRM_I915
@@ -60,6 +92,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on 64BIT
default n
help
Choose this option if you want to enable Intel GVT-g graphics
@@ -79,6 +112,15 @@ config DRM_I915_GVT
If in doubt, say "N".
+config DRM_I915_GVT_KVMGT
+ tristate "Enable KVM/VFIO support for Intel GVT-g"
+ depends on DRM_I915_GVT
+ depends on KVM
+ default n
+ help
+ Choose this option if you want to enable KVMGT support for
+ Intel GVT-g.
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index cee87bfd10c4..51ba630a134b 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -21,6 +21,7 @@ config DRM_I915_DEBUG
select PREEMPT_COUNT
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
+ select DRM_DEBUG_MM if DRM=y
default n
help
Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a998c2bce70a..3dea46af9fe6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,19 +33,22 @@ i915-y += i915_cmd_parser.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
- i915_gem_fence.o \
+ i915_gem_fence_reg.o \
i915_gem_gtt.o \
+ i915_gem_internal.o \
i915_gem.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_gem_timeline.o \
i915_gem_userptr.o \
- i915_gpu_error.o \
i915_trace_points.o \
+ i915_vma.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
+ intel_hangcheck.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
@@ -102,11 +105,15 @@ i915-y += dvo_ch7017.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
+ intel_lspcon.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
intel_tv.o
+# Post-mortem debug and GPU hang state capture
+i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index d0f21a6ad60d..8a46a7f31d53 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,5 +1,10 @@
GVT_DIR := gvt
-GVT_SOURCE := gvt.o
+GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
+ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
+ execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
-i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+
+CFLAGS_kvmgt.o := -Wno-unused-function
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
new file mode 100644
index 000000000000..0d41ebc4aea6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Pei Zhang <pei.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ u32 alloc_flag, search_flag;
+ u64 start, end, size;
+ struct drm_mm_node *node;
+ int retried = 0;
+ int ret;
+
+ if (high_gm) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ node = &vgpu->gm.high_gm_node;
+ size = vgpu_hidden_sz(vgpu);
+ start = gvt_hidden_gmadr_base(gvt);
+ end = gvt_hidden_gmadr_end(gvt);
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ node = &vgpu->gm.low_gm_node;
+ size = vgpu_aperture_sz(vgpu);
+ start = gvt_aperture_gmadr_base(gvt);
+ end = gvt_aperture_gmadr_end(gvt);
+ }
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+search_again:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+ node, size, 4096, 0,
+ start, end, search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(&dev_priv->ggtt.base,
+ size, 4096, 0, start, end, 0);
+ if (ret == 0 && ++retried < 3)
+ goto search_again;
+
+ gvt_err("fail to alloc %s gm space from host, retried %d\n",
+ high_gm ? "high" : "low", retried);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ ret = alloc_gm(vgpu, false);
+ if (ret)
+ return ret;
+
+ ret = alloc_gm(vgpu, true);
+ if (ret)
+ goto out_free_aperture;
+
+ gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
+ vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
+
+ gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
+ vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
+
+ return 0;
+out_free_aperture:
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ drm_mm_remove_node(&vgpu->gm.low_gm_node);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static void free_vgpu_gm(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ drm_mm_remove_node(&vgpu->gm.low_gm_node);
+ drm_mm_remove_node(&vgpu->gm.high_gm_node);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+/**
+ * intel_vgpu_write_fence - write fence registers owned by a vGPU
+ * @vgpu: vGPU instance
+ * @fence: vGPU fence register number
+ * @value: Fence register value to be written
+ *
+ * This function is used to write fence registers owned by a vGPU. The vGPU
+ * fence register number will be translated into HW fence register number.
+ *
+ */
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+ u32 fence, u64 value)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ i915_reg_t fence_reg_lo, fence_reg_hi;
+
+ assert_rpm_wakelock_held(dev_priv);
+
+ if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+ return;
+
+ reg = vgpu->fence.regs[fence];
+ if (WARN_ON(!reg))
+ return;
+
+ fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
+ fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
+
+ I915_WRITE(fence_reg_lo, 0);
+ POSTING_READ(fence_reg_lo);
+
+ I915_WRITE(fence_reg_hi, upper_32_bits(value));
+ I915_WRITE(fence_reg_lo, lower_32_bits(value));
+ POSTING_READ(fence_reg_lo);
+}
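
Editorial note (not part of the patch): intel_vgpu_write_fence() above programs a 64-bit fence value as two 32-bit MMIO writes, clearing the low dword first so the fence is never visible half-updated. A self-contained sketch of that split, with a hypothetical mmio_write() stand-in for I915_WRITE():

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))
#define lower_32_bits(n) ((uint32_t)(n))

/* Stand-in for I915_WRITE(): print the register name and value instead. */
static void mmio_write(const char *reg, uint32_t val)
{
        printf("%s <= 0x%08" PRIx32 "\n", reg, val);
}

int main(void)
{
        uint64_t fence_value = 0x0000012345678001ULL;   /* hypothetical value */

        /* Disable the fence, program the high half, then the low half, so
         * the register only becomes valid once both words are consistent. */
        mmio_write("FENCE_REG_LO", 0);
        mmio_write("FENCE_REG_HI", upper_32_bits(fence_value));
        mmio_write("FENCE_REG_LO", lower_32_bits(fence_value));
        return 0;
}
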
+
+static void free_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ u32 i;
+
+ if (WARN_ON(!vgpu_fence_sz(vgpu)))
+ return;
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = vgpu->fence.regs[i];
+ intel_vgpu_write_fence(vgpu, i, 0);
+ list_add_tail(&reg->link,
+ &dev_priv->mm.fence_list);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ int i;
+ struct list_head *pos, *q;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* Request fences from host */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i = 0;
+ list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
+ reg = list_entry(pos, struct drm_i915_fence_reg, link);
+ if (reg->pin_count || reg->vma)
+ continue;
+ list_del(pos);
+ vgpu->fence.regs[i] = reg;
+ intel_vgpu_write_fence(vgpu, i, 0);
+ if (++i == vgpu_fence_sz(vgpu))
+ break;
+ }
+ if (i != vgpu_fence_sz(vgpu))
+ goto out_free_fence;
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return 0;
+out_free_fence:
+ /* Return fences to host, if fail */
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = vgpu->fence.regs[i];
+ if (!reg)
+ continue;
+ list_add_tail(&reg->link,
+ &dev_priv->mm.fence_list);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return -ENOSPC;
+}
+
+static void free_resource(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
+ gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
+ gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
+}
+
+static int alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ unsigned long request, avail, max, taken;
+ const char *item;
+
+ if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+ gvt_err("Invalid vGPU creation params\n");
+ return -EINVAL;
+ }
+
+ item = "low GM space";
+ max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ taken = gvt->gm.vgpu_allocated_low_gm_size;
+ avail = max - taken;
+ request = MB_TO_BYTES(param->low_gm_sz);
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_aperture_sz(vgpu) = request;
+
+ item = "high GM space";
+ max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+ taken = gvt->gm.vgpu_allocated_high_gm_size;
+ avail = max - taken;
+ request = MB_TO_BYTES(param->high_gm_sz);
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_hidden_sz(vgpu) = request;
+
+ item = "fence";
+ max = gvt_fence_sz(gvt) - HOST_FENCE;
+ taken = gvt->fence.vgpu_allocated_fence_num;
+ avail = max - taken;
+ request = param->fence_sz;
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_fence_sz(vgpu) = request;
+
+ gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
+ gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
+ gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+ return 0;
+
+no_enough_resource:
+ gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
+ gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
+ vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+ BYTES_TO_MB(max), BYTES_TO_MB(taken));
+ return -ENOSPC;
+}
+
+/**
+ * intel_vgpu_free_resource - free HW resource owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to free the HW resource owned by a vGPU.
+ *
+ */
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
+{
+ free_vgpu_gm(vgpu);
+ free_vgpu_fence(vgpu);
+ free_resource(vgpu);
+}
+
+/**
+ * intel_vgpu_alloc_resource - allocate HW resource for a vGPU
+ * @vgpu: vGPU
+ * @param: vGPU creation params
+ *
+ * This function is used to allocate HW resource for a vGPU. User specifies
+ * the resource configuration through the creation params.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ int ret;
+
+ ret = alloc_resource(vgpu, param);
+ if (ret)
+ return ret;
+
+ ret = alloc_vgpu_gm(vgpu);
+ if (ret)
+ goto out_free_resource;
+
+ ret = alloc_vgpu_fence(vgpu);
+ if (ret)
+ goto out_free_vgpu_gm;
+
+ return 0;
+
+out_free_vgpu_gm:
+ free_vgpu_gm(vgpu);
+out_free_resource:
+ free_resource(vgpu);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
new file mode 100644
index 000000000000..db516382a4d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+enum {
+ INTEL_GVT_PCI_BAR_GTTMMIO = 0,
+ INTEL_GVT_PCI_BAR_APERTURE,
+ INTEL_GVT_PCI_BAR_PIO,
+ INTEL_GVT_PCI_BAR_MAX,
+};
+
+/**
+ * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 4))
+ return -EINVAL;
+
+ if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ return -EINVAL;
+
+ memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
+ return 0;
+}
+
+static int map_aperture(struct intel_vgpu *vgpu, bool map)
+{
+ u64 first_gfn, first_mfn;
+ u64 val;
+ int ret;
+
+ if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
+ return 0;
+
+ val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+ else
+ val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+
+ first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
+ first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
+ first_mfn,
+ vgpu_aperture_sz(vgpu) >>
+ PAGE_SHIFT, map);
+ if (ret)
+ return ret;
+
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
+ return 0;
+}
+
+static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
+{
+ u64 start, end;
+ u64 val;
+ int ret;
+
+ if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
+ return 0;
+
+ val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+ else
+ start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+
+ start &= ~GENMASK(3, 0);
+ end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
+
+ ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
+ if (ret)
+ return ret;
+
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
+ return 0;
+}
+
+static int emulate_pci_command_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u8 old = vgpu_cfg_space(vgpu)[offset];
+ u8 new = *(u8 *)p_data;
+ u8 changed = old ^ new;
+ int ret;
+
+ if (!(changed & PCI_COMMAND_MEMORY))
+ return 0;
+
+ if (old & PCI_COMMAND_MEMORY) {
+ ret = trap_gttmmio(vgpu, false);
+ if (ret)
+ return ret;
+ ret = map_aperture(vgpu, false);
+ if (ret)
+ return ret;
+ } else {
+ ret = trap_gttmmio(vgpu, true);
+ if (ret)
+ return ret;
+ ret = map_aperture(vgpu, true);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ return 0;
+}
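
Editorial note (not part of the patch): emulate_pci_command_write() above only acts when the memory-decode enable bit actually toggles; the old ^ new XOR isolates the changed bits. A small standalone sketch of that change-detection pattern, with hypothetical before/after values:

#include <stdio.h>
#include <stdint.h>

#define PCI_COMMAND_MEMORY 0x2  /* memory space enable bit */

int main(void)
{
        uint8_t old = 0x00, new = 0x02;         /* hypothetical before/after */
        uint8_t changed = old ^ new;            /* bits that differ */

        if (changed & PCI_COMMAND_MEMORY)
                printf("memory decode %s: update aperture map and MMIO trap\n",
                       (new & PCI_COMMAND_MEMORY) ? "enabled" : "disabled");
        else
                printf("PCI_COMMAND_MEMORY unchanged, nothing to do\n");
        return 0;
}
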
+
+static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int bar_index =
+ (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
+ u32 new = *(u32 *)(p_data);
+ bool lo = IS_ALIGNED(offset, 8);
+ u64 size;
+ int ret = 0;
+ bool mmio_enabled =
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
+
+ if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
+ return -EINVAL;
+
+ if (new == 0xffffffff) {
+ /*
+ * Power-up software can determine how much address
+ * space the device requires by writing a value of
+ * all 1's to the register and then reading the value
+ * back. The device will return 0's in all don't-care
+ * address bits.
+ */
+ size = vgpu->cfg_space.bar[bar_index].size;
+ if (lo) {
+ new = rounddown(new, size);
+ } else {
+ u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
+ /* A 32-bit BAR returns all zeros in the upper 32 bits;
+ * a 64-bit BAR derives the upper-half mask from the size
+ * computed with the lower 32 bits and returns that value.
+ */
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ new &= (~(size-1)) >> 32;
+ else
+ new = 0;
+ }
+ /*
+ * Unmap & untrap the BAR, since the guest hasn't configured a
+ * valid GPA
+ */
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, false);
+ break;
+ }
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ } else {
+ /*
+ * Unmap & untrap the old BAR first, since the guest has
+ * re-configured the BAR
+ */
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, false);
+ break;
+ }
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ /* Track the new BAR */
+ if (mmio_enabled) {
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, true);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, true);
+ break;
+ }
+ }
+ }
+ return ret;
+}
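
Editorial note (not part of the patch): the BAR path above emulates the standard PCI sizing probe the comment describes — the guest writes all 1's, the device returns 0's in the don't-care bits, and the size falls out of the masked read-back. A minimal sketch of that computation from the guest's side, assuming a 32-bit memory BAR and a hypothetical 16 MiB read-back value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical read-back after writing 0xffffffff to a 32-bit memory
         * BAR covering a 16 MiB region: don't-care bits come back as zero. */
        uint32_t readback = 0xff000000;

        uint32_t mask = readback & ~0xfU;       /* drop the low flag bits */
        uint32_t size = ~mask + 1;              /* two's complement gives the size */

        printf("BAR size: %u bytes (%u MiB)\n", size, size >> 20);
        return 0;
}
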
+
+/**
+ * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int ret;
+
+ if (WARN_ON(bytes > 4))
+ return -EINVAL;
+
+ if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+ return -EINVAL;
+
+ /* First check if it's PCI_COMMAND */
+ if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
+ if (WARN_ON(bytes > 2))
+ return -EINVAL;
+ return emulate_pci_command_write(vgpu, offset, p_data, bytes);
+ }
+
+ switch (rounddown(offset, 4)) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_1:
+ case PCI_BASE_ADDRESS_2:
+ case PCI_BASE_ADDRESS_3:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+
+ case INTEL_GVT_PCI_SWSCI:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
+ if (ret)
+ return ret;
+ break;
+
+ case INTEL_GVT_PCI_OPREGION:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+ if (ret)
+ return ret;
+
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ break;
+ default:
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ break;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
new file mode 100644
index 000000000000..d26a092c70e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -0,0 +1,2831 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+#define INVALID_OP (~0U)
+
+#define OP_LEN_MI 9
+#define OP_LEN_2D 10
+#define OP_LEN_3D_MEDIA 16
+#define OP_LEN_MFX_VC 16
+#define OP_LEN_VEBOX 16
+
+#define CMD_TYPE(cmd) (((cmd) >> 29) & 7)
+
+struct sub_op_bits {
+ int hi;
+ int low;
+};
+struct decode_info {
+ char *name;
+ int op_len;
+ int nr_sub_op;
+ struct sub_op_bits *sub_op;
+};
+
+#define MAX_CMD_BUDGET 0x7fffffff
+#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15)
+#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9)
+#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1)
+
+#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20)
+#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10)
+#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2)
+
+/* Render Command Map */
+
+/* MI_* command Opcode (28:23) */
+#define OP_MI_NOOP 0x0
+#define OP_MI_SET_PREDICATE 0x1 /* HSW+ */
+#define OP_MI_USER_INTERRUPT 0x2
+#define OP_MI_WAIT_FOR_EVENT 0x3
+#define OP_MI_FLUSH 0x4
+#define OP_MI_ARB_CHECK 0x5
+#define OP_MI_RS_CONTROL 0x6 /* HSW+ */
+#define OP_MI_REPORT_HEAD 0x7
+#define OP_MI_ARB_ON_OFF 0x8
+#define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */
+#define OP_MI_BATCH_BUFFER_END 0xA
+#define OP_MI_SUSPEND_FLUSH 0xB
+#define OP_MI_PREDICATE 0xC /* IVB+ */
+#define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */
+#define OP_MI_SET_APPID 0xE /* IVB+ */
+#define OP_MI_RS_CONTEXT 0xF /* HSW+ */
+#define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */
+#define OP_MI_DISPLAY_FLIP 0x14
+#define OP_MI_SEMAPHORE_MBOX 0x16
+#define OP_MI_SET_CONTEXT 0x18
+#define OP_MI_MATH 0x1A
+#define OP_MI_URB_CLEAR 0x19
+#define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */
+#define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */
+
+#define OP_MI_STORE_DATA_IMM 0x20
+#define OP_MI_STORE_DATA_INDEX 0x21
+#define OP_MI_LOAD_REGISTER_IMM 0x22
+#define OP_MI_UPDATE_GTT 0x23
+#define OP_MI_STORE_REGISTER_MEM 0x24
+#define OP_MI_FLUSH_DW 0x26
+#define OP_MI_CLFLUSH 0x27
+#define OP_MI_REPORT_PERF_COUNT 0x28
+#define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */
+#define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */
+#define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */
+#define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */
+#define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */
+#define OP_MI_2E 0x2E /* BDW+ */
+#define OP_MI_2F 0x2F /* BDW+ */
+#define OP_MI_BATCH_BUFFER_START 0x31
+
+/* Bit definition for dword 0 */
+#define _CMDBIT_BB_START_IN_PPGTT (1UL << 8)
+
+#define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
+#define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U)
+#define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U)
+
+/* 2D command: Opcode (28:22) */
+#define OP_2D(x) ((2<<7) | x)
+
+#define OP_XY_SETUP_BLT OP_2D(0x1)
+#define OP_XY_SETUP_CLIP_BLT OP_2D(0x3)
+#define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11)
+#define OP_XY_PIXEL_BLT OP_2D(0x24)
+#define OP_XY_SCANLINES_BLT OP_2D(0x25)
+#define OP_XY_TEXT_BLT OP_2D(0x26)
+#define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31)
+#define OP_XY_COLOR_BLT OP_2D(0x50)
+#define OP_XY_PAT_BLT OP_2D(0x51)
+#define OP_XY_MONO_PAT_BLT OP_2D(0x52)
+#define OP_XY_SRC_COPY_BLT OP_2D(0x53)
+#define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54)
+#define OP_XY_FULL_BLT OP_2D(0x55)
+#define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56)
+#define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57)
+#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58)
+#define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59)
+#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71)
+#define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72)
+#define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73)
+#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74)
+#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75)
+#define OP_XY_PAT_CHROMA_BLT OP_2D(0x76)
+#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77)
+
+/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
+#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
+ ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
+
+#define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03)
+
+#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
+#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
+#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
+
+#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
+
+#define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04)
+
+#define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0)
+#define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1)
+#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
+#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
+#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+
+#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
+#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
+#define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
+#define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5)
+
+#define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
+#define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
+#define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
+#define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
+#define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08)
+#define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 0x0, 0x09)
+#define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A)
+#define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B)
+#define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
+#define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E)
+#define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F)
+#define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10)
+#define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11)
+#define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12)
+#define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13)
+#define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14)
+#define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15)
+#define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16)
+#define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17)
+#define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18)
+#define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
+#define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
+#define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
+#define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
+#define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
+#define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
+#define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
+#define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
+#define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
+#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
+#define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
+#define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
+#define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
+#define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
+#define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
+
+#define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
+#define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
+#define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
+#define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
+#define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
+#define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
+#define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
+#define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
+#define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
+#define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
+#define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
+
+#define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02)
+#define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04)
+#define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05)
+#define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06)
+#define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07)
+#define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08)
+#define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A)
+#define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C)
+#define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D)
+#define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E)
+#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F)
+#define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10)
+#define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11)
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
+#define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17)
+#define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18)
+#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
+#define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
+#define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C)
+#define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00)
+#define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00)
+
+/* VCCP Command Parser */
+
+/*
+ * The MFX and VBE command definitions below are taken from the vaapi
+ * intel-driver project (BSD License)
+ * git://anongit.freedesktop.org/vaapi/intel-driver
+ * src/i965_defines.h
+ */
+
+#define OP_MFX(pipeline, op, sub_opa, sub_opb) \
+ (3 << 13 | \
+ (pipeline) << 11 | \
+ (op) << 8 | \
+ (sub_opa) << 5 | \
+ (sub_opb))
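+
+/*
+ * A worked example of the encoding: OP_MFX(2, 1, 0, 3) (MFX_AVC_SLICE_STATE
+ * below) evaluates to 3 << 13 | 2 << 11 | 1 << 8 | 3 = 0x7103.  These are
+ * the opcode fields taken from bits 31:29 (type), 28:27 (pipeline),
+ * 26:24 (opcode), 23:21 (sub opcode a) and 20:16 (sub opcode b) of the
+ * command header, matching the sub_op_mfx_vc decode table further down.
+ */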
+
+#define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */
+#define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */
+#define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */
+#define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */
+#define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */
+#define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */
+#define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */
+#define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */
+#define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */
+#define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */
+#define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */
+
+#define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */
+
+#define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */
+#define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */
+#define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */
+#define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */
+#define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */
+#define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */
+#define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */
+#define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */
+#define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */
+#define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */
+#define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */
+#define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */
+
+#define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */
+#define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */
+#define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */
+#define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */
+#define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */
+
+#define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */
+#define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */
+#define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */
+#define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */
+#define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */
+
+#define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */
+#define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */
+#define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */
+
+#define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0)
+#define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2)
+#define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8)
+
+#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
+ (3 << 13 | \
+ (pipeline) << 11 | \
+ (op) << 8 | \
+ (sub_opa) << 5 | \
+ (sub_opb))
+
+#define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0)
+#define OP_VEB_STATE OP_VEB(2, 4, 0, 2)
+#define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3)
+
+struct parser_exec_state;
+
+typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
+
+#define GVT_CMD_HASH_BITS 7
+
+/* which DWords need address fix */
+#define ADDR_FIX_1(x1) (1 << (x1))
+#define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
+#define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
+#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
+#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
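+
+/*
+ * Example: ADDR_FIX_2(4, 7) expands to (1 << 4) | (1 << 7) = 0x90, i.e. a
+ * bitmap marking DWords 4 and 7 of the command as graphics addresses that
+ * need fixing up (see the addr_bitmap field of struct cmd_info below).
+ */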
+
+struct cmd_info {
+ char *name;
+ u32 opcode;
+
+#define F_LEN_MASK (1U<<0)
+#define F_LEN_CONST 1U
+#define F_LEN_VAR 0U
+
+/*
+ * command has its own ip advance logic,
+ * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
+ */
+#define F_IP_ADVANCE_CUSTOM (1<<1)
+
+#define F_POST_HANDLE (1<<2)
+ u32 flag;
+
+#define R_RCS (1 << RCS)
+#define R_VCS1 (1 << VCS)
+#define R_VCS2 (1 << VCS2)
+#define R_VCS (R_VCS1 | R_VCS2)
+#define R_BCS (1 << BCS)
+#define R_VECS (1 << VECS)
+#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
+ /* rings that support this cmd: BLT/RCS/VCS/VECS */
+ uint16_t rings;
+
+ /* devices that support this cmd: SNB/IVB/HSW/... */
+ uint16_t devices;
+
+ /* which DWords are addresses that need to be fixed up.
+ * a 0 bit means a 32-bit non-address operand in the command,
+ * a 1 bit means an address operand, which could be 32-bit
+ * or 64-bit depending on the architecture (defined by
+ * "gmadr_bytes_in_cmd" in intel_gvt).
+ * No matter the address length, each address only takes
+ * one bit in the bitmap.
+ */
+ uint16_t addr_bitmap;
+
+ /* flag == F_LEN_CONST : command length
+ * flag == F_LEN_VAR : length bias bits
+ * Note: length is in DWord
+ */
+ uint8_t len;
+
+ parser_cmd_handler handler;
+};
+
+struct cmd_entry {
+ struct hlist_node hlist;
+ struct cmd_info *info;
+};
+
+enum {
+ RING_BUFFER_INSTRUCTION,
+ BATCH_BUFFER_INSTRUCTION,
+ BATCH_BUFFER_2ND_LEVEL,
+};
+
+enum {
+ GTT_BUFFER,
+ PPGTT_BUFFER
+};
+
+struct parser_exec_state {
+ struct intel_vgpu *vgpu;
+ int ring_id;
+
+ int buf_type;
+
+ /* batch buffer address type */
+ int buf_addr_type;
+
+ /* graphics memory address of ring buffer start */
+ unsigned long ring_start;
+ unsigned long ring_size;
+ unsigned long ring_head;
+ unsigned long ring_tail;
+
+ /* instruction graphics memory address */
+ unsigned long ip_gma;
+
+ /* mapped va of ip_gma */
+ void *ip_va;
+ void *rb_va;
+
+ void *ret_bb_va;
+ /* next instruction when return from batch buffer to ring buffer */
+ unsigned long ret_ip_gma_ring;
+
+ /* next instruction when return from 2nd batch buffer to batch buffer */
+ unsigned long ret_ip_gma_bb;
+
+ /* batch buffer address type (GTT or PPGTT)
+ * used when returning from a 2nd level batch buffer
+ */
+ int saved_buf_addr_type;
+
+ struct cmd_info *info;
+
+ struct intel_vgpu_workload *workload;
+};
+
+#define gmadr_dw_number(s) \
+ (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
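+
+/*
+ * gmadr_bytes_in_cmd is the width of a graphics address in a command for
+ * this device (4 or 8 bytes), so gmadr_dw_number() is simply the number of
+ * DWords one such address occupies: 1 for a 32-bit address, 2 for a 64-bit
+ * address.
+ */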
+
+static unsigned long bypass_scan_mask = 0;
+static bool bypass_batch_buffer_scan = true;
+
+/* ring ALL, type = 0 */
+static struct sub_op_bits sub_op_mi[] = {
+ {31, 29},
+ {28, 23},
+};
+
+static struct decode_info decode_info_mi = {
+ "MI",
+ OP_LEN_MI,
+ ARRAY_SIZE(sub_op_mi),
+ sub_op_mi,
+};
+
+/* ring RCS, command type 2 */
+static struct sub_op_bits sub_op_2d[] = {
+ {31, 29},
+ {28, 22},
+};
+
+static struct decode_info decode_info_2d = {
+ "2D",
+ OP_LEN_2D,
+ ARRAY_SIZE(sub_op_2d),
+ sub_op_2d,
+};
+
+/* ring RCS, command type 3 */
+static struct sub_op_bits sub_op_3d_media[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 16},
+};
+
+static struct decode_info decode_info_3d_media = {
+ "3D_Media",
+ OP_LEN_3D_MEDIA,
+ ARRAY_SIZE(sub_op_3d_media),
+ sub_op_3d_media,
+};
+
+/* ring VCS, command type 3 */
+static struct sub_op_bits sub_op_mfx_vc[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 21},
+ {20, 16},
+};
+
+static struct decode_info decode_info_mfx_vc = {
+ "MFX_VC",
+ OP_LEN_MFX_VC,
+ ARRAY_SIZE(sub_op_mfx_vc),
+ sub_op_mfx_vc,
+};
+
+/* ring VECS, command type 3 */
+static struct sub_op_bits sub_op_vebox[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 21},
+ {20, 16},
+};
+
+static struct decode_info decode_info_vebox = {
+ "VEBOX",
+ OP_LEN_VEBOX,
+ ARRAY_SIZE(sub_op_vebox),
+ sub_op_vebox,
+};
+
+static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+ [RCS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_3d_media,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VCS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_mfx_vc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [BCS] = {
+ &decode_info_mi,
+ NULL,
+ &decode_info_2d,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VECS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_vebox,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VCS2] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_mfx_vc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+};
+
+static inline u32 get_opcode(u32 cmd, int ring_id)
+{
+ struct decode_info *d_info;
+
+ if (ring_id >= I915_NUM_ENGINES)
+ return INVALID_OP;
+
+ d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ if (d_info == NULL)
+ return INVALID_OP;
+
+ return cmd >> (32 - d_info->op_len);
+}
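+
+/*
+ * Worked example: a PIPE_CONTROL header such as 0x7a000004 on the RCS ring
+ * carries command type 0x3 in bits 31:29, so decode_info_3d_media is
+ * selected (op_len == 16) and get_opcode() returns 0x7a000004 >> 16 =
+ * 0x7a00.  That is the value OP_PIPE_CONTROL (OP_3D_MEDIA(0x3, 0x2, 0x00))
+ * is expected to expand to, assuming OP_3D_MEDIA packs its fields into
+ * bits 15:13/12:11/10:8/7:0 as the sub_op_3d_media table implies.
+ */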
+
+static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+ unsigned int opcode, int ring_id)
+{
+ struct cmd_entry *e;
+
+ hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
+ if ((opcode == e->info->opcode) &&
+ (e->info->rings & (1 << ring_id)))
+ return e->info;
+ }
+ return NULL;
+}
+
+static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+ u32 cmd, int ring_id)
+{
+ u32 opcode;
+
+ opcode = get_opcode(cmd, ring_id);
+ if (opcode == INVALID_OP)
+ return NULL;
+
+ return find_cmd_entry(gvt, opcode, ring_id);
+}
+
+static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
+{
+ return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
+}
+
+static inline void print_opcode(u32 cmd, int ring_id)
+{
+ struct decode_info *d_info;
+ int i;
+
+ if (ring_id >= I915_NUM_ENGINES)
+ return;
+
+ d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ if (d_info == NULL)
+ return;
+
+ gvt_err("opcode=0x%x %s sub_ops:",
+ cmd >> (32 - d_info->op_len), d_info->name);
+
+ for (i = 0; i < d_info->nr_sub_op; i++)
+ pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
+ d_info->sub_op[i].low));
+
+ pr_err("\n");
+}
+
+static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
+{
+ return s->ip_va + (index << 2);
+}
+
+static inline u32 cmd_val(struct parser_exec_state *s, int index)
+{
+ return *cmd_ptr(s, index);
+}
+
+static void parser_exec_state_dump(struct parser_exec_state *s)
+{
+ int cnt = 0;
+ int i;
+
+ gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
+ s->ring_id, s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
+
+ gvt_err(" %s %s ip_gma(%08lx) ",
+ s->buf_type == RING_BUFFER_INSTRUCTION ?
+ "RING_BUFFER" : "BATCH_BUFFER",
+ s->buf_addr_type == GTT_BUFFER ?
+ "GTT" : "PPGTT", s->ip_gma);
+
+ if (s->ip_va == NULL) {
+ gvt_err(" ip_va(NULL)");
+ return;
+ }
+
+ gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+ cmd_val(s, 2), cmd_val(s, 3));
+
+ print_opcode(cmd_val(s, 0), s->ring_id);
+
+ /* print the whole page to trace */
+ pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+ cmd_val(s, 2), cmd_val(s, 3));
+
+ s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
+
+ while (cnt < 1024) {
+ pr_err("ip_va=%p: ", s->ip_va);
+ for (i = 0; i < 8; i++)
+ pr_err("%08x ", cmd_val(s, i));
+ pr_err("\n");
+
+ s->ip_va += 8 * sizeof(u32);
+ cnt += 8;
+ }
+}
+
+static inline void update_ip_va(struct parser_exec_state *s)
+{
+ unsigned long len = 0;
+
+ if (WARN_ON(s->ring_head == s->ring_tail))
+ return;
+
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ unsigned long ring_top = s->ring_start + s->ring_size;
+
+ if (s->ring_head > s->ring_tail) {
+ if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
+ len = (s->ip_gma - s->ring_head);
+ else if (s->ip_gma >= s->ring_start &&
+ s->ip_gma <= s->ring_tail)
+ len = (ring_top - s->ring_head) +
+ (s->ip_gma - s->ring_start);
+ } else
+ len = (s->ip_gma - s->ring_head);
+
+ s->ip_va = s->rb_va + len;
+ } else {/* shadow batch buffer */
+ s->ip_va = s->ret_bb_va;
+ }
+}
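+
+/*
+ * Example of the wrap-around case above: with ring_start = 0x1000,
+ * ring_size = 0x1000 (ring_top = 0x2000), ring_head = 0x1c00 and
+ * ring_tail = 0x1400, an ip_gma of 0x1100 lies in the wrapped part of the
+ * ring, so len = (0x2000 - 0x1c00) + (0x1100 - 0x1000) = 0x500 and
+ * ip_va = rb_va + 0x500.
+ */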
+
+static inline int ip_gma_set(struct parser_exec_state *s,
+ unsigned long ip_gma)
+{
+ WARN_ON(!IS_ALIGNED(ip_gma, 4));
+
+ s->ip_gma = ip_gma;
+ update_ip_va(s);
+ return 0;
+}
+
+static inline int ip_gma_advance(struct parser_exec_state *s,
+ unsigned int dw_len)
+{
+ s->ip_gma += (dw_len << 2);
+
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->ip_gma >= s->ring_start + s->ring_size)
+ s->ip_gma -= s->ring_size;
+ update_ip_va(s);
+ } else {
+ s->ip_va += (dw_len << 2);
+ }
+
+ return 0;
+}
+
+static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+{
+ if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
+ return info->len;
+ else
+ return (cmd & ((1U << info->len) - 1)) + 2;
+}
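+
+/*
+ * For F_LEN_VAR commands, info->len is the width of the DWord-length field
+ * in the header and the parser adds a fixed bias of 2.  E.g. for
+ * PIPE_CONTROL (len == 8 in cmd_info[] below), a header of 0x7a000004
+ * carries a length field of 4, so get_cmd_length() returns 4 + 2 = 6
+ * DWords.  For F_LEN_CONST commands such as MI_NOOP, info->len is the
+ * fixed length itself (1 DWord).
+ */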
+
+static inline int cmd_length(struct parser_exec_state *s)
+{
+ return get_cmd_length(s->info, cmd_val(s, 0));
+}
+
+/* do not remove this, some platforms may need clflush here */
+#define patch_value(s, addr, val) do { \
+ *addr = val; \
+} while (0)
+
+static bool is_shadowed_mmio(unsigned int offset)
+{
+ bool ret = false;
+
+ if ((offset == 0x2168) || /* BB current head register UDW */
+ (offset == 0x2140) || /* BB current head register */
+ (offset == 0x211c) || /* second BB head register UDW */
+ (offset == 0x2114)) { /* second BB head register */
+ ret = true;
+ }
+ return ret;
+}
+
+static int cmd_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index, char *cmd)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ if (offset + 4 > gvt->device_info.mmio_size) {
+ gvt_err("%s access to (%x) outside of MMIO range\n",
+ cmd, offset);
+ return -EINVAL;
+ }
+
+ if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+ gvt_err("vgpu%d: %s access to non-render register (%x)\n",
+ s->vgpu->id, cmd, offset);
+ return 0;
+ }
+
+ if (is_shadowed_mmio(offset)) {
+ gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
+ s->vgpu->id, offset);
+ return 0;
+ }
+
+ if (offset == i915_mmio_reg_offset(DERRMR) ||
+ offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
+ /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
+ patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
+ }
+
+ /* TODO: Update the global mask if this MMIO is a masked-MMIO */
+ intel_gvt_mmio_set_cmd_accessed(gvt, offset);
+ return 0;
+}
+
+#define cmd_reg(s, i) \
+ (cmd_val(s, i) & GENMASK(22, 2))
+
+#define cmd_reg_inhibit(s, i) \
+ (cmd_val(s, i) & GENMASK(22, 18))
+
+#define cmd_gma(s, i) \
+ (cmd_val(s, i) & GENMASK(31, 2))
+
+#define cmd_gma_hi(s, i) \
+ (cmd_val(s, i) & GENMASK(15, 0))
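+
+/*
+ * cmd_reg() masks out the DWord-aligned register offset (bits 22:2) of an
+ * operand, cmd_reg_inhibit() tests bits 22:18, which the LRI/LRR/LRM
+ * handlers below use to reject certain register writes on Broadwell, and
+ * cmd_gma()/cmd_gma_hi() extract the low and high parts of a graphics
+ * memory address.  E.g. an MI_LOAD_REGISTER_IMM operand of 0x00002358
+ * yields cmd_reg() == 0x2358.
+ */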
+
+static int cmd_handler_lri(struct parser_exec_state *s)
+{
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+ struct intel_gvt *gvt = s->vgpu->gvt;
+
+ for (i = 1; i < cmd_len; i += 2) {
+ if (IS_BROADWELL(gvt->dev_priv) &&
+ (s->ring_id != RCS)) {
+ if (s->ring_id == BCS &&
+ cmd_reg(s, i) ==
+ i915_mmio_reg_offset(DERRMR))
+ ret |= 0;
+ else
+ ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ }
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+ }
+ return ret;
+}
+
+static int cmd_handler_lrr(struct parser_exec_state *s)
+{
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len; i += 2) {
+ if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ ret |= ((cmd_reg_inhibit(s, i) ||
+ (cmd_reg_inhibit(s, i + 1)))) ?
+ -EINVAL : 0;
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+ ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+ }
+ return ret;
+}
+
+static inline int cmd_address_audit(struct parser_exec_state *s,
+ unsigned long guest_gma, int op_size, bool index_mode);
+
+static int cmd_handler_lrm(struct parser_exec_state *s)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+ int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len;) {
+ if (IS_BROADWELL(gvt->dev_priv))
+ ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+ if (cmd_val(s, 0) & (1 << 22)) {
+ gma = cmd_gma(s, i + 1);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, i + 2)) << 32;
+ ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ }
+ i += gmadr_dw_number(s) + 1;
+ }
+ return ret;
+}
+
+static int cmd_handler_srm(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len;) {
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+ if (cmd_val(s, 0) & (1 << 22)) {
+ gma = cmd_gma(s, i + 1);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, i + 2)) << 32;
+ ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ }
+ i += gmadr_dw_number(s) + 1;
+ }
+ return ret;
+}
+
+struct cmd_interrupt_event {
+ int pipe_control_notify;
+ int mi_flush_dw;
+ int mi_user_interrupt;
+};
+
+static struct cmd_interrupt_event cmd_interrupt_events[] = {
+ [RCS] = {
+ .pipe_control_notify = RCS_PIPE_CONTROL,
+ .mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
+ .mi_user_interrupt = RCS_MI_USER_INTERRUPT,
+ },
+ [BCS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = BCS_MI_FLUSH_DW,
+ .mi_user_interrupt = BCS_MI_USER_INTERRUPT,
+ },
+ [VCS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VCS_MI_FLUSH_DW,
+ .mi_user_interrupt = VCS_MI_USER_INTERRUPT,
+ },
+ [VCS2] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VCS2_MI_FLUSH_DW,
+ .mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
+ },
+ [VECS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VECS_MI_FLUSH_DW,
+ .mi_user_interrupt = VECS_MI_USER_INTERRUPT,
+ },
+};
+
+static int cmd_handler_pipe_control(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ bool index_mode = false;
+ unsigned int post_sync;
+ int ret = 0;
+
+ post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
+
+ /* LRI post sync */
+ if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
+ ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
+ /* post sync */
+ else if (post_sync) {
+ if (post_sync == 2)
+ ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
+ else if (post_sync == 3)
+ ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
+ else if (post_sync == 1) {
+ /* check ggtt */
+ if ((cmd_val(s, 2) & (1 << 2))) {
+ gma = cmd_val(s, 2) & GENMASK(31, 3);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, 3)) << 32;
+ /* Store Data Index */
+ if (cmd_val(s, 1) & (1 << 21))
+ index_mode = true;
+ ret |= cmd_address_audit(s, gma, sizeof(u64),
+ index_mode);
+ }
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
+ set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
+ s->workload->pending_events);
+ return 0;
+}
+
+static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
+{
+ set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
+ s->workload->pending_events);
+ return 0;
+}
+
+static int cmd_advance_default(struct parser_exec_state *s)
+{
+ return ip_gma_advance(s, cmd_length(s));
+}
+
+static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
+{
+ int ret;
+
+ if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+ s->buf_type = BATCH_BUFFER_INSTRUCTION;
+ ret = ip_gma_set(s, s->ret_ip_gma_bb);
+ s->buf_addr_type = s->saved_buf_addr_type;
+ } else {
+ s->buf_type = RING_BUFFER_INSTRUCTION;
+ s->buf_addr_type = GTT_BUFFER;
+ if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
+ s->ret_ip_gma_ring -= s->ring_size;
+ ret = ip_gma_set(s, s->ret_ip_gma_ring);
+ }
+ return ret;
+}
+
+struct mi_display_flip_command_info {
+ int pipe;
+ int plane;
+ int event;
+ i915_reg_t stride_reg;
+ i915_reg_t ctrl_reg;
+ i915_reg_t surf_reg;
+ u64 stride_val;
+ u64 tile_val;
+ u64 surf_val;
+ bool async_flip;
+};
+
+struct plane_code_mapping {
+ int pipe;
+ int plane;
+ int event;
+};
+
+static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct plane_code_mapping gen8_plane_code[] = {
+ [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
+ [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
+ [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
+ [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
+ [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
+ [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
+ };
+ u32 dword0, dword1, dword2;
+ u32 v;
+
+ dword0 = cmd_val(s, 0);
+ dword1 = cmd_val(s, 1);
+ dword2 = cmd_val(s, 2);
+
+ v = (dword0 & GENMASK(21, 19)) >> 19;
+ if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ return -EINVAL;
+
+ info->pipe = gen8_plane_code[v].pipe;
+ info->plane = gen8_plane_code[v].plane;
+ info->event = gen8_plane_code[v].event;
+ info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+ info->tile_val = (dword1 & 0x1);
+ info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+ info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+ if (info->plane == PLANE_A) {
+ info->ctrl_reg = DSPCNTR(info->pipe);
+ info->stride_reg = DSPSTRIDE(info->pipe);
+ info->surf_reg = DSPSURF(info->pipe);
+ } else if (info->plane == PLANE_B) {
+ info->ctrl_reg = SPRCTL(info->pipe);
+ info->stride_reg = SPRSTRIDE(info->pipe);
+ info->surf_reg = SPRSURF(info->pipe);
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int skl_decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ u32 dword0 = cmd_val(s, 0);
+ u32 dword1 = cmd_val(s, 1);
+ u32 dword2 = cmd_val(s, 2);
+ u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
+
+ switch (plane) {
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
+ info->pipe = PIPE_A;
+ info->event = PRIMARY_A_FLIP_DONE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
+ info->pipe = PIPE_B;
+ info->event = PRIMARY_B_FLIP_DONE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
+ info->pipe = PIPE_C;
+ info->event = PRIMARY_C_FLIP_DONE;
+ break;
+ default:
+ gvt_err("unknown plane code %d\n", plane);
+ return -EINVAL;
+ }
+
+ info->plane = PRIMARY_PLANE;
+ info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+ info->tile_val = (dword1 & GENMASK(2, 0));
+ info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+ info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+ info->ctrl_reg = DSPCNTR(info->pipe);
+ info->stride_reg = DSPSTRIDE(info->pipe);
+ info->surf_reg = DSPSURF(info->pipe);
+
+ return 0;
+}
+
+static int gen8_check_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ u32 stride, tile;
+
+ if (!info->async_flip)
+ return 0;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+ tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+ GENMASK(12, 10)) >> 10;
+ } else {
+ stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+ GENMASK(15, 6)) >> 6;
+ tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+ }
+
+ if (stride != info->stride_val)
+ gvt_dbg_cmd("cannot change stride during async flip\n");
+
+ if (tile != info->tile_val)
+ gvt_dbg_cmd("cannot change tile during async flip\n");
+
+ return 0;
+}
+
+static int gen8_update_plane_mmio_from_mi_display_flip(
+ struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct intel_vgpu *vgpu = s->vgpu;
+
+ set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+ info->surf_val << 12);
+ if (IS_SKYLAKE(dev_priv)) {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+ info->stride_val);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+ info->tile_val << 10);
+ } else {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+ info->stride_val << 6);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+ info->tile_val << 10);
+ }
+
+ vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ return 0;
+}
+
+static int decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv))
+ return gen8_decode_mi_display_flip(s, info);
+ if (IS_SKYLAKE(dev_priv))
+ return skl_decode_mi_display_flip(s, info);
+
+ return -ENODEV;
+}
+
+static int check_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ return gen8_check_mi_display_flip(s, info);
+ return -ENODEV;
+}
+
+static int update_plane_mmio_from_mi_display_flip(
+ struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ return gen8_update_plane_mmio_from_mi_display_flip(s, info);
+ return -ENODEV;
+}
+
+static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
+{
+ struct mi_display_flip_command_info info;
+ int ret;
+ int i;
+ int len = cmd_length(s);
+
+ ret = decode_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("fail to decode MI display flip command\n");
+ return ret;
+ }
+
+ ret = check_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("invalid MI display flip command\n");
+ return ret;
+ }
+
+ ret = update_plane_mmio_from_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("fail to update plane mmio\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i++)
+ patch_value(s, cmd_ptr(s, i), MI_NOOP);
+ return 0;
+}
+
+static bool is_wait_for_flip_pending(u32 cmd)
+{
+ return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
+ MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
+ MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
+}
+
+static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
+{
+ u32 cmd = cmd_val(s, 0);
+
+ if (!is_wait_for_flip_pending(cmd))
+ return 0;
+
+ patch_value(s, cmd_ptr(s, 0), MI_NOOP);
+ return 0;
+}
+
+static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
+{
+ unsigned long addr;
+ unsigned long gma_high, gma_low;
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ return INTEL_GVT_INVALID_ADDR;
+
+ gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
+ if (gmadr_bytes == 4) {
+ addr = gma_low;
+ } else {
+ gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
+ addr = (((unsigned long)gma_high) << 32) | gma_low;
+ }
+ return addr;
+}
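+
+/*
+ * Example: with gmadr_bytes_in_cmd == 8, if the masked low and high address
+ * DWords of an MI_BATCH_BUFFER_START are 0x00001000 and 0x1, the batch
+ * buffer start address assembled above is (0x1UL << 32) | 0x1000 =
+ * 0x100001000.
+ */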
+
+static inline int cmd_address_audit(struct parser_exec_state *s,
+ unsigned long guest_gma, int op_size, bool index_mode)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
+ int i;
+ int ret;
+
+ if (op_size > max_surface_size) {
+ gvt_err("command address audit fail name %s\n", s->info->name);
+ return -EINVAL;
+ }
+
+ if (index_mode) {
+ if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ } else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
+ (!vgpu_gmadr_is_valid(s->vgpu,
+ guest_gma + op_size - 1))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+ s->info->name, guest_gma, op_size);
+
+ pr_err("cmd dump: ");
+ for (i = 0; i < cmd_length(s); i++) {
+ if (!(i % 4))
+ pr_err("\n%08x ", cmd_val(s, i));
+ else
+ pr_err("%08x ", cmd_val(s, i));
+ }
+ pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
+ vgpu->id,
+ vgpu_aperture_gmadr_base(vgpu),
+ vgpu_aperture_gmadr_end(vgpu),
+ vgpu_hidden_gmadr_base(vgpu),
+ vgpu_hidden_gmadr_end(vgpu));
+ return ret;
+}
+
+static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ int op_size = (cmd_length(s) - 3) * sizeof(u32);
+ int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
+ unsigned long gma, gma_low, gma_high;
+ int ret = 0;
+
+ /* check ppgtt */
+ if (!(cmd_val(s, 0) & (1 << 22)))
+ return 0;
+
+ gma = cmd_val(s, 2) & GENMASK(31, 2);
+
+ if (gmadr_bytes == 8) {
+ gma_low = cmd_val(s, 1) & GENMASK(31, 2);
+ gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+ gma = (gma_high << 32) | gma_low;
+ core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
+ }
+ ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
+ return ret;
+}
+
+static inline int unexpected_cmd(struct parser_exec_state *s)
+{
+ gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
+ s->vgpu->id, s->info->name);
+ return -EINVAL;
+}
+
+static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+ sizeof(u32);
+ unsigned long gma, gma_high;
+ int ret = 0;
+
+ if (!(cmd_val(s, 0) & (1 << 22)))
+ return ret;
+
+ gma = cmd_val(s, 1) & GENMASK(31, 2);
+ if (gmadr_bytes == 8) {
+ gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+ gma = (gma_high << 32) | gma;
+ }
+ ret = cmd_address_audit(s, gma, op_size, false);
+ return ret;
+}
+
+static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_clflush(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_conditional_batch_buffer_end(
+ struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ bool index_mode = false;
+ int ret = 0;
+
+ /* Check post-sync and ppgtt bit */
+ if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
+ gma = cmd_val(s, 1) & GENMASK(31, 3);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
+ /* Store Data Index */
+ if (cmd_val(s, 0) & (1 << 21))
+ index_mode = true;
+ ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ }
+ /* Check notify bit */
+ if ((cmd_val(s, 0) & (1 << 8)))
+ set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
+ s->workload->pending_events);
+ return ret;
+}
+
+static void addr_type_update_snb(struct parser_exec_state *s)
+{
+ if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
+ (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
+ s->buf_addr_type = PPGTT_BUFFER;
+ }
+}
+
+
+static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
+ unsigned long gma, unsigned long end_gma, void *va)
+{
+ unsigned long copy_len, offset;
+ unsigned long len = 0;
+ unsigned long gpa;
+
+ while (gma != end_gma) {
+ gpa = intel_vgpu_gma_to_gpa(mm, gma);
+ if (gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid gma address: %lx\n", gma);
+ return -EFAULT;
+ }
+
+ offset = gma & (GTT_PAGE_SIZE - 1);
+
+ copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
+ GTT_PAGE_SIZE - offset : end_gma - gma;
+
+ intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
+
+ len += copy_len;
+ gma += copy_len;
+ }
+ return 0;
+}
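+
+/*
+ * Example of the page-split logic above, assuming a 4KiB GTT_PAGE_SIZE:
+ * copying gma 0x1ff0..0x2010 is done in two chunks, first 0x10 bytes up to
+ * the page boundary (offset 0xff0, copy_len = 0x1000 - 0xff0), then the
+ * remaining 0x10 bytes from the start of the next page.
+ */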
+
+
+/*
+ * Check whether a batch buffer needs to be scanned. Currently
+ * the only criterion is privilege.
+ */
+static int batch_buffer_needs_scan(struct parser_exec_state *s)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+
+ if (bypass_batch_buffer_scan)
+ return 0;
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ /* BDW decides privilege based on address space */
+ if (cmd_val(s, 0) & (1 << 8))
+ return 0;
+ }
+ return 1;
+}
+
+static uint32_t find_bb_size(struct parser_exec_state *s)
+{
+ unsigned long gma = 0;
+ struct cmd_info *info;
+ uint32_t bb_size = 0;
+ uint32_t cmd_len = 0;
+ bool met_bb_end = false;
+ u32 cmd;
+
+ /* get the start gm address of the batch buffer */
+ gma = get_gma_bb_from_cmd(s, 1);
+ cmd = cmd_val(s, 0);
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+ do {
+ copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + 4, &cmd);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+
+ if (info->opcode == OP_MI_BATCH_BUFFER_END) {
+ met_bb_end = true;
+ } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
+ if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+ /* chained batch buffer */
+ met_bb_end = true;
+ }
+ }
+ cmd_len = get_cmd_length(info, cmd) << 2;
+ bb_size += cmd_len;
+ gma += cmd_len;
+
+ } while (!met_bb_end);
+
+ return bb_size;
+}
+
+static int perform_bb_shadow(struct parser_exec_state *s)
+{
+ struct intel_shadow_bb_entry *entry_obj;
+ unsigned long gma = 0;
+ uint32_t bb_size;
+ void *dst = NULL;
+ int ret = 0;
+
+ /* get the start gm address of the batch buffer */
+ gma = get_gma_bb_from_cmd(s, 1);
+
+ /* get the size of the batch buffer */
+ bb_size = find_bb_size(s);
+
+ /* allocate shadow batch buffer */
+ entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
+ if (entry_obj == NULL)
+ return -ENOMEM;
+
+ entry_obj->obj =
+ i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+ roundup(bb_size, PAGE_SIZE));
+ if (IS_ERR(entry_obj->obj)) {
+ ret = PTR_ERR(entry_obj->obj);
+ goto free_entry;
+ }
+ entry_obj->len = bb_size;
+ INIT_LIST_HEAD(&entry_obj->list);
+
+ dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto put_obj;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow batch to CPU\n");
+ goto unmap_src;
+ }
+
+ entry_obj->va = dst;
+ entry_obj->bb_start_cmd_va = s->ip_va;
+
+ /* copy batch buffer to shadow batch buffer */
+ ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + bb_size,
+ dst);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ goto unmap_src;
+ }
+
+ list_add(&entry_obj->list, &s->workload->shadow_bb);
+ /*
+ * ip_va saves the virtual address of the shadow batch buffer, while
+ * ip_gma saves the graphics address of the original batch buffer.
+ * As the shadow batch buffer is just a copy of the original one,
+ * it is fine to use the shadow batch buffer's va together with the
+ * original batch buffer's gma in pair. After all, we don't want to
+ * pin the shadow buffer here (it's too early).
+ */
+ s->ip_va = dst;
+ s->ip_gma = gma;
+
+ return 0;
+
+unmap_src:
+ i915_gem_object_unpin_map(entry_obj->obj);
+put_obj:
+ i915_gem_object_put(entry_obj->obj);
+free_entry:
+ kfree(entry_obj);
+ return ret;
+}
+
+static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
+{
+ bool second_level;
+ int ret = 0;
+
+ if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+ gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+ return -EINVAL;
+ }
+
+ second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
+ if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
+ gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+ return -EINVAL;
+ }
+
+ s->saved_buf_addr_type = s->buf_addr_type;
+ addr_type_update_snb(s);
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
+ s->buf_type = BATCH_BUFFER_INSTRUCTION;
+ } else if (second_level) {
+ s->buf_type = BATCH_BUFFER_2ND_LEVEL;
+ s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
+ s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
+ }
+
+ if (batch_buffer_needs_scan(s)) {
+ ret = perform_bb_shadow(s);
+ if (ret < 0)
+ gvt_err("invalid shadow batch buffer\n");
+ } else {
+ /* emulate a batch buffer end so the return is handled correctly */
+ ret = cmd_handler_mi_batch_buffer_end(s);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct cmd_info cmd_info[] = {
+ {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, NULL},
+
+ {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, cmd_handler_mi_user_interrupt},
+
+ {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
+ D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
+
+ {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
+ D_ALL, 0, 1, NULL},
+
+ {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
+ F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ cmd_handler_mi_batch_buffer_end},
+
+ {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, NULL},
+
+ {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
+ D_ALL, 0, 1, NULL},
+
+ {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+ R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
+
+ {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
+ 0, 8, NULL},
+
+ {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
+
+ {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+
+ {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
+
+ {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
+ 0, 8, cmd_handler_mi_store_data_index},
+
+ {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
+ D_ALL, 0, 8, cmd_handler_lri},
+
+ {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
+ cmd_handler_mi_update_gtt},
+
+ {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+
+ {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
+ cmd_handler_mi_flush_dw},
+
+ {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
+ 10, cmd_handler_mi_clflush},
+
+ {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+
+ {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+
+ {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
+ D_ALL, 0, 8, cmd_handler_lrr},
+
+ {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, NULL},
+
+ {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, NULL},
+
+ {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
+ 8, cmd_handler_mi_op_2e},
+
+ {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
+ 8, cmd_handler_mi_op_2f},
+
+ {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
+ F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
+ cmd_handler_mi_batch_buffer_start},
+
+ {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
+ F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_mi_conditional_batch_buffer_end},
+
+ {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
+ R_RCS | R_BCS, D_ALL, 0, 2, NULL},
+
+ {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ 0, 8, NULL},
+
+ {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+ {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ 0, 8, NULL},
+
+ {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(3), 8, NULL},
+
+ {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, 0, 8, NULL},
+
+ {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+ {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
+
+ {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
+ R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
+ OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
+ OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
+ OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
+ OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BLEND_STATE_POINTERS",
+ OP_3DSTATE_BLEND_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
+ OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_VS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_HS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_DS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_GS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_PS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+ {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+ {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+ 8, NULL},
+
+ {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
+ R_RCS, D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+ 8, NULL},
+
+ {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
+ R_RCS, D_ALL, 0, 1, NULL},
+
+ {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
+ D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
+ D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
+ R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ ADDR_FIX_2(2, 4), 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
+ OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
+ OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
+
+ {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
+ 1, NULL},
+
+ {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(1), 8, NULL},
+
+ {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
+
+ {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ 0, 8, NULL},
+
+ {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
+ D_SKL_PLUS, 0, 8, NULL},
+
+ {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+ {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+ {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
+ NULL},
+
+ {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
+ F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
+ R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
+ F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
+ F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
+
+ {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 6, NULL},
+
+ {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+ {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
+ R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
+
+ {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+ {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+ {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
+
+ {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
+ 0, 12, NULL},
+
+ {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
+ 0, 20, NULL},
+};
+
+static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
+{
+ hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
+}
+
+#define GVT_MAX_CMD_LENGTH 20 /* In Dword */
+
+static void trace_cs_command(struct parser_exec_state *s,
+ cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
+{
+ /* This buffer is used by ftrace to store all commands copied from
+ * guest gma space. Commands can sometimes cross page boundaries, which
+ * should not be handled in the ftrace logic, so this buffer acts as a
+ * 'bounce buffer'.
+ */
+ u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
+ int i;
+ u32 cmd_len = cmd_length(s);
+ /* The chosen value of GVT_MAX_CMD_LENGTH is based on the following
+ * two considerations:
+ * 1) From observation, most common ring commands are not that long.
+ *    But there are exceptions, so it makes sense to observe
+ *    longer commands.
+ * 2) From a performance and debugging point of view, dumping the full
+ *    contents of every command is not necessary.
+ * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in the
+ * future for performance reasons.
+ */
+ if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
+ gvt_dbg_cmd("cmd length exceeds tracing limit!\n");
+ cmd_len = GVT_MAX_CMD_LENGTH;
+ }
+
+ for (i = 0; i < cmd_len; i++)
+ cmd_trace_buf[i] = cmd_val(s, i);
+
+ trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
+ cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
+ cost_pre_cmd_handler, cost_cmd_handler);
+}
+
+/* call the cmd handler, and advance ip */
+static int cmd_parser_exec(struct parser_exec_state *s)
+{
+ struct cmd_info *info;
+ u32 cmd;
+ int ret = 0;
+ cycles_t t0, t1, t2;
+ struct parser_exec_state s_before_advance_custom;
+
+ t0 = get_cycles();
+
+ cmd = cmd_val(s, 0);
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+
+ gvt_dbg_cmd("%s\n", info->name);
+
+ s->info = info;
+
+ t1 = get_cycles();
+
+ memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+
+ if (info->handler) {
+ ret = info->handler(s);
+ if (ret < 0) {
+ gvt_err("%s handler error\n", info->name);
+ return ret;
+ }
+ }
+ t2 = get_cycles();
+
+ trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
+
+ if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
+ ret = cmd_advance_default(s);
+ if (ret) {
+ gvt_err("%s IP advance error\n", info->name);
+ return ret;
+ }
+ }
+ return 0;
+}
+
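+/* Return true if @gma falls outside the [gma_head, gma_tail] window of the
+ * guest ring buffer, taking wrap-around (tail below head) into account.
+ */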
+static inline bool gma_out_of_range(unsigned long gma,
+ unsigned long gma_head, unsigned long gma_tail)
+{
+ if (gma_tail >= gma_head)
+ return (gma < gma_head) || (gma > gma_tail);
+ else
+ return (gma > gma_tail) && (gma < gma_head);
+}
+
+static int command_scan(struct parser_exec_state *s,
+ unsigned long rb_head, unsigned long rb_tail,
+ unsigned long rb_start, unsigned long rb_len)
+{
+
+ unsigned long gma_head, gma_tail, gma_bottom;
+ int ret = 0;
+
+ gma_head = rb_start + rb_head;
+ gma_tail = rb_start + rb_tail;
+ gma_bottom = rb_start + rb_len;
+
+ gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
+
+ while (s->ip_gma != gma_tail) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (!(s->ip_gma >= rb_start) ||
+ !(s->ip_gma < gma_bottom)) {
+ gvt_err("ip_gma %lx out of ring scope. "
+ "(base:0x%lx, bottom: 0x%lx)\n",
+ s->ip_gma, rb_start,
+ gma_bottom);
+ parser_exec_state_dump(s);
+ return -EINVAL;
+ }
+ if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
+ gvt_err("ip_gma %lx out of range. "
+ "base 0x%lx head 0x%lx tail 0x%lx\n",
+ s->ip_gma, rb_start,
+ rb_head, rb_tail);
+ parser_exec_state_dump(s);
+ break;
+ }
+ }
+ ret = cmd_parser_exec(s);
+ if (ret) {
+ gvt_err("cmd parser error\n");
+ parser_exec_state_dump(s);
+ break;
+ }
+ }
+
+ gvt_dbg_cmd("scan_end\n");
+
+ return ret;
+}
+
+static int scan_workload(struct intel_vgpu_workload *workload)
+{
+ unsigned long gma_head, gma_tail, gma_bottom;
+ struct parser_exec_state s;
+ int ret = 0;
+
+ /* ring base is page aligned */
+ if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+ return -EINVAL;
+
+ gma_head = workload->rb_start + workload->rb_head;
+ gma_tail = workload->rb_start + workload->rb_tail;
+ gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+ s.buf_type = RING_BUFFER_INSTRUCTION;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = workload->vgpu;
+ s.ring_id = workload->ring_id;
+ s.ring_start = workload->rb_start;
+ s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+ s.ring_head = gma_head;
+ s.ring_tail = gma_tail;
+ s.rb_va = workload->shadow_ring_buffer_va;
+ s.workload = workload;
+
+ if ((bypass_scan_mask & (1 << workload->ring_id)) ||
+ gma_head == gma_tail)
+ return 0;
+
+ ret = ip_gma_set(&s, gma_head);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, workload->rb_head, workload->rb_tail,
+ workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
+
+out:
+ return ret;
+}
+
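+/* Scan the shadowed indirect (workaround) context as a pseudo ring buffer:
+ * head starts at the buffer base and the tail covers the indirect context
+ * plus the per-context pointer appended by combine_wa_ctx() (3 extra dwords).
+ */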
+static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+
+ unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
+ struct parser_exec_state s;
+ int ret = 0;
+
+ /* ring base is page aligned */
+ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+ return -EINVAL;
+
+ ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+ ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
+ PAGE_SIZE);
+ gma_head = wa_ctx->indirect_ctx.guest_gma;
+ gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
+ gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
+
+ s.buf_type = RING_BUFFER_INSTRUCTION;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = wa_ctx->workload->vgpu;
+ s.ring_id = wa_ctx->workload->ring_id;
+ s.ring_start = wa_ctx->indirect_ctx.guest_gma;
+ s.ring_size = ring_size;
+ s.ring_head = gma_head;
+ s.ring_tail = gma_tail;
+ s.rb_va = wa_ctx->indirect_ctx.shadow_va;
+ s.workload = wa_ctx->workload;
+
+ ret = ip_gma_set(&s, gma_head);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, 0, ring_tail,
+ wa_ctx->indirect_ctx.guest_gma, ring_size);
+out:
+ return ret;
+}
+
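+/* Reserve space in the host ring of the workload's request and copy the
+ * guest ring buffer contents between rb_head and rb_tail into it, splitting
+ * the copy in two when the guest buffer wraps past its top.
+ */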
+static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
+ unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
+ unsigned int copy_len = 0;
+ int ret;
+
+ guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+ /* calculate workload ring buffer size */
+ workload->rb_len = (workload->rb_tail + guest_rb_size -
+ workload->rb_head) % guest_rb_size;
+
+ gma_head = workload->rb_start + workload->rb_head;
+ gma_tail = workload->rb_start + workload->rb_tail;
+ gma_top = workload->rb_start + guest_rb_size;
+
+ /* allocate shadow ring buffer */
+ ret = intel_ring_begin(workload->req, workload->rb_len / 4);
+ if (ret)
+ return ret;
+
+ /* get shadow ring buffer va */
+ workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+
+ /* head > tail --> copy head <-> top */
+ if (gma_head > gma_tail) {
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+ gma_head, gma_top,
+ workload->shadow_ring_buffer_va);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ return ret;
+ }
+ copy_len = gma_top - gma_head;
+ gma_head = workload->rb_start;
+ }
+
+ /* copy head or start <-> tail */
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+ gma_head, gma_tail,
+ workload->shadow_ring_buffer_va + copy_len);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ return ret;
+ }
+ ring->tail += workload->rb_len;
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+{
+ int ret;
+
+ ret = shadow_workload_ring_buffer(workload);
+ if (ret) {
+ gvt_err("fail to shadow workload ring_buffer\n");
+ return ret;
+ }
+
+ ret = scan_workload(workload);
+ if (ret) {
+ gvt_err("scan workload error\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
+ int ctx_size = wa_ctx->indirect_ctx.size;
+ unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+ void *map;
+
+ obj = i915_gem_object_create(dev,
+ roundup(ctx_size + CACHELINE_BYTES,
+ PAGE_SIZE));
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ /* get the va of the shadow batch buffer */
+ map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(map)) {
+ gvt_err("failed to vmap shadow indirect ctx\n");
+ ret = PTR_ERR(map);
+ goto put_obj;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow indirect ctx to CPU\n");
+ goto unmap_src;
+ }
+
+ ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
+ wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ guest_gma, guest_gma + ctx_size,
+ map);
+ if (ret) {
+ gvt_err("fail to copy guest indirect ctx\n");
+ goto unmap_src;
+ }
+
+ wa_ctx->indirect_ctx.obj = obj;
+ wa_ctx->indirect_ctx.shadow_va = map;
+ return 0;
+
+unmap_src:
+ i915_gem_object_unpin_map(obj);
+put_obj:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
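+/* Append a jump to the guest per-context buffer at the end of the shadow
+ * indirect context (0x18800001 is taken to be an MI_BATCH_BUFFER_START
+ * header here), so the scanner sees the combined stream.
+ */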
+static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+ unsigned char *bb_start_sva;
+
+ per_ctx_start[0] = 0x18800001;
+ per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
+
+ bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
+
+ return 0;
+}
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ int ret;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return 0;
+
+ ret = shadow_indirect_ctx(wa_ctx);
+ if (ret) {
+ gvt_err("fail to shadow indirect ctx\n");
+ return ret;
+ }
+
+ combine_wa_ctx(wa_ctx);
+
+ ret = scan_wa_ctx(wa_ctx);
+ if (ret) {
+ gvt_err("scan wa ctx error\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+ unsigned int opcode, int rings)
+{
+ struct cmd_info *info = NULL;
+ unsigned int ring;
+
+ for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+ info = find_cmd_entry(gvt, opcode, ring);
+ if (info)
+ break;
+ }
+ return info;
+}
+
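+/* Build the per-device command hash table from cmd_info[], skipping entries
+ * that do not apply to the current GPU generation and rejecting duplicated
+ * opcodes across rings.
+ */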
+static int init_cmd_table(struct intel_gvt *gvt)
+{
+ int i;
+ struct cmd_entry *e;
+ struct cmd_info *info;
+ unsigned int gen_type;
+
+ gen_type = intel_gvt_get_device_type(gvt);
+
+ for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ if (!(cmd_info[i].devices & gen_type))
+ continue;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->info = &cmd_info[i];
+ info = find_cmd_entry_any_ring(gvt,
+ e->info->opcode, e->info->rings);
+ if (info) {
+ gvt_err("%s %s duplicated\n", e->info->name,
+ info->name);
+ return -EEXIST;
+ }
+
+ INIT_HLIST_NODE(&e->hlist);
+ add_cmd_entry(gvt, e);
+ gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
+ }
+ return 0;
+}
+
+static void clean_cmd_table(struct intel_gvt *gvt)
+{
+ struct hlist_node *tmp;
+ struct cmd_entry *e;
+ int i;
+
+ hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
+ kfree(e);
+
+ hash_init(gvt->cmd_table);
+}
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
+{
+ clean_cmd_table(gvt);
+}
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
+{
+ int ret;
+
+ ret = init_cmd_table(gvt);
+ if (ret) {
+ intel_gvt_clean_cmd_parser(gvt);
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
new file mode 100644
index 000000000000..bed33514103c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+#ifndef _GVT_CMD_PARSER_H_
+#define _GVT_CMD_PARSER_H_
+
+#define GVT_CMD_HASH_BITS 7
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 7ef412be665f..68cba7bd980a 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -24,11 +24,34 @@
#ifndef __GVT_DEBUG_H__
#define __GVT_DEBUG_H__
+#define gvt_err(fmt, args...) \
+ DRM_ERROR("gvt: "fmt, ##args)
+
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
-/*
- * Other GVT debug stuff will be introduced in the GVT device model patches.
- */
+#define gvt_dbg_irq(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+
+#define gvt_dbg_mm(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+
+#define gvt_dbg_mmio(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+
+#define gvt_dbg_dpy(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+
+#define gvt_dbg_el(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+
+#define gvt_dbg_sched(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+
+#define gvt_dbg_render(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+
+#define gvt_dbg_cmd(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
new file mode 100644
index 000000000000..c0c884aeb30e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int get_edp_pipe(struct intel_vgpu *vgpu)
+{
+ u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
+ int pipe = -1;
+
+ switch (data & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+ return pipe;
+}
+
+static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ return 0;
+
+ if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
+ return 0;
+ return 1;
+}
+
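+/* A pipe is considered enabled if its own PIPECONF enable bit is set, or if
+ * the eDP transcoder is enabled and currently routed to this pipe.
+ */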
+static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+ return -EINVAL;
+
+ if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ return 1;
+
+ if (edp_pipe_is_enabled(vgpu) &&
+ get_edp_pipe(vgpu) == pipe)
+ return 1;
+ return 0;
+}
+
+/* EDID with 1024x768 as its resolution */
+static unsigned char virtual_dp_monitor_edid[] = {
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /* Standard Timings. All invalid */
+ 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+ /* 18 Byte Data Blocks 1: invalid */
+ 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0xef,
+};
+
+#define DPCD_HEADER_SIZE 0xb
+
+static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
+ 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
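+/* Refresh the vGPU's hot-plug status registers: clear all port hot-plug bits
+ * first, then set the bits for every port that has a virtual monitor
+ * attached.
+ */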
+static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
+
+ if (IS_SKYLAKE(dev_priv))
+ vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+ SDE_PORTE_HOTPLUG_SPT);
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+
+ if (IS_SKYLAKE(dev_priv) &&
+ intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (IS_BROADWELL(dev_priv))
+ vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+ GEN8_PORT_DP_A_HOTPLUG;
+ else
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+ }
+}
+
+static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
+{
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+ kfree(port->edid);
+ port->edid = NULL;
+
+ kfree(port->dpcd);
+ port->dpcd = NULL;
+}
+
+static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
+ int type)
+{
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+ port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
+ if (!port->edid)
+ return -ENOMEM;
+
+ port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
+ if (!port->dpcd) {
+ kfree(port->edid);
+ return -ENOMEM;
+ }
+
+ memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+ EDID_SIZE);
+ port->edid->data_valid = true;
+
+ memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
+ port->dpcd->data_valid = true;
+ port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
+ port->type = type;
+
+ emulate_monitor_status_change(vgpu);
+ return 0;
+}
+
+/**
+ * intel_gvt_check_vblank_emulation - check if vblank emulation timer should
+ * be turned on/off when a virtual pipe is enabled/disabled.
+ * @gvt: a GVT device
+ *
+ * This function is used to turn on/off vblank timer according to currently
+ * enabled/disabled virtual pipes.
+ *
+ */
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ struct intel_vgpu *vgpu;
+ bool have_enabled_pipe = false;
+ int pipe, id;
+
+ if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+ return;
+
+ hrtimer_cancel(&irq->vblank_timer.timer);
+
+ for_each_active_vgpu(gvt, vgpu, id) {
+ for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
+ have_enabled_pipe =
+ pipe_is_enabled(vgpu, pipe);
+ if (have_enabled_pipe)
+ break;
+ }
+ }
+
+ if (have_enabled_pipe)
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+}
+
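+/* Deliver any pending flip-done events and one vblank event for @pipe,
+ * bumping the virtual flip/frame counters the guest reads, but only while
+ * the pipe is enabled.
+ */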
+static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_irq *irq = &vgpu->irq;
+ int vblank_event[] = {
+ [PIPE_A] = PIPE_A_VBLANK,
+ [PIPE_B] = PIPE_B_VBLANK,
+ [PIPE_C] = PIPE_C_VBLANK,
+ };
+ int event;
+
+ if (pipe < PIPE_A || pipe > PIPE_C)
+ return;
+
+ for_each_set_bit(event, irq->flip_done_event[pipe],
+ INTEL_GVT_EVENT_MAX) {
+ clear_bit(event, irq->flip_done_event[pipe]);
+ if (!pipe_is_enabled(vgpu, pipe))
+ continue;
+
+ vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ }
+
+ if (pipe_is_enabled(vgpu, pipe)) {
+ vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
+ }
+}
+
+static void emulate_vblank(struct intel_vgpu *vgpu)
+{
+ int pipe;
+
+ for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ emulate_vblank_on_pipe(vgpu, pipe);
+}
+
+/**
+ * intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
+ * @gvt: a GVT device
+ *
+ * This function is used to trigger vblank interrupts for vGPUs on GVT device
+ *
+ */
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int id;
+
+ if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+ return;
+
+ for_each_active_vgpu(gvt, vgpu, id)
+ emulate_vblank(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_display - clean vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up the vGPU virtual display emulation state.
+ *
+ */
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (IS_SKYLAKE(dev_priv))
+ clean_virtual_dp_monitor(vgpu, PORT_D);
+ else
+ clean_virtual_dp_monitor(vgpu, PORT_B);
+}
+
+/**
+ * intel_vgpu_init_display - initialize vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize the vGPU virtual display emulation state.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ intel_vgpu_init_i2c_edid(vgpu);
+
+ if (IS_SKYLAKE(dev_priv))
+ return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+ else
+ return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
new file mode 100644
index 000000000000..7a60cb848268
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_DISPLAY_H_
+#define _GVT_DISPLAY_H_
+
+#define SBI_REG_MAX 20
+#define DPCD_SIZE 0x700
+
+#define intel_vgpu_port(vgpu, port) \
+ (&(vgpu->display.ports[port]))
+
+#define intel_vgpu_has_monitor_on_port(vgpu, port) \
+ (intel_vgpu_port(vgpu, port)->edid && \
+ intel_vgpu_port(vgpu, port)->edid->data_valid)
+
+#define intel_vgpu_port_is_dp(vgpu, port) \
+ ((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
+
+#define INTEL_GVT_MAX_UEVENT_VARS 3
+
+/* DPCD start */
+#define DPCD_SIZE 0x700
+
+/* DPCD */
+#define DP_SET_POWER 0x600
+#define DP_SET_POWER_D0 0x1
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+
+#define AUX_BURST_SIZE 16
+
+/* DPCD addresses */
+#define DPCD_REV 0x000
+#define DPCD_MAX_LINK_RATE 0x001
+#define DPCD_MAX_LANE_COUNT 0x002
+
+#define DPCD_TRAINING_PATTERN_SET 0x102
+#define DPCD_SINK_COUNT 0x200
+#define DPCD_LANE0_1_STATUS 0x202
+#define DPCD_LANE2_3_STATUS 0x203
+#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DPCD_SINK_STATUS 0x205
+
+/* link training */
+#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
+#define DPCD_LINK_TRAINING_DISABLED 0x00
+#define DPCD_TRAINING_PATTERN_1 0x01
+#define DPCD_TRAINING_PATTERN_2 0x02
+
+#define DPCD_CP_READY_MASK (1 << 6)
+
+/* lane status */
+#define DPCD_LANES_CR_DONE 0x11
+#define DPCD_LANES_EQ_DONE 0x22
+#define DPCD_SYMBOL_LOCKED 0x44
+
+#define DPCD_INTERLANE_ALIGN_DONE 0x01
+
+#define DPCD_SINK_IN_SYNC 0x03
+/* DPCD end */
+
+#define SBI_RESPONSE_MASK 0x3
+#define SBI_RESPONSE_SHIFT 0x1
+#define SBI_STAT_MASK 0x1
+#define SBI_STAT_SHIFT 0x0
+#define SBI_OPCODE_SHIFT 8
+#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
+#define SBI_CMD_IORD 2
+#define SBI_CMD_IOWR 3
+#define SBI_CMD_CRRD 6
+#define SBI_CMD_CRWR 7
+#define SBI_ADDR_OFFSET_SHIFT 16
+#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
+
+struct intel_vgpu_sbi_register {
+ unsigned int offset;
+ u32 value;
+};
+
+struct intel_vgpu_sbi {
+ int number;
+ struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
+};
+
+enum intel_gvt_plane_type {
+ PRIMARY_PLANE = 0,
+ CURSOR_PLANE,
+ SPRITE_PLANE,
+ MAX_PLANE
+};
+
+struct intel_vgpu_dpcd_data {
+ bool data_valid;
+ u8 data[DPCD_SIZE];
+};
+
+enum intel_vgpu_port_type {
+ GVT_CRT = 0,
+ GVT_DP_A,
+ GVT_DP_B,
+ GVT_DP_C,
+ GVT_DP_D,
+ GVT_HDMI_B,
+ GVT_HDMI_C,
+ GVT_HDMI_D,
+ GVT_PORT_MAX
+};
+
+struct intel_vgpu_port {
+ /* per display EDID information */
+ struct intel_vgpu_edid_data *edid;
+ /* per display DPCD information */
+ struct intel_vgpu_dpcd_data *dpcd;
+ int type;
+};
+
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
+
+int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
new file mode 100644
index 000000000000..bda85dff7b2a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GMBUS1_TOTAL_BYTES_SHIFT 16
+#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
+#define gmbus1_total_byte_count(v) (((v) >> \
+ GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
+#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
+#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
+#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
+
+/* GMBUS0 bits definitions */
+#define _GMBUS_PIN_SEL_MASK (0x7)
+
+static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+ unsigned char chr = 0;
+
+ if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
+ gvt_err("Driver tries to read EDID without proper sequence!\n");
+ return 0;
+ }
+ if (edid->current_edid_read >= EDID_SIZE) {
+ gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+ return 0;
+ }
+
+ if (!edid->edid_available) {
+ gvt_err("Reading EDID but EDID is not available!\n");
+ return 0;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
+ struct intel_vgpu_edid_data *edid_data =
+ intel_vgpu_port(vgpu, edid->port)->edid;
+
+ chr = edid_data->edid_block[edid->current_edid_read];
+ edid->current_edid_read++;
+ } else {
+ gvt_err("No EDID available during the reading?\n");
+ }
+ return chr;
+}
+
+static inline int get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == 2)
+ port = PORT_E;
+ else if (port_select == 4)
+ port = PORT_C;
+ else if (port_select == 5)
+ port = PORT_B;
+ else if (port_select == 6)
+ port = PORT_D;
+ return port;
+}
+
+static void reset_gmbus_controller(struct intel_vgpu *vgpu)
+{
+ vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+ if (!vgpu->display.i2c_edid.edid_available)
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+}
+
+/* GMBUS0 */
+static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ int port, pin_select;
+
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+
+ pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;
+
+ intel_vgpu_init_i2c_edid(vgpu);
+
+ if (pin_select == 0)
+ return 0;
+
+ port = get_port_from_gmbus0(pin_select);
+ if (WARN_ON(port < 0))
+ return 0;
+
+ vgpu->display.i2c_edid.state = I2C_GMBUS;
+ vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
+ !intel_vgpu_port_is_dp(vgpu, port)) {
+ vgpu->display.i2c_edid.port = port;
+ vgpu->display.i2c_edid.edid_available = true;
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+ } else
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ return 0;
+}
+
+static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ u32 slave_addr;
+ u32 wvalue = *(u32 *)p_data;
+
+ if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
+ if (!(wvalue & GMBUS_SW_CLR_INT)) {
+ vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
+ reset_gmbus_controller(vgpu);
+ }
+ /*
+ * TODO: "This bit is cleared to zero when an event
+ * causes the HW_RDY bit transition to occur "
+ */
+ } else {
+ /*
+ * per bspec setting this bit can cause:
+ * 1) INT status bit cleared
+ * 2) HW_RDY bit asserted
+ */
+ if (wvalue & GMBUS_SW_CLR_INT) {
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+ }
+
+ /* For virtualization, we assume the HW is always ready,
+ * so GMBUS_SW_RDY should always be cleared.
+ */
+ if (wvalue & GMBUS_SW_RDY)
+ wvalue &= ~GMBUS_SW_RDY;
+
+ i2c_edid->gmbus.total_byte_count =
+ gmbus1_total_byte_count(wvalue);
+ slave_addr = gmbus1_slave_addr(wvalue);
+
+ /* vgpu gmbus only supports EDID */
+ if (slave_addr == EDID_ADDR) {
+ i2c_edid->slave_selected = true;
+ } else if (slave_addr != 0) {
+ gvt_dbg_dpy(
+ "vgpu%d: unsupported gmbus slave addr(0x%x)\n"
+ " gmbus operations will be ignored.\n",
+ vgpu->id, slave_addr);
+ }
+
+ if (wvalue & GMBUS_CYCLE_INDEX)
+ i2c_edid->current_edid_read =
+ gmbus1_slave_index(wvalue);
+
+ i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
+ switch (gmbus1_bus_cycle(wvalue)) {
+ case GMBUS_NOCYCLE:
+ break;
+ case GMBUS_STOP:
+ /* From spec:
+ * This can only cause a STOP to be generated
+ * if a GMBUS cycle is generated, the GMBUS is
+ * currently in a data/wait/idle phase, or it is in a
+ * WAIT phase
+ */
+ if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
+ != GMBUS_NOCYCLE) {
+ intel_vgpu_init_i2c_edid(vgpu);
+ /* After the 'stop' cycle, hw state would become
+ * 'stop phase' and then 'idle phase' after a
+ * few milliseconds. In emulation, we just set
+ * it as 'idle phase' ('stop phase' is not
+ * visible in gmbus interface)
+ */
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ }
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ case NIDX_STOP:
+ case IDX_STOP:
+ /* From the hw spec, the GMBUS phase
+ * transitions like this:
+ * START (-->INDEX) -->DATA
+ */
+ i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+ break;
+ default:
+ gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+ break;
+ }
+ /*
+ * From hw spec the WAIT state will be
+ * cleared:
+ * (1) in a new GMBUS cycle
+ * (2) by generating a stop
+ */
+ vgpu_vreg(vgpu, offset) = wvalue;
+ }
+ return 0;
+}
+
+static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ WARN_ON(1);
+ return 0;
+}
+
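+/* Emulate a GMBUS3 data read: hand back up to four EDID bytes per access and
+ * wind the virtual GMBUS state machine forward once the requested transfer
+ * completes.
+ */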
+static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int i;
+ unsigned char byte_data;
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ int byte_left = i2c_edid->gmbus.total_byte_count -
+ i2c_edid->current_edid_read;
+ int byte_count = byte_left;
+ u32 reg_data = 0;
+
+ /* Data can only be received if the previous settings are correct */
+ if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+ if (byte_left <= 0) {
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ return 0;
+ }
+
+ if (byte_count > 4)
+ byte_count = 4;
+ for (i = 0; i < byte_count; i++) {
+ byte_data = edid_get_byte(vgpu);
+ reg_data |= (byte_data << (i << 3));
+ }
+
+ memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count);
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+
+ if (byte_left <= 4) {
+ switch (i2c_edid->gmbus.cycle_type) {
+ case NIDX_STOP:
+ case IDX_STOP:
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ default:
+ i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
+ break;
+ }
+ intel_vgpu_init_i2c_edid(vgpu);
+ }
+ /*
+ * Read GMBUS3 during send operation,
+ * return the latest written value
+ */
+ } else {
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
+ vgpu->id);
+ }
+ return 0;
+}
+
+static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = vgpu_vreg(vgpu, offset);
+
+ if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
+ vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
+ memcpy(p_data, (void *)&value, bytes);
+ return 0;
+}
+
+static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 wvalue = *(u32 *)p_data;
+
+ if (wvalue & GMBUS_INUSE)
+ vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
+ /* All other bits are read-only */
+ return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate gmbus register mmio read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ return -EINVAL;
+
+ if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+ return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+ return gmbus3_mmio_read(vgpu, offset, p_data, bytes);
+
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate gmbus register mmio write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ return -EINVAL;
+
+ if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
+ return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
+ return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+ return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+ return gmbus3_mmio_write(vgpu, offset, p_data, bytes);
+
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+ return 0;
+}
+
+enum {
+ AUX_CH_CTL = 0,
+ AUX_CH_DATA1,
+ AUX_CH_DATA2,
+ AUX_CH_DATA3,
+ AUX_CH_DATA4,
+ AUX_CH_DATA5
+};
+
+static inline int get_aux_ch_reg(unsigned int offset)
+{
+ int reg;
+
+ switch (offset & 0xff) {
+ case 0x10:
+ reg = AUX_CH_CTL;
+ break;
+ case 0x14:
+ reg = AUX_CH_DATA1;
+ break;
+ case 0x18:
+ reg = AUX_CH_DATA2;
+ break;
+ case 0x1c:
+ reg = AUX_CH_DATA3;
+ break;
+ case 0x20:
+ reg = AUX_CH_DATA4;
+ break;
+ case 0x24:
+ reg = AUX_CH_DATA5;
+ break;
+ default:
+ reg = -1;
+ break;
+ }
+ return reg;
+}
+
+#define AUX_CTL_MSG_LENGTH(reg) \
+ ((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
+
+/**
+ * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate AUX channel register write
+ *
+ */
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+ int port_idx,
+ unsigned int offset,
+ void *p_data)
+{
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ int msg_length, ret_msg_size;
+ int msg, addr, ctrl, op;
+ u32 value = *(u32 *)p_data;
+ int aux_data_for_write = 0;
+ int reg = get_aux_ch_reg(offset);
+
+ if (reg != AUX_CH_CTL) {
+ vgpu_vreg(vgpu, offset) = value;
+ return;
+ }
+
+ msg_length = AUX_CTL_MSG_LENGTH(value);
+ /* check the msg in DATA register */
+ msg = vgpu_vreg(vgpu, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ op = ctrl >> 4;
+ if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* The ctl write to clear some states */
+ return;
+ }
+
+ /* Always set the wanted value for vms. */
+ ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
+ vgpu_vreg(vgpu, offset) =
+ DP_AUX_CH_CTL_DONE |
+ ((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
+ DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);
+
+ if (msg_length == 3) {
+ if (!(op & GVT_AUX_I2C_MOT)) {
+ /* stop */
+ intel_vgpu_init_i2c_edid(vgpu);
+ } else {
+ /* start or restart */
+ i2c_edid->aux_ch.i2c_over_aux_ch = true;
+ i2c_edid->aux_ch.aux_ch_mot = true;
+ if (addr == 0) {
+ /* reset the address */
+ intel_vgpu_init_i2c_edid(vgpu);
+ } else if (addr == EDID_ADDR) {
+ i2c_edid->state = I2C_AUX_CH;
+ i2c_edid->port = port_idx;
+ i2c_edid->slave_selected = true;
+ if (intel_vgpu_has_monitor_on_port(vgpu,
+ port_idx) &&
+ intel_vgpu_port_is_dp(vgpu, port_idx))
+ i2c_edid->edid_available = true;
+ }
+ }
+ } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
+ /* TODO
+ * We only support EDID reading from I2C_over_AUX. And
+ * we do not expect the index mode to be used. Right now
+ * the WRITE operation is ignored. It is good enough to
+ * support the gfx driver to do EDID access.
+ */
+ } else {
+ if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+ return;
+ if (WARN_ON(msg_length != 4))
+ return;
+ if (i2c_edid->edid_available && i2c_edid->slave_selected) {
+ unsigned char val = edid_get_byte(vgpu);
+
+ aux_data_for_write = (val << 16);
+ }
+ }
+ /* write the return value in AUX_CH_DATA reg which includes:
+ * ACK of I2C_WRITE
+ * returned byte if it is READ
+ */
+ aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24;
+ vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
+}
+
+/**
+ * intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize the vGPU I2C EDID emulation state.
+ *
+ */
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+
+ edid->state = I2C_NOT_SPECIFIED;
+
+ edid->port = -1;
+ edid->slave_selected = false;
+ edid->edid_available = false;
+ edid->current_edid_read = 0;
+
+ memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
+
+ edid->aux_ch.i2c_over_aux_ch = false;
+ edid->aux_ch.aux_ch_mot = false;
+}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
new file mode 100644
index 000000000000..f6dfc8b795ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EDID_H_
+#define _GVT_EDID_H_
+
+#define EDID_SIZE 128
+#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
+
+#define GVT_AUX_NATIVE_WRITE 0x8
+#define GVT_AUX_NATIVE_READ 0x9
+#define GVT_AUX_I2C_WRITE 0x0
+#define GVT_AUX_I2C_READ 0x1
+#define GVT_AUX_I2C_STATUS 0x2
+#define GVT_AUX_I2C_MOT 0x4
+#define GVT_AUX_I2C_REPLY_ACK 0x0
+
+struct intel_vgpu_edid_data {
+ bool data_valid;
+ unsigned char edid_block[EDID_SIZE];
+};
+
+enum gmbus_cycle_type {
+ GMBUS_NOCYCLE = 0x0,
+ NIDX_NS_W = 0x1,
+ IDX_NS_W = 0x3,
+ GMBUS_STOP = 0x4,
+ NIDX_STOP = 0x5,
+ IDX_STOP = 0x7
+};
+
+/*
+ * States of GMBUS
+ *
+ * GMBUS0-3 are the registers relevant to EDID virtualization. The other two
+ * GMBUS registers, GMBUS4 (interrupt mask) and GMBUS5 (2-byte index
+ * register), are not considered here. Below is the usage of the GMBUS
+ * registers that matter to EDID virtualization.
+ *
+ * GMBUS0:
+ * R/W
+ * port selection. value of bit0 - bit2 corresponds to the GPIO registers.
+ *
+ * GMBUS1:
+ * R/W Protect
+ * Command and Status.
+ * bit0 is the direction bit: 1 is read; 0 is write.
+ * bit1 - bit7 is slave 7-bit address.
+ * bit16 - bit24 total byte count (ignore?)
+ *
+ * GMBUS2:
+ * Most of bits are read only except bit 15 (IN_USE)
+ * Status register
+ * bit0 - bit8 current byte count
+ * bit 11: hardware ready;
+ *
+ * GMBUS3:
+ * Read/Write
+ * Data for transfer
+ */
+
+/* Per the hw specs, other phases like START, ADDRESS and INDEX
+ * are invisible to the GMBUS MMIO interface, so they are not
+ * defined in the enum below.
+ */
+enum gvt_gmbus_phase {
+ GMBUS_IDLE_PHASE = 0,
+ GMBUS_DATA_PHASE,
+ GMBUS_WAIT_PHASE,
+ //GMBUS_STOP_PHASE,
+ GMBUS_MAX_PHASE
+};
+
+struct intel_vgpu_i2c_gmbus {
+ unsigned int total_byte_count; /* from GMBUS1 */
+ enum gmbus_cycle_type cycle_type;
+ enum gvt_gmbus_phase phase;
+};
+
+struct intel_vgpu_i2c_aux_ch {
+ bool i2c_over_aux_ch;
+ bool aux_ch_mot;
+};
+
+enum i2c_state {
+ I2C_NOT_SPECIFIED = 0,
+ I2C_GMBUS = 1,
+ I2C_AUX_CH = 2
+};
+
+/* I2C sequences cannot interleave.
+ * GMBUS and AUX_CH sequences cannot interleave.
+ */
+struct intel_vgpu_i2c_edid {
+ enum i2c_state state;
+
+ unsigned int port;
+ bool slave_selected;
+ bool edid_available;
+ unsigned int current_edid_read;
+
+ struct intel_vgpu_i2c_gmbus gmbus;
+ struct intel_vgpu_i2c_aux_ch aux_ch;
+};
+
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);
+
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes);
+
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes);
+
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+ int port_idx,
+ unsigned int offset,
+ void *p_data);
+
+#endif /*_GVT_EDID_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
new file mode 100644
index 000000000000..f32bb6f6495c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define _EL_OFFSET_STATUS 0x234
+#define _EL_OFFSET_STATUS_BUF 0x370
+#define _EL_OFFSET_STATUS_PTR 0x3A0
+
+#define execlist_ring_mmio(gvt, ring_id, offset) \
+ (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+
+#define valid_context(ctx) ((ctx)->valid)
+#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+ ((a)->lrca == (b)->lrca))
+
+static int context_switch_events[] = {
+ [RCS] = RCS_AS_CONTEXT_SWITCH,
+ [BCS] = BCS_AS_CONTEXT_SWITCH,
+ [VCS] = VCS_AS_CONTEXT_SWITCH,
+ [VCS2] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS] = VECS_AS_CONTEXT_SWITCH,
+};
+
+static int ring_id_to_context_switch_event(int ring_id)
+{
+ if (WARN_ON(ring_id < RCS || ring_id >=
+ ARRAY_SIZE(context_switch_events)))
+ return -EINVAL;
+
+ return context_switch_events[ring_id];
+}
+
+static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
+{
+ gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
+ execlist->running_slot ?
+ execlist->running_slot->index : -1,
+ execlist->running_context ?
+ execlist->running_context->context_id : 0,
+ execlist->pending_slot ?
+ execlist->pending_slot->index : -1);
+
+ execlist->running_slot = execlist->pending_slot;
+ execlist->pending_slot = NULL;
+ execlist->running_context = execlist->running_context ?
+ &execlist->running_slot->ctx[0] : NULL;
+
+ gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
+ execlist->running_slot ?
+ execlist->running_slot->index : -1,
+ execlist->running_context ?
+ execlist->running_context->context_id : 0,
+ execlist->pending_slot ?
+ execlist->pending_slot->index : -1);
+}
+
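+/* Recompute the virtual EXECLIST_STATUS register from the current running
+ * and pending slots so that the guest sees a consistent hardware view.
+ */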
+static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+ struct execlist_ctx_descriptor_format *desc = execlist->running_context;
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ struct execlist_status_format status;
+ int ring_id = execlist->ring_id;
+ u32 status_reg = execlist_ring_mmio(vgpu->gvt,
+ ring_id, _EL_OFFSET_STATUS);
+
+ status.ldw = vgpu_vreg(vgpu, status_reg);
+ status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+ if (running) {
+ status.current_execlist_pointer = !!running->index;
+ status.execlist_write_pointer = !!!running->index;
+ status.execlist_0_active = status.execlist_0_valid =
+ !!!(running->index);
+ status.execlist_1_active = status.execlist_1_valid =
+ !!(running->index);
+ } else {
+ status.context_id = 0;
+ status.execlist_0_active = status.execlist_0_valid = 0;
+ status.execlist_1_active = status.execlist_1_valid = 0;
+ }
+
+ status.context_id = desc ? desc->context_id : 0;
+ status.execlist_queue_full = !!(pending);
+
+ vgpu_vreg(vgpu, status_reg) = status.ldw;
+ vgpu_vreg(vgpu, status_reg + 4) = status.udw;
+
+ gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
+ vgpu->id, status_reg, status.ldw, status.udw);
+}
+
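+/* Write one context status event into the next virtual CSB entry, advance
+ * the guest-visible write pointer, and raise the context switch interrupt
+ * unless the caller asked to defer it.
+ */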
+static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
+{
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ int ring_id = execlist->ring_id;
+ struct execlist_context_status_pointer_format ctx_status_ptr;
+ u32 write_pointer;
+ u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+
+ ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_BUF);
+
+ ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+
+ write_pointer = ctx_status_ptr.write_ptr;
+
+ if (write_pointer == 0x7)
+ write_pointer = 0;
+ else {
+ ++write_pointer;
+ write_pointer %= 0x6;
+ }
+
+ offset = ctx_status_buf_reg + write_pointer * 8;
+
+ vgpu_vreg(vgpu, offset) = status->ldw;
+ vgpu_vreg(vgpu, offset + 4) = status->udw;
+
+ ctx_status_ptr.write_ptr = write_pointer;
+ vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+
+ gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
+
+ if (trigger_interrupt_later)
+ return;
+
+ intel_vgpu_trigger_virtual_event(vgpu,
+ ring_id_to_context_switch_event(execlist->ring_id));
+}
+
+static int emulate_execlist_ctx_schedule_out(
+ struct intel_vgpu_execlist *execlist,
+ struct execlist_ctx_descriptor_format *ctx)
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+ struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
+ struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
+ struct execlist_context_status_format status;
+
+ memset(&status, 0, sizeof(status));
+
+ gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
+
+ if (WARN_ON(!same_context(ctx, execlist->running_context))) {
+ gvt_err("schedule out context is not the running context, "
+ "ctx id %x running ctx id %x\n",
+ ctx->context_id,
+ execlist->running_context->context_id);
+ return -EINVAL;
+ }
+
+ /* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
+ if (valid_context(ctx1) && same_context(ctx0, ctx)) {
+ gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
+
+ execlist->running_context = ctx1;
+
+ emulate_execlist_status(execlist);
+
+ status.context_complete = status.element_switch = 1;
+ status.context_id = ctx->context_id;
+
+ emulate_csb_update(execlist, &status, false);
+ /*
+ * ctx1 is not valid, ctx == ctx0
+ * ctx1 is valid, ctx1 == ctx
+ * --> last element is finished
+ * emulate:
+ * active-to-idle if there is *no* pending execlist
+ * context-complete if there *is* pending execlist
+ */
+ } else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
+ || (valid_context(ctx1) && same_context(ctx1, ctx))) {
+ gvt_dbg_el("need to switch virtual execlist slot\n");
+
+ switch_virtual_execlist_slot(execlist);
+
+ emulate_execlist_status(execlist);
+
+ status.context_complete = status.active_to_idle = 1;
+ status.context_id = ctx->context_id;
+
+ if (!pending) {
+ emulate_csb_update(execlist, &status, false);
+ } else {
+ emulate_csb_update(execlist, &status, true);
+
+ memset(&status, 0, sizeof(status));
+
+ status.idle_to_active = 1;
+ status.context_id = 0;
+
+ emulate_csb_update(execlist, &status, false);
+ }
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
+ struct intel_vgpu_execlist *execlist)
+{
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ int ring_id = execlist->ring_id;
+ u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS);
+ struct execlist_status_format status;
+
+ status.ldw = vgpu_vreg(vgpu, status_reg);
+ status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+ if (status.execlist_queue_full) {
+ gvt_err("virtual execlist slots are full\n");
+ return NULL;
+ }
+
+ return &execlist->slot[status.execlist_write_pointer];
+}
+
+static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
+ struct execlist_ctx_descriptor_format ctx[2])
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *slot =
+ get_next_execlist_slot(execlist);
+
+ struct execlist_ctx_descriptor_format *ctx0, *ctx1;
+ struct execlist_context_status_format status;
+
+ gvt_dbg_el("emulate schedule-in\n");
+
+ if (!slot) {
+ gvt_err("no available execlist slot\n");
+ return -EINVAL;
+ }
+
+ memset(&status, 0, sizeof(status));
+ memset(slot->ctx, 0, sizeof(slot->ctx));
+
+ slot->ctx[0] = ctx[0];
+ slot->ctx[1] = ctx[1];
+
+ gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
+ slot->index, ctx[0].context_id,
+ ctx[1].context_id);
+
+	/*
+	 * no running execlist: treat this submitted pair as the running
+	 * execlist -> idle-to-active
+	 */
+ if (!running) {
+ gvt_dbg_el("no current running execlist\n");
+
+ execlist->running_slot = slot;
+ execlist->pending_slot = NULL;
+ execlist->running_context = &slot->ctx[0];
+
+ gvt_dbg_el("running slot index %d running context %x\n",
+ execlist->running_slot->index,
+ execlist->running_context->context_id);
+
+ emulate_execlist_status(execlist);
+
+ status.idle_to_active = 1;
+ status.context_id = 0;
+
+ emulate_csb_update(execlist, &status, false);
+ return 0;
+ }
+
+ ctx0 = &running->ctx[0];
+ ctx1 = &running->ctx[1];
+
+ gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
+ running->index, ctx0->context_id, ctx1->context_id);
+
+	/*
+	 * there is already a running execlist
+	 * a. running ctx1 is valid,
+	 *    ctx0 is finished, and running ctx1 == new execlist ctx[0]
+	 * b. running ctx1 is not valid,
+	 *    ctx0 == new execlist ctx[0]
+	 * ----> lite-restore + preempted
+	 */
+ if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
+ /* condition a */
+ (!same_context(ctx0, execlist->running_context))) ||
+ (!valid_context(ctx1) &&
+ same_context(ctx0, &slot->ctx[0]))) { /* condition b */
+ gvt_dbg_el("need to switch virtual execlist slot\n");
+
+ execlist->pending_slot = slot;
+ switch_virtual_execlist_slot(execlist);
+
+ emulate_execlist_status(execlist);
+
+ status.lite_restore = status.preempted = 1;
+ status.context_id = ctx[0].context_id;
+
+ emulate_csb_update(execlist, &status, false);
+ } else {
+ gvt_dbg_el("emulate as pending slot\n");
+		/*
+		 * otherwise
+		 * --> emulate the "pending execlist exists, no preemption" case
+		 */
+ execlist->pending_slot = slot;
+ emulate_execlist_status(execlist);
+ }
+ return 0;
+}
+
+static void free_workload(struct intel_vgpu_workload *workload)
+{
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_gvt_mm_unreference(workload->shadow_mm);
+ kmem_cache_free(workload->vgpu->workloads, workload);
+}
+
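+/* Each ELSP submission carries two context descriptors packed as four dwords. */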
+#define get_desc_from_elsp_dwords(ed, i) \
+ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
+
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
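+/*
+ * Rewrite the address dword(s) of the captured batch buffer start command
+ * (bb_start_cmd_va) so that it points at the shadow batch buffer.
+ */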
+static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
+ unsigned long add, int gmadr_bytes)
+{
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ return -1;
+
+ *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
+ BATCH_BUFFER_ADDR_MASK;
+ if (gmadr_bytes == 8) {
+ *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
+ add & BATCH_BUFFER_ADDR_HIGH_MASK;
+ }
+
+ return 0;
+}
+
+static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+ int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+ /* pin the gem object to ggtt */
+ if (!list_empty(&workload->shadow_bb)) {
+ struct intel_shadow_bb_entry *entry_obj =
+ list_first_entry(&workload->shadow_bb,
+ struct intel_shadow_bb_entry,
+ list);
+ struct intel_shadow_bb_entry *temp;
+
+ list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+ list) {
+ struct i915_vma *vma;
+
+ vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
+ 4, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin\n");
+ return;
+ }
+
+			/* FIXME: we are not tracking our pinned VMA, leaving it
+			 * up to the core to fix up the stray pin_count upon
+			 * free.
+			 */
+
+			/* update the relocated gma with the shadow batch buffer address */
+ set_gma_to_bb_cmd(entry_obj,
+ i915_ggtt_offset(vma),
+ gmadr_bytes);
+ }
+ }
+}
+
+static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ int ring_id = wa_ctx->workload->ring_id;
+ struct i915_gem_context *shadow_ctx =
+ wa_ctx->workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+ shadow_ring_context->bb_per_ctx_ptr.val =
+ (shadow_ring_context->bb_per_ctx_ptr.val &
+ (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+ shadow_ring_context->rcs_indirect_ctx.val =
+ (shadow_ring_context->rcs_indirect_ctx.val &
+ (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+ kunmap_atomic(shadow_ring_context);
+ return 0;
+}
+
+static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct i915_vma *vma;
+ unsigned char *per_ctx_va =
+ (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return;
+
+ vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin indirect ctx obj\n");
+ return;
+ }
+
+	/* FIXME: we are not tracking our pinned VMA, leaving it
+	 * up to the core to fix up the stray pin_count upon
+	 * free.
+	 */
+
+ wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+ wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+ memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+ update_wa_ctx_2_shadow_ctx(wa_ctx);
+}
+
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct execlist_ctx_descriptor_format ctx[2];
+ int ring_id = workload->ring_id;
+
+ intel_vgpu_pin_mm(workload->shadow_mm);
+ intel_vgpu_sync_oos_pages(workload->vgpu);
+ intel_vgpu_flush_post_shadow(workload->vgpu);
+ prepare_shadow_batch_buffer(workload);
+ prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (!workload->emulate_schedule_in)
+ return 0;
+
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+ return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+	/* release all the shadow batch buffers */
+ if (!list_empty(&workload->shadow_bb)) {
+ struct intel_shadow_bb_entry *entry_obj =
+ list_first_entry(&workload->shadow_bb,
+ struct intel_shadow_bb_entry,
+ list);
+ struct intel_shadow_bb_entry *temp;
+
+ list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+ list) {
+ i915_gem_object_unpin_map(entry_obj->obj);
+ i915_gem_object_put(entry_obj->obj);
+ list_del(&entry_obj->list);
+ kfree(entry_obj);
+ }
+ }
+}
+
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ if (wa_ctx->indirect_ctx.size == 0)
+ return;
+
+ i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
+static int complete_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_execlist *execlist =
+ &vgpu->execlist[workload->ring_id];
+ struct intel_vgpu_workload *next_workload;
+ struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+ bool lite_restore = false;
+ int ret;
+
+ gvt_dbg_el("complete workload %p status %d\n", workload,
+ workload->status);
+
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+
+ if (workload->status || vgpu->resetting)
+ goto out;
+
+ if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+ struct execlist_ctx_descriptor_format *this_desc, *next_desc;
+
+ next_workload = container_of(next,
+ struct intel_vgpu_workload, list);
+ this_desc = &workload->ctx_desc;
+ next_desc = &next_workload->ctx_desc;
+
+ lite_restore = same_context(this_desc, next_desc);
+ }
+
+ if (lite_restore) {
+ gvt_dbg_el("next context == current - no schedule-out\n");
+ free_workload(workload);
+ return 0;
+ }
+
+ ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
+ if (ret)
+ goto err;
+out:
+ free_workload(workload);
+ return 0;
+err:
+ free_workload(workload);
+ return ret;
+}
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
+static void read_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
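+	/*
+	 * The ring context stores the PDP registers from pdp3 down to pdp0,
+	 * 8 bytes apart; read them in reverse so pdp[0]/pdp[1] hold PDP0.
+	 */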
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static int prepare_mm(struct intel_vgpu_workload *workload)
+{
+ struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+ struct intel_vgpu_mm *mm;
+ int page_table_level;
+ u32 pdp[8];
+
+ if (desc->addressing_mode == 1) { /* legacy 32-bit */
+ page_table_level = 3;
+	} else if (desc->addressing_mode == 3) { /* legacy 64-bit */
+ page_table_level = 4;
+ } else {
+ gvt_err("Advanced Context mode(SVM) is not supported!\n");
+ return -EINVAL;
+ }
+
+ read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+
+ mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+
+ mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_err("fail to create mm object.\n");
+ return PTR_ERR(mm);
+ }
+ }
+ workload->shadow_mm = mm;
+ return 0;
+}
+
+#define get_last_workload(q) \
+ (list_empty(q) ? NULL : container_of(q->prev, \
+ struct intel_vgpu_workload, list))
+
+static int submit_context(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
+{
+ struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *workload = NULL;
+ u64 ring_context_gpa;
+ u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ int ret;
+
+ ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
+ if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+ return -EINVAL;
+ }
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &head, 4);
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+ head &= RB_HEAD_OFF_MASK;
+ tail &= RB_TAIL_OFF_MASK;
+
+ if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ }
+
+ gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+ workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
+ if (!workload)
+ return -ENOMEM;
+
+ /* record some ring buffer register values for scan and shadow */
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_start.val), &start, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->vgpu = vgpu;
+ workload->ring_id = ring_id;
+ workload->ctx_desc = *desc;
+ workload->ring_context_gpa = ring_context_gpa;
+ workload->rb_head = head;
+ workload->rb_tail = tail;
+ workload->rb_start = start;
+ workload->rb_ctl = ctl;
+ workload->prepare = prepare_execlist_workload;
+ workload->complete = complete_execlist_workload;
+ workload->status = -EINPROGRESS;
+ workload->emulate_schedule_in = emulate_schedule_in;
+
+ if (ring_id == RCS) {
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+ workload->wa_ctx.indirect_ctx.guest_gma =
+ indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+ workload->wa_ctx.indirect_ctx.size =
+ (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+ CACHELINE_BYTES;
+ workload->wa_ctx.per_ctx.guest_gma =
+ per_ctx & PER_CTX_ADDR_MASK;
+ workload->wa_ctx.workload = workload;
+
+ WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
+ }
+
+ if (emulate_schedule_in)
+ memcpy(&workload->elsp_dwords,
+ &vgpu->execlist[ring_id].elsp_dwords,
+ sizeof(workload->elsp_dwords));
+
+ gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+ workload, ring_id, head, tail, start, ctl);
+
+ gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
+ emulate_schedule_in);
+
+ ret = prepare_mm(workload);
+ if (ret) {
+ kmem_cache_free(vgpu->workloads, workload);
+ return ret;
+ }
+
+ queue_workload(workload);
+ return 0;
+}
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
+ unsigned long valid_desc_bitmap = 0;
+ bool emulate_schedule_in = true;
+ int ret;
+ int i;
+
+ memset(valid_desc, 0, sizeof(valid_desc));
+
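+	/* element 0 of the submission is stored after element 1 in the ELSP dword buffer */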
+ desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
+ desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (!desc[i]->valid)
+ continue;
+
+ if (!desc[i]->privilege_access) {
+ gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+		/* TODO: add other guest context checks here. */
+ set_bit(i, &valid_desc_bitmap);
+ valid_desc[i] = *desc[i];
+ }
+
+ if (!valid_desc_bitmap) {
+ gvt_err("vgpu%d: no valid desc in a elsp submission\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+ if (!test_bit(0, (void *)&valid_desc_bitmap) &&
+ test_bit(1, (void *)&valid_desc_bitmap)) {
+ gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+ /* submit workload */
+ for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
+ ret = submit_context(vgpu, ring_id, &valid_desc[i],
+ emulate_schedule_in);
+ if (ret) {
+ gvt_err("vgpu%d: fail to schedule workload\n",
+ vgpu->id);
+ return ret;
+ }
+ emulate_schedule_in = false;
+ }
+ return 0;
+}
+
+static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct execlist_context_status_pointer_format ctx_status_ptr;
+ u32 ctx_status_ptr_reg;
+
+ memset(execlist, 0, sizeof(*execlist));
+
+ execlist->vgpu = vgpu;
+ execlist->ring_id = ring_id;
+ execlist->slot[0].index = 0;
+ execlist->slot[1].index = 1;
+
+ ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_PTR);
+
+ ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+ ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+ vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+}
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
+{
+ kmem_cache_destroy(vgpu->workloads);
+}
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
+{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ /* each ring has a virtual execlist engine */
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ init_vgpu_execlist(vgpu, i);
+ INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+ }
+
+ vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+ sizeof(struct intel_vgpu_workload), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!vgpu->workloads)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
+
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		/* free the unsubmitted workloads in the queue */
+ list_for_each_entry_safe(pos, n,
+ &vgpu->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ free_workload(pos);
+ }
+
+ init_vgpu_execlist(vgpu, engine->id);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
new file mode 100644
index 000000000000..7eced40a1e30
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EXECLIST_H_
+#define _GVT_EXECLIST_H_
+
+struct execlist_ctx_descriptor_format {
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+ union {
+ u32 ldw;
+ struct {
+ u32 valid : 1;
+ u32 force_pd_restore : 1;
+ u32 force_restore : 1;
+ u32 addressing_mode : 2;
+ u32 llc_coherency : 1;
+ u32 fault_handling : 2;
+ u32 privilege_access : 1;
+ u32 reserved : 3;
+ u32 lrca : 20;
+ };
+ };
+};
+
+struct execlist_status_format {
+ union {
+ u32 ldw;
+ struct {
+ u32 current_execlist_pointer :1;
+ u32 execlist_write_pointer :1;
+ u32 execlist_queue_full :1;
+ u32 execlist_1_valid :1;
+ u32 execlist_0_valid :1;
+ u32 last_ctx_switch_reason :9;
+ u32 current_active_elm_status :2;
+ u32 arbitration_enable :1;
+ u32 execlist_1_active :1;
+ u32 execlist_0_active :1;
+ u32 reserved :13;
+ };
+ };
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+};
+
+struct execlist_context_status_pointer_format {
+ union {
+ u32 dw;
+ struct {
+ u32 write_ptr :3;
+ u32 reserved :5;
+ u32 read_ptr :3;
+ u32 reserved2 :5;
+ u32 mask :16;
+ };
+ };
+};
+
+struct execlist_context_status_format {
+ union {
+ u32 ldw;
+ struct {
+ u32 idle_to_active :1;
+ u32 preempted :1;
+ u32 element_switch :1;
+ u32 active_to_idle :1;
+ u32 context_complete :1;
+ u32 wait_on_sync_flip :1;
+ u32 wait_on_vblank :1;
+ u32 wait_on_semaphore :1;
+ u32 wait_on_scanline :1;
+ u32 reserved :2;
+ u32 semaphore_wait_mode :1;
+ u32 display_plane :3;
+ u32 lite_restore :1;
+ u32 reserved_2 :16;
+ };
+ };
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+};
+
+struct execlist_mmio_pair {
+ u32 addr;
+ u32 val;
+};
+
+/* The first 52 dwords in register state context */
+struct execlist_ring_context {
+ u32 nop1;
+ u32 lri_cmd_1;
+ struct execlist_mmio_pair ctx_ctrl;
+ struct execlist_mmio_pair ring_header;
+ struct execlist_mmio_pair ring_tail;
+ struct execlist_mmio_pair rb_start;
+ struct execlist_mmio_pair rb_ctrl;
+ struct execlist_mmio_pair bb_cur_head_UDW;
+ struct execlist_mmio_pair bb_cur_head_LDW;
+ struct execlist_mmio_pair bb_state;
+ struct execlist_mmio_pair second_bb_addr_UDW;
+ struct execlist_mmio_pair second_bb_addr_LDW;
+ struct execlist_mmio_pair second_bb_state;
+ struct execlist_mmio_pair bb_per_ctx_ptr;
+ struct execlist_mmio_pair rcs_indirect_ctx;
+ struct execlist_mmio_pair rcs_indirect_ctx_offset;
+ u32 nop2;
+ u32 nop3;
+ u32 nop4;
+ u32 lri_cmd_2;
+ struct execlist_mmio_pair ctx_timestamp;
+ struct execlist_mmio_pair pdp3_UDW;
+ struct execlist_mmio_pair pdp3_LDW;
+ struct execlist_mmio_pair pdp2_UDW;
+ struct execlist_mmio_pair pdp2_LDW;
+ struct execlist_mmio_pair pdp1_UDW;
+ struct execlist_mmio_pair pdp1_LDW;
+ struct execlist_mmio_pair pdp0_UDW;
+ struct execlist_mmio_pair pdp0_LDW;
+};
+
+struct intel_vgpu_elsp_dwords {
+ u32 data[4];
+ u32 index;
+};
+
+struct intel_vgpu_execlist_slot {
+ struct execlist_ctx_descriptor_format ctx[2];
+ u32 index;
+};
+
+struct intel_vgpu_execlist {
+ struct intel_vgpu_execlist_slot slot[2];
+ struct intel_vgpu_execlist_slot *running_slot;
+ struct intel_vgpu_execlist_slot *pending_slot;
+ struct execlist_ctx_descriptor_format *running_context;
+ int ring_id;
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_elsp_dwords elsp_dwords;
+};
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
+
+#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
new file mode 100644
index 000000000000..2fae2a2ca96f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Changbin Du <changbin.du@intel.com>
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/crc32.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+#define FIRMWARE_VERSION (0x0)
+
+struct gvt_firmware_header {
+ u64 magic;
+ u32 crc32; /* protect the data after this field */
+ u32 version;
+ u64 cfg_space_size;
+ u64 cfg_space_offset; /* offset in the file */
+ u64 mmio_size;
+ u64 mmio_offset; /* offset in the file */
+ unsigned char data[1];
+};
+
+#define RD(offset) (readl(mmio + offset.reg))
+#define WR(v, offset) (writel(v, mmio + offset.reg))
+
+static void bdw_forcewake_get(void __iomem *mmio)
+{
+ WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
+
+ RD(ECOBUS);
+
+ if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
+ gvt_err("fail to wait forcewake idle\n");
+
+ WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
+
+ if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
+ gvt_err("fail to wait forcewake ack\n");
+
+ if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
+ gvt_err("fail to wait c0 wake up\n");
+}
+
+#undef RD
+#undef WR
+
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
+static ssize_t
+gvt_firmware_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ memcpy(buf, attr->private + offset, count);
+ return count;
+}
+
+static struct bin_attribute firmware_attr = {
+ .attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
+ .read = gvt_firmware_read,
+ .write = NULL,
+ .mmap = NULL,
+};
+
+static int expose_firmware_sysfs(struct intel_gvt *gvt,
+ void __iomem *mmio)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct intel_gvt_mmio_info *e;
+ struct gvt_firmware_header *h;
+ void *firmware;
+ void *p;
+ unsigned long size;
+ int i;
+ int ret;
+
+ size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+ firmware = vmalloc(size);
+ if (!firmware)
+ return -ENOMEM;
+
+ h = firmware;
+
+ h->magic = VGT_MAGIC;
+ h->version = FIRMWARE_VERSION;
+ h->cfg_space_size = info->cfg_space_size;
+ h->cfg_space_offset = offsetof(struct gvt_firmware_header, data);
+ h->mmio_size = info->mmio_size;
+ h->mmio_offset = h->cfg_space_offset + h->cfg_space_size;
+
+ p = firmware + h->cfg_space_offset;
+
+ for (i = 0; i < h->cfg_space_size; i += 4)
+ pci_read_config_dword(pdev, i, p + i);
+
+ memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
+
+ p = firmware + h->mmio_offset;
+
+ hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+ int j;
+
+ for (j = 0; j < e->length; j += 4)
+ *(u32 *)(p + e->offset + j) =
+ readl(mmio + e->offset + j);
+ }
+
+ memcpy(gvt->firmware.mmio, p, info->mmio_size);
+
+ firmware_attr.size = size;
+ firmware_attr.private = firmware;
+
+ ret = device_create_bin_file(&pdev->dev, &firmware_attr);
+ if (ret) {
+ vfree(firmware);
+ return ret;
+ }
+ return 0;
+}
+
+static void clean_firmware_sysfs(struct intel_gvt *gvt)
+{
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+ device_remove_bin_file(&pdev->dev, &firmware_attr);
+ vfree(firmware_attr.private);
+}
+
+/**
+ * intel_gvt_free_firmware - free GVT firmware
+ * @gvt: intel gvt device
+ *
+ */
+void intel_gvt_free_firmware(struct intel_gvt *gvt)
+{
+ if (!gvt->firmware.firmware_loaded)
+ clean_firmware_sysfs(gvt);
+
+ kfree(gvt->firmware.cfg_space);
+ kfree(gvt->firmware.mmio);
+}
+
+static int verify_firmware(struct intel_gvt *gvt,
+ const struct firmware *fw)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct gvt_firmware_header *h;
+ unsigned long id, crc32_start;
+ const void *mem;
+ const char *item;
+ u64 file, request;
+
+ h = (struct gvt_firmware_header *)fw->data;
+
+ crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ mem = fw->data + crc32_start;
+
+#define VERIFY(s, a, b) do { \
+ item = (s); file = (u64)(a); request = (u64)(b); \
+ if ((a) != (b)) \
+ goto invalid_firmware; \
+} while (0)
+
+ VERIFY("magic number", h->magic, VGT_MAGIC);
+ VERIFY("version", h->version, FIRMWARE_VERSION);
+ VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start));
+ VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size);
+ VERIFY("mmio size", h->mmio_size, info->mmio_size);
+
+ mem = (fw->data + h->cfg_space_offset);
+
+ id = *(u16 *)(mem + PCI_VENDOR_ID);
+ VERIFY("vender id", id, pdev->vendor);
+
+ id = *(u16 *)(mem + PCI_DEVICE_ID);
+ VERIFY("device id", id, pdev->device);
+
+ id = *(u8 *)(mem + PCI_REVISION_ID);
+ VERIFY("revision id", id, pdev->revision);
+
+#undef VERIFY
+ return 0;
+
+invalid_firmware:
+ gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n",
+ item, file, request);
+ return -EINVAL;
+}
+
+#define GVT_FIRMWARE_PATH "i915/gvt"
+
+/**
+ * intel_gvt_load_firmware - load GVT firmware
+ * @gvt: intel gvt device
+ *
+ */
+int intel_gvt_load_firmware(struct intel_gvt *gvt)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct intel_gvt_firmware *firmware = &gvt->firmware;
+ struct gvt_firmware_header *h;
+ const struct firmware *fw;
+ char *path;
+ void __iomem *mmio;
+ void *mem;
+ int ret;
+
+ path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
+ if (!mem) {
+ kfree(path);
+ return -ENOMEM;
+ }
+
+ firmware->cfg_space = mem;
+
+ mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ if (!mem) {
+ kfree(path);
+ kfree(firmware->cfg_space);
+ return -ENOMEM;
+ }
+
+ firmware->mmio = mem;
+
+ mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
+ if (!mmio) {
+ kfree(path);
+ kfree(firmware->cfg_space);
+ kfree(firmware->mmio);
+ return -EINVAL;
+ }
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
+ bdw_forcewake_get(mmio);
+
+ sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+ GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
+ pdev->revision);
+
+ gvt_dbg_core("request hw state firmware %s...\n", path);
+
+ ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+ kfree(path);
+
+ if (ret)
+ goto expose_firmware;
+
+ gvt_dbg_core("success.\n");
+
+ ret = verify_firmware(gvt, fw);
+ if (ret)
+ goto out_free_fw;
+
+ gvt_dbg_core("verified.\n");
+
+ h = (struct gvt_firmware_header *)fw->data;
+
+ memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset,
+ h->cfg_space_size);
+ memcpy(firmware->mmio, fw->data + h->mmio_offset,
+ h->mmio_size);
+
+ release_firmware(fw);
+ firmware->firmware_loaded = true;
+ pci_iounmap(pdev, mmio);
+ return 0;
+
+out_free_fw:
+ release_firmware(fw);
+expose_firmware:
+ expose_firmware_sysfs(gvt, mmio);
+ pci_iounmap(pdev, mmio);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
new file mode 100644
index 000000000000..7eaaf1c9ed2b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -0,0 +1,2244 @@
+/*
+ * GTT virtualization
+ *
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+static bool enable_out_of_sync = false;
+static int preallocated_oos_pages = 8192;
+
+/*
+ * validate a gm address and related range size within the vGPU's
+ * graphics memory space
+ */
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
+{
+ if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
+ && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
+ gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
+ vgpu->id, addr, size);
+ return false;
+ }
+ return true;
+}
+
+/* translate a guest gmadr to host gmadr */
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
+{
+ if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
+ "invalid guest gmadr %llx\n", g_addr))
+ return -EACCES;
+
+ if (vgpu_gmadr_is_aperture(vgpu, g_addr))
+ *h_addr = vgpu_aperture_gmadr_base(vgpu)
+ + (g_addr - vgpu_aperture_offset(vgpu));
+ else
+ *h_addr = vgpu_hidden_gmadr_base(vgpu)
+ + (g_addr - vgpu_hidden_offset(vgpu));
+ return 0;
+}
+
+/* translate a host gmadr to guest gmadr */
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
+{
+ if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+ "invalid host gmadr %llx\n", h_addr))
+ return -EACCES;
+
+ if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
+ *g_addr = vgpu_aperture_gmadr_base(vgpu)
+ + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
+ else
+ *g_addr = vgpu_hidden_gmadr_base(vgpu)
+ + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
+ return 0;
+}
+
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+ unsigned long *h_index)
+{
+ u64 h_addr;
+ int ret;
+
+ ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+ &h_addr);
+ if (ret)
+ return ret;
+
+ *h_index = h_addr >> GTT_PAGE_SHIFT;
+ return 0;
+}
+
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+ unsigned long *g_index)
+{
+ u64 g_addr;
+ int ret;
+
+ ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+ &g_addr);
+ if (ret)
+ return ret;
+
+ *g_index = g_addr >> GTT_PAGE_SHIFT;
+ return 0;
+}
+
+#define gtt_type_is_entry(type) \
+ (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
+ && type != GTT_TYPE_PPGTT_PTE_ENTRY \
+ && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_type_is_pt(type) \
+ (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
+
+#define gtt_type_is_pte_pt(type) \
+ (type == GTT_TYPE_PPGTT_PTE_PT)
+
+#define gtt_type_is_root_pointer(type) \
+ (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_init_entry(e, t, p, v) do { \
+ (e)->type = t; \
+ (e)->pdev = p; \
+ memcpy(&(e)->val64, &v, sizeof(v)); \
+} while (0)
+
+/*
+ * Mappings between GTT_TYPE* enumerations.
+ * The following information can be looked up for a given type:
+ * - type of next level page table
+ * - type of entry inside this level page table
+ * - type of entry with PSE set
+ *
+ * If the given type does not carry a piece of information, e.g. an L4
+ * root entry has no PSE bit and a PTE page table has no next level page
+ * table, GTT_TYPE_INVALID is returned for that query. This is useful
+ * when traversing a page table.
+ */
+
+struct gtt_type_table_entry {
+ int entry_type;
+ int next_pt_type;
+ int pse_entry_type;
+};
+
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+ [type] = { \
+ .entry_type = e_type, \
+ .next_pt_type = npt_type, \
+ .pse_entry_type = pse_type, \
+ }
+
+static struct gtt_type_table_entry gtt_type_table[] = {
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
+ GTT_TYPE_GGTT_PTE,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+};
+
+static inline int get_next_pt_type(int type)
+{
+ return gtt_type_table[type].next_pt_type;
+}
+
+static inline int get_entry_type(int type)
+{
+ return gtt_type_table[type].entry_type;
+}
+
+static inline int get_pse_type(int type)
+{
+ return gtt_type_table[type].pse_entry_type;
+}
+
+static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+{
+ void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ u64 pte;
+
+#ifdef readq
+ pte = readq(addr);
+#else
+ pte = ioread32(addr);
+ pte |= (u64)ioread32(addr + 4) << 32;
+#endif
+ return pte;
+}
+
+static void write_pte64(struct drm_i915_private *dev_priv,
+ unsigned long index, u64 pte)
+{
+ void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+
+#ifdef writeq
+ writeq(pte, addr);
+#else
+ iowrite32((u32)pte, addr);
+ iowrite32(pte >> 32, addr + 4);
+#endif
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (WARN_ON(info->gtt_entry_size != 8))
+ return e;
+
+ if (hypervisor_access) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+ (index << info->gtt_entry_size_shift),
+ &e->val64, 8);
+ WARN_ON(ret);
+ } else if (!pt) {
+ e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+ } else {
+ e->val64 = *((u64 *)pt + index);
+ }
+ return e;
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (WARN_ON(info->gtt_entry_size != 8))
+ return e;
+
+ if (hypervisor_access) {
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+ (index << info->gtt_entry_size_shift),
+ &e->val64, 8);
+ WARN_ON(ret);
+ } else if (!pt) {
+ write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+ } else {
+ *((u64 *)pt + index) = e->val64;
+ }
+ return e;
+}
+
+#define GTT_HAW 46
+
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
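+/* masks selecting the physical page frame field of 1G/2M/4K entries */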
+
+static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
+{
+ unsigned long pfn;
+
+ if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
+ pfn = (e->val64 & ADDR_1G_MASK) >> 12;
+ else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
+ pfn = (e->val64 & ADDR_2M_MASK) >> 12;
+ else
+ pfn = (e->val64 & ADDR_4K_MASK) >> 12;
+ return pfn;
+}
+
+static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
+{
+ if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+ e->val64 &= ~ADDR_1G_MASK;
+ pfn &= (ADDR_1G_MASK >> 12);
+ } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
+ e->val64 &= ~ADDR_2M_MASK;
+ pfn &= (ADDR_2M_MASK >> 12);
+ } else {
+ e->val64 &= ~ADDR_4K_MASK;
+ pfn &= (ADDR_4K_MASK >> 12);
+ }
+
+ e->val64 |= (pfn << 12);
+}
+
+static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
+{
+ /* Entry doesn't have PSE bit. */
+ if (get_pse_type(e->type) == GTT_TYPE_INVALID)
+ return false;
+
+ e->type = get_entry_type(e->type);
+ if (!(e->val64 & (1 << 7)))
+ return false;
+
+ e->type = get_pse_type(e->type);
+ return true;
+}
+
+static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
+{
+	/*
+	 * i915 writes PDP root pointer registers without the present bit
+	 * set, which still works, so root pointer entries need to be
+	 * treated specially.
+	 */
+ if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
+ return (e->val64 != 0);
+ else
+ return (e->val64 & (1 << 0));
+}
+
+static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 &= ~(1 << 0);
+}
+
+/*
+ * Per-platform GMA routines.
+ */
+static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
+{
+ unsigned long x = (gma >> GTT_PAGE_SHIFT);
+
+ trace_gma_index(__func__, gma, x);
+ return x;
+}
+
+#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
+static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
+{ \
+ unsigned long x = (exp); \
+ trace_gma_index(__func__, gma, x); \
+ return x; \
+}
+
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
+
+static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
+ .get_entry = gtt_get_entry64,
+ .set_entry = gtt_set_entry64,
+ .clear_present = gtt_entry_clear_present,
+ .test_present = gen8_gtt_test_present,
+ .test_pse = gen8_gtt_test_pse,
+ .get_pfn = gen8_gtt_get_pfn,
+ .set_pfn = gen8_gtt_set_pfn,
+};
+
+static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
+ .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
+ .gma_to_pte_index = gen8_gma_to_pte_index,
+ .gma_to_pde_index = gen8_gma_to_pde_index,
+ .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
+ .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
+ .gma_to_pml4_index = gen8_gma_to_pml4_index,
+};
+
+static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
+ struct intel_gvt_gtt_entry *m)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long gfn, mfn;
+
+ *m = *p;
+
+ if (!ops->test_present(p))
+ return 0;
+
+ gfn = ops->get_pfn(p);
+
+ mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+ return -ENXIO;
+ }
+
+ ops->set_pfn(m, mfn);
+ return 0;
+}
+
+/*
+ * MM helpers.
+ */
+struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index)
+{
+ struct intel_gvt *gvt = mm->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ e->type = mm->page_table_entry_type;
+
+ ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+ ops->test_pse(e);
+ return e;
+}
+
+struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index)
+{
+ struct intel_gvt *gvt = mm->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+}
+
+/*
+ * PPGTT shadow page table helpers.
+ */
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+ struct intel_vgpu_ppgtt_spt *spt,
+ void *page_table, int type,
+ struct intel_gvt_gtt_entry *e, unsigned long index,
+ bool guest)
+{
+ struct intel_gvt *gvt = spt->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ e->type = get_entry_type(type);
+
+ if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+ return e;
+
+ ops->get_entry(page_table, e, index, guest,
+ spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->vgpu);
+ ops->test_pse(e);
+ return e;
+}
+
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+ struct intel_vgpu_ppgtt_spt *spt,
+ void *page_table, int type,
+ struct intel_gvt_gtt_entry *e, unsigned long index,
+ bool guest)
+{
+ struct intel_gvt *gvt = spt->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+ return e;
+
+ return ops->set_entry(page_table, e, index, guest,
+ spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->vgpu);
+}
+
+#define ppgtt_get_guest_entry(spt, e, index) \
+ ppgtt_spt_get_entry(spt, NULL, \
+ spt->guest_page_type, e, index, true)
+
+#define ppgtt_set_guest_entry(spt, e, index) \
+ ppgtt_spt_set_entry(spt, NULL, \
+ spt->guest_page_type, e, index, true)
+
+#define ppgtt_get_shadow_entry(spt, e, index) \
+ ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
+ spt->shadow_page.type, e, index, false)
+
+#define ppgtt_set_shadow_entry(spt, e, index) \
+ ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
+ spt->shadow_page.type, e, index, false)
+
+/**
+ * intel_vgpu_init_guest_page - init a guest page data structure
+ * @vgpu: a vGPU
+ * @p: a guest page data structure
+ * @gfn: guest memory page frame number
+ * @handler: the function to be called when the target guest memory page
+ * has been modified
+ * @data: the private data associated with the guest page
+ *
+ * This function is called when a user wants to track a guest memory page.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p,
+ unsigned long gfn,
+ int (*handler)(void *, u64, void *, int),
+ void *data)
+{
+ INIT_HLIST_NODE(&p->node);
+
+ p->writeprotection = false;
+ p->gfn = gfn;
+ p->handler = handler;
+ p->data = data;
+ p->oos_page = NULL;
+ p->write_cnt = 0;
+
+ hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+ return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page);
+
+/**
+ * intel_vgpu_clean_guest_page - release the resource owned by guest page data
+ * structure
+ * @vgpu: a vGPU
+ * @p: a tracked guest page
+ *
+ * This function is called when a user wants to stop tracking a guest memory
+ * page.
+ */
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ if (!hlist_unhashed(&p->node))
+ hash_del(&p->node);
+
+ if (p->oos_page)
+ detach_oos_page(vgpu, p->oos_page);
+
+ if (p->writeprotection)
+ intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+}
+
+/**
+ * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * @vgpu: a vGPU
+ * @gfn: guest memory page frame number
+ *
+ * This function is called when emulation logic wants to know if a trapped GFN
+ * is a tracked guest page.
+ *
+ * Returns:
+ * Pointer to guest page data structure, NULL if failed.
+ */
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_guest_page *p;
+
+ hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
+ p, node, gfn) {
+ if (p->gfn == gfn)
+ return p;
+ }
+ return NULL;
+}
+
+static inline int init_shadow_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_shadow_page *p, int type)
+{
+ p->vaddr = page_address(p->page);
+ p->type = type;
+
+ INIT_HLIST_NODE(&p->node);
+
+ p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
+ if (p->mfn == INTEL_GVT_INVALID_ADDR)
+ return -EFAULT;
+
+ hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+ return 0;
+}
+
+static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+{
+ if (!hlist_unhashed(&p->node))
+ hash_del(&p->node);
+}
+
+static inline struct intel_vgpu_shadow_page *find_shadow_page(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ struct intel_vgpu_shadow_page *p;
+
+ hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
+ p, node, mfn) {
+ if (p->mfn == mfn)
+ return p;
+ }
+ return NULL;
+}
+
+#define guest_page_to_ppgtt_spt(ptr) \
+ container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
+
+#define shadow_page_to_ppgtt_spt(ptr) \
+ container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
+
+static void *alloc_spt(gfp_t gfp_mask)
+{
+ struct intel_vgpu_ppgtt_spt *spt;
+
+ spt = kzalloc(sizeof(*spt), gfp_mask);
+ if (!spt)
+ return NULL;
+
+ spt->shadow_page.page = alloc_page(gfp_mask);
+ if (!spt->shadow_page.page) {
+ kfree(spt);
+ return NULL;
+ }
+ return spt;
+}
+
+static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+ __free_page(spt->shadow_page.page);
+ kfree(spt);
+}
+
+static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
+
+ clean_shadow_page(&spt->shadow_page);
+ intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+ list_del_init(&spt->post_shadow_list);
+
+ free_spt(spt);
+}
+
+static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
+{
+ struct hlist_node *n;
+ struct intel_vgpu_shadow_page *sp;
+ int i;
+
+ hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
+ ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
+}
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ u64 pa, void *p_data, int bytes);
+
+static int ppgtt_write_protection_handler(void *gp, u64 pa,
+ void *p_data, int bytes)
+{
+ struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+
+	if (bytes != 4 && bytes != 8)
+		return -EINVAL;
+
+	if (!gpt->writeprotection)
+		return -EINVAL;
+
+	return ppgtt_handle_guest_write_page_table_bytes(gp,
+		pa, p_data, bytes);
+}
+
+static int reclaim_one_mm(struct intel_gvt *gvt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
+ struct intel_vgpu *vgpu, int type, unsigned long gfn)
+{
+ struct intel_vgpu_ppgtt_spt *spt = NULL;
+ int ret;
+
+retry:
+ spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
+ if (!spt) {
+ if (reclaim_one_mm(vgpu->gvt))
+ goto retry;
+
+ gvt_err("fail to allocate ppgtt shadow page\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spt->vgpu = vgpu;
+ spt->guest_page_type = type;
+ atomic_set(&spt->refcount, 1);
+ INIT_LIST_HEAD(&spt->post_shadow_list);
+
+	/*
+	 * TODO: the guest page type may differ from the shadow page type
+	 * once PSE pages are supported in the future.
+	 */
+ ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+ if (ret) {
+ gvt_err("fail to initialize shadow page for spt\n");
+ goto err;
+ }
+
+ ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+ gfn, ppgtt_write_protection_handler, NULL);
+ if (ret) {
+ gvt_err("fail to initialize guest page for spt\n");
+ goto err;
+ }
+
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+ return spt;
+err:
+ ppgtt_free_shadow_page(spt);
+ return ERR_PTR(ret);
+}
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
+
+ if (p)
+ return shadow_page_to_ppgtt_spt(p);
+
+ gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
+ vgpu->id, mfn);
+ return NULL;
+}
+
+#define pt_entry_size_shift(spt) \
+ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
+
+#define pt_entries(spt) \
+ (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+
+#define for_each_present_guest_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); i++) \
+ if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+ ppgtt_get_guest_entry(spt, e, i)))
+
+#define for_each_present_shadow_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); i++) \
+ if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+ ppgtt_get_shadow_entry(spt, e, i)))
+
+static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
+
+ atomic_inc(&spt->refcount);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *e)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s;
+ intel_gvt_gtt_type_t cur_pt_type;
+
+ if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
+ return -EINVAL;
+
+ if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ cur_pt_type = get_next_pt_type(e->type) + 1;
+ if (ops->get_pfn(e) ==
+ vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
+ return 0;
+ }
+ s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ if (!s) {
+ gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
+ vgpu->id, ops->get_pfn(e));
+ return -ENXIO;
+ }
+ return ppgtt_invalidate_shadow_page(s);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ struct intel_gvt_gtt_entry e;
+ unsigned long index;
+ int ret;
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_change(spt->vgpu->id, "die", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+
+ trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+
+ if (atomic_dec_return(&spt->refcount) > 0)
+ return 0;
+
+ if (gtt_type_is_pte_pt(spt->shadow_page.type))
+ goto release;
+
+ for_each_present_shadow_entry(spt, &e, index) {
+ if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
+ gvt_err("GVT doesn't support pse bit for now\n");
+ return -EINVAL;
+ }
+ ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
+ spt->vgpu, &e);
+ if (ret)
+ goto fail;
+ }
+release:
+ trace_spt_change(spt->vgpu->id, "release", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+ ppgtt_free_shadow_page(spt);
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
+ spt->vgpu->id, spt, e.val64, e.type);
+ return ret;
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
+ struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s = NULL;
+ struct intel_vgpu_guest_page *g;
+ int ret;
+
+ if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
+ if (g) {
+ s = guest_page_to_ppgtt_spt(g);
+ ppgtt_get_shadow_page(s);
+ } else {
+ int type = get_next_pt_type(we->type);
+
+ s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+
+ ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+ if (ret)
+ goto fail;
+
+ ret = ppgtt_populate_shadow_page(s);
+ if (ret)
+ goto fail;
+
+ trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+ s->shadow_page.type);
+ }
+ return s;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, s, we->val64, we->type);
+ return ERR_PTR(ret);
+}
+
+static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
+ struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
+{
+ struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
+
+ se->type = ge->type;
+ se->val64 = ge->val64;
+
+ ops->set_pfn(se, s->shadow_page.mfn);
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_vgpu_ppgtt_spt *s;
+ struct intel_gvt_gtt_entry se, ge;
+ unsigned long i;
+ int ret;
+
+ trace_spt_change(spt->vgpu->id, "born", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+
+ if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
+ for_each_present_guest_entry(spt, &ge, i) {
+ ret = gtt_entry_p2m(vgpu, &ge, &se);
+ if (ret)
+ goto fail;
+ ppgtt_set_shadow_entry(spt, &se, i);
+ }
+ return 0;
+ }
+
+ for_each_present_guest_entry(spt, &ge, i) {
+ if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
+ gvt_err("GVT doesn't support pse bit now\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &se, i);
+ ppgtt_generate_shadow_entry(&se, s, &ge);
+ ppgtt_set_shadow_entry(spt, &se, i);
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, spt, ge.val64, ge.type);
+ return ret;
+}
+
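+/*
+ * Tear down the shadow entry at @index: drop the reference on the
+ * lower-level shadow page it points to (if any) and redirect the entry to
+ * the scratch page table.
+ */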
+static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
+ unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry e;
+ int ret;
+
+ ppgtt_get_shadow_entry(spt, &e, index);
+
+ trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
+ index);
+
+ if (!ops->test_present(&e))
+ return 0;
+
+ if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+ return 0;
+
+ if (gtt_type_is_pt(get_next_pt_type(e.type))) {
+ struct intel_vgpu_ppgtt_spt *s =
+ ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
+ if (!s) {
+ gvt_err("fail to find guest page\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+ ret = ppgtt_invalidate_shadow_page(s);
+ if (ret)
+ goto fail;
+ }
+ ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &e, index);
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, spt, e.val64, e.type);
+ return ret;
+}
+
+static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_entry m;
+ struct intel_vgpu_ppgtt_spt *s;
+ int ret;
+
+ trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
+ we->val64, index);
+
+ if (gtt_type_is_pt(get_next_pt_type(we->type))) {
+ s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &m, index);
+ ppgtt_generate_shadow_entry(&m, s, we);
+ ppgtt_set_shadow_entry(spt, &m, index);
+ } else {
+ ret = gtt_entry_p2m(vgpu, we, &m);
+ if (ret)
+ goto fail;
+ ppgtt_set_shadow_entry(spt, &m, index);
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
+ spt, we->val64, we->type);
+ return ret;
+}
+
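+/*
+ * Re-shadow an out-of-sync guest page table: compare the cached copy in
+ * oos_page->mem against the current guest content and re-translate only
+ * the entries that changed or were marked in the post-shadow bitmap.
+ */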
+static int sync_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *spt =
+ guest_page_to_ppgtt_spt(oos_page->guest_page);
+ struct intel_gvt_gtt_entry old, new, m;
+ int index;
+ int ret;
+
+ trace_oos_change(vgpu->id, "sync", oos_page->id,
+ oos_page->guest_page, spt->guest_page_type);
+
+ old.type = new.type = get_entry_type(spt->guest_page_type);
+ old.val64 = new.val64 = 0;
+
+ for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
+ index++) {
+ ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
+ ops->get_entry(NULL, &new, index, true,
+ oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+
+ if (old.val64 == new.val64
+ && !test_and_clear_bit(index, spt->post_shadow_bitmap))
+ continue;
+
+ trace_oos_sync(vgpu->id, oos_page->id,
+ oos_page->guest_page, spt->guest_page_type,
+ new.val64, index);
+
+ ret = gtt_entry_p2m(vgpu, &new, &m);
+ if (ret)
+ return ret;
+
+ ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
+ ppgtt_set_shadow_entry(spt, &m, index);
+ }
+
+ oos_page->guest_page->write_cnt = 0;
+ list_del_init(&spt->post_shadow_list);
+ return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_ppgtt_spt *spt =
+ guest_page_to_ppgtt_spt(oos_page->guest_page);
+
+ trace_oos_change(vgpu->id, "detach", oos_page->id,
+ oos_page->guest_page, spt->guest_page_type);
+
+ oos_page->guest_page->write_cnt = 0;
+ oos_page->guest_page->oos_page = NULL;
+ oos_page->guest_page = NULL;
+
+ list_del_init(&oos_page->vm_list);
+ list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
+
+ return 0;
+}
+
+static int attach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ret;
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
+ oos_page->mem, GTT_PAGE_SIZE);
+ if (ret)
+ return ret;
+
+ oos_page->guest_page = gpt;
+ gpt->oos_page = oos_page;
+
+ list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
+
+ trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ return 0;
+}
+
+static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ int ret;
+
+ ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+ if (ret)
+ return ret;
+
+ trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+ list_del_init(&gpt->oos_page->vm_list);
+ return sync_oos_page(vgpu, gpt->oos_page);
+}
+
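+/*
+ * Find an oos page for a guest page table. If the free list is empty, the
+ * oldest in-use oos page is synced back and recycled.
+ */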
+static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+ int ret;
+
+ WARN(oos_page, "shadow PPGTT page already has an oos page\n");
+
+ if (list_empty(&gtt->oos_page_free_list_head)) {
+ oos_page = container_of(gtt->oos_page_use_list_head.next,
+ struct intel_vgpu_oos_page, list);
+ ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ if (ret)
+ return ret;
+ ret = detach_oos_page(vgpu, oos_page);
+ if (ret)
+ return ret;
+ } else
+ oos_page = container_of(gtt->oos_page_free_list_head.next,
+ struct intel_vgpu_oos_page, list);
+ return attach_oos_page(vgpu, oos_page, gpt);
+}
+
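+/*
+ * Put a guest page table into out-of-sync mode: remove its write
+ * protection and track it on the per-vGPU oos list until the next sync
+ * point.
+ */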
+static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+
+ if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
+ return -EINVAL;
+
+ trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+ list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
+ return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+}
+
+/**
+ * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to sync all the out-of-sync shadow page tables for the vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_oos_page *oos_page;
+ int ret;
+
+ if (!enable_out_of_sync)
+ return 0;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
+ oos_page = container_of(pos,
+ struct intel_vgpu_oos_page, vm_list);
+ ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * The heart of PPGTT shadow page table.
+ */
+static int ppgtt_handle_guest_write_page_table(
+ struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+
+ int ret;
+ int new_present;
+
+ new_present = ops->test_present(we);
+
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ if (ret)
+ goto fail;
+
+ if (new_present) {
+ ret = ppgtt_handle_guest_entry_add(gpt, we, index);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
+ vgpu->id, spt, we->val64, we->type);
+ return ret;
+}
+
+static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
+{
+ return enable_out_of_sync
+ && gtt_type_is_pte_pt(
+ guest_page_to_ppgtt_spt(gpt)->guest_page_type)
+ && gpt->write_cnt >= 2;
+}
+
+static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
+ unsigned long index)
+{
+ set_bit(index, spt->post_shadow_bitmap);
+ if (!list_empty(&spt->post_shadow_list))
+ return;
+
+ list_add_tail(&spt->post_shadow_list,
+ &spt->vgpu->gtt.post_shadow_list_head);
+}
+
+/**
+ * intel_vgpu_flush_post_shadow - flush the post shadow transactions
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to flush all the pending post-shadow transactions for the vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_gvt_gtt_entry ge;
+ unsigned long index;
+ int ret;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
+ spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
+ post_shadow_list);
+
+ for_each_set_bit(index, spt->post_shadow_bitmap,
+ GTT_ENTRY_NUM_IN_ONE_PAGE) {
+ ppgtt_get_guest_entry(spt, &ge, index);
+
+ ret = ppgtt_handle_guest_write_page_table(
+ &spt->guest_page, &ge, index);
+ if (ret)
+ return ret;
+ clear_bit(index, spt->post_shadow_bitmap);
+ }
+ list_del_init(&spt->post_shadow_list);
+ }
+ return 0;
+}
+
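+/*
+ * Handler for writes to a write-protected guest page table. A full-entry
+ * write is shadowed immediately; a partial write only tears down the old
+ * shadow entry and defers re-shadowing via the post-shadow bitmap. PTE
+ * pages that are written frequently may be switched to out-of-sync mode.
+ */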
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ u64 pa, void *p_data, int bytes)
+{
+ struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt_gtt_entry we;
+ unsigned long index;
+ int ret;
+
+ index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
+
+ ppgtt_get_guest_entry(spt, &we, index);
+
+ ops->test_pse(&we);
+
+ if (bytes == info->gtt_entry_size) {
+ ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
+ if (ret)
+ return ret;
+ } else {
+ if (!test_bit(index, spt->post_shadow_bitmap)) {
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ if (ret)
+ return ret;
+ }
+
+ ppgtt_set_post_shadow(spt, index);
+ }
+
+ if (!enable_out_of_sync)
+ return 0;
+
+ gpt->write_cnt++;
+
+ if (gpt->oos_page)
+ ops->set_entry(gpt->oos_page->mem, &we, index,
+ false, 0, vgpu);
+
+ if (can_do_out_of_sync(gpt)) {
+ if (!gpt->oos_page)
+ ppgtt_allocate_oos_page(vgpu, gpt);
+
+ ret = ppgtt_set_guest_page_oos(vgpu, gpt);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * mm page table allocation policy for bdw+
+ * - for ggtt, only virtual page table will be allocated.
+ * - for ppgtt, dedicated virtual/shadow page table will be allocated.
+ */
+static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ void *mem;
+
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ mm->page_table_entry_cnt = 4;
+ mm->page_table_entry_size = mm->page_table_entry_cnt *
+ info->gtt_entry_size;
+ mem = kzalloc(mm->has_shadow_page_table ?
+ mm->page_table_entry_size * 2
+ : mm->page_table_entry_size,
+ GFP_ATOMIC);
+ if (!mem)
+ return -ENOMEM;
+ mm->virtual_page_table = mem;
+ if (!mm->has_shadow_page_table)
+ return 0;
+ mm->shadow_page_table = mem + mm->page_table_entry_size;
+ } else if (mm->type == INTEL_GVT_MM_GGTT) {
+ mm->page_table_entry_cnt =
+ (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+ mm->page_table_entry_size = mm->page_table_entry_cnt *
+ info->gtt_entry_size;
+ mem = vzalloc(mm->page_table_entry_size);
+ if (!mem)
+ return -ENOMEM;
+ mm->virtual_page_table = mem;
+ }
+ return 0;
+}
+
+static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
+{
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ kfree(mm->virtual_page_table);
+ } else if (mm->type == INTEL_GVT_MM_GGTT) {
+ if (mm->virtual_page_table)
+ vfree(mm->virtual_page_table);
+ }
+ mm->virtual_page_table = mm->shadow_page_table = NULL;
+}
+
+static void invalidate_mm(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_gvt_gtt_entry se;
+ int i;
+
+ if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+ return;
+
+ for (i = 0; i < mm->page_table_entry_cnt; i++) {
+ ppgtt_get_shadow_root_entry(mm, &se, i);
+ if (!ops->test_present(&se))
+ continue;
+ ppgtt_invalidate_shadow_page_by_shadow_entry(
+ vgpu, &se);
+ se.val64 = 0;
+ ppgtt_set_shadow_root_entry(mm, &se, i);
+
+ trace_gpt_change(vgpu->id, "destroy root pointer",
+ NULL, se.type, se.val64, i);
+ }
+ mm->shadowed = false;
+}
+
+/**
+ * intel_vgpu_destroy_mm - destroy a mm object
+ * @mm_ref: the kref embedded in a vGPU mm object
+ *
+ * This function is used to destroy a mm object for a vGPU.
+ *
+ */
+void intel_vgpu_destroy_mm(struct kref *mm_ref)
+{
+ struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+
+ if (!mm->initialized)
+ goto out;
+
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+
+ if (mm->has_shadow_page_table)
+ invalidate_mm(mm);
+
+ gtt->mm_free_page_table(mm);
+out:
+ kfree(mm);
+}
+
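+/*
+ * Shadow a PPGTT mm object: for each present guest root entry, build the
+ * corresponding shadow page table tree and install a shadow root entry
+ * pointing at it.
+ */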
+static int shadow_mm(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_gvt_gtt_entry ge, se;
+ int i;
+ int ret;
+
+ if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+ return 0;
+
+ mm->shadowed = true;
+
+ for (i = 0; i < mm->page_table_entry_cnt; i++) {
+ ppgtt_get_guest_root_entry(mm, &ge, i);
+ if (!ops->test_present(&ge))
+ continue;
+
+ trace_gpt_change(vgpu->id, __func__, NULL,
+ ge.type, ge.val64, i);
+
+ spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(spt)) {
+ gvt_err("fail to populate guest root pointer\n");
+ ret = PTR_ERR(spt);
+ goto fail;
+ }
+ ppgtt_generate_shadow_entry(&se, spt, &ge);
+ ppgtt_set_shadow_root_entry(mm, &se, i);
+
+ trace_gpt_change(vgpu->id, "populate root pointer",
+ NULL, se.type, se.val64, i);
+ }
+ return 0;
+fail:
+ invalidate_mm(mm);
+ return ret;
+}
+
+/**
+ * intel_vgpu_create_mm - create a mm object for a vGPU
+ * @vgpu: a vGPU
+ * @mm_type: mm object type, should be PPGTT or GGTT
+ * @virtual_page_table: page table root pointers. Can be NULL if the caller
+ * wants to populate the shadow later.
+ * @page_table_level: describe the page table level of the mm object
+ * @pde_base_index: pde root pointer base in GGTT MMIO.
+ *
+ * This function is used to create a mm object for a vGPU.
+ *
+ * Returns:
+ * The created mm object on success, ERR_PTR() encoded error code if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+ int mm_type, void *virtual_page_table, int page_table_level,
+ u32 pde_base_index)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_mm *mm;
+ int ret;
+
+ mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+ if (!mm) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mm->type = mm_type;
+
+ if (page_table_level == 1)
+ mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
+ else if (page_table_level == 3)
+ mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+ else if (page_table_level == 4)
+ mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mm->page_table_level = page_table_level;
+ mm->pde_base_index = pde_base_index;
+
+ mm->vgpu = vgpu;
+ mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
+
+ kref_init(&mm->ref);
+ atomic_set(&mm->pincount, 0);
+ INIT_LIST_HEAD(&mm->list);
+ INIT_LIST_HEAD(&mm->lru_list);
+ list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
+
+ ret = gtt->mm_alloc_page_table(mm);
+ if (ret) {
+ gvt_err("fail to allocate page table for mm\n");
+ goto fail;
+ }
+
+ mm->initialized = true;
+
+ if (virtual_page_table)
+ memcpy(mm->virtual_page_table, virtual_page_table,
+ mm->page_table_entry_size);
+
+ if (mm->has_shadow_page_table) {
+ ret = shadow_mm(mm);
+ if (ret)
+ goto fail;
+ list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+ }
+ return mm;
+fail:
+ gvt_err("fail to create mm\n");
+ if (mm)
+ intel_gvt_mm_unreference(mm);
+ return ERR_PTR(ret);
+}
+
+/**
+ * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
+ * @mm: a vGPU mm object
+ *
+ * This function is called when a user no longer needs a vGPU mm object.
+ */
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+{
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return;
+
+ atomic_dec(&mm->pincount);
+}
+
+/**
+ * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
+ * @mm: a vGPU mm object
+ *
+ * This function is called when a user wants to use a vGPU mm object. If this
+ * mm object hasn't been shadowed yet, the shadow will be populated at this
+ * time.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
+{
+ int ret;
+
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return 0;
+
+ atomic_inc(&mm->pincount);
+
+ if (!mm->shadowed) {
+ ret = shadow_mm(mm);
+ if (ret)
+ return ret;
+ }
+
+ list_del_init(&mm->lru_list);
+ list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
+ return 0;
+}
+
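+/*
+ * Reclaim shadow page table memory by invalidating the least recently used
+ * PPGTT mm object that is not currently pinned.
+ */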
+static int reclaim_one_mm(struct intel_gvt *gvt)
+{
+ struct intel_vgpu_mm *mm;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+
+ if (mm->type != INTEL_GVT_MM_PPGTT)
+ continue;
+ if (atomic_read(&mm->pincount))
+ continue;
+
+ list_del_init(&mm->lru_list);
+ invalidate_mm(mm);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * GMA translation APIs.
+ */
+static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s;
+
+ if (WARN_ON(!mm->has_shadow_page_table))
+ return -EINVAL;
+
+ s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ if (!s)
+ return -ENXIO;
+
+ if (!guest)
+ ppgtt_get_shadow_entry(s, e, index);
+ else
+ ppgtt_get_guest_entry(s, e, index);
+ return 0;
+}
+
+/**
+ * intel_vgpu_gma_to_gpa - translate a GMA to a GPA
+ * @mm: mm object. Could be a PPGTT or GGTT mm object
+ * @gma: graphics memory address in this mm object
+ *
+ * This function is used to translate a graphics memory address in a specific
+ * graphics memory space to a guest physical address.
+ *
+ * Returns:
+ * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
+ unsigned long gpa = INTEL_GVT_INVALID_ADDR;
+ unsigned long gma_index[4];
+ struct intel_gvt_gtt_entry e;
+ int i, index;
+ int ret;
+
+ if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
+ return INTEL_GVT_INVALID_ADDR;
+
+ if (mm->type == INTEL_GVT_MM_GGTT) {
+ if (!vgpu_gmadr_is_valid(vgpu, gma))
+ goto err;
+
+ ggtt_get_guest_entry(mm, &e,
+ gma_ops->gma_to_ggtt_pte_index(gma));
+ gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ + (gma & ~GTT_PAGE_MASK);
+
+ trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
+ return gpa;
+ }
+
+ switch (mm->page_table_level) {
+ case 4:
+ ppgtt_get_shadow_root_entry(mm, &e, 0);
+ gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+ gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+ gma_index[2] = gma_ops->gma_to_pde_index(gma);
+ gma_index[3] = gma_ops->gma_to_pte_index(gma);
+ index = 4;
+ break;
+ case 3:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_l3_pdp_index(gma));
+ gma_index[0] = gma_ops->gma_to_pde_index(gma);
+ gma_index[1] = gma_ops->gma_to_pte_index(gma);
+ index = 2;
+ break;
+ case 2:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_pde_index(gma));
+ gma_index[0] = gma_ops->gma_to_pte_index(gma);
+ index = 1;
+ break;
+ default:
+ WARN_ON(1);
+ goto err;
+ }
+
+ /* walk into the shadow page table and get gpa from guest entry */
+ for (i = 0; i < index; i++) {
+ ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+ (i == index - 1));
+ if (ret)
+ goto err;
+ }
+
+ gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ + (gma & ~GTT_PAGE_MASK);
+
+ trace_gma_translate(vgpu->id, "ppgtt", 0,
+ mm->page_table_level, gma, gpa);
+ return gpa;
+err:
+ gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+ return INTEL_GVT_INVALID_ADDR;
+}
+
+static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ unsigned long index = off >> info->gtt_entry_size_shift;
+ struct intel_gvt_gtt_entry e;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ ggtt_get_guest_entry(ggtt_mm, &e, index);
+ memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
+ bytes);
+ return 0;
+}
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data will be returned to guest
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register read
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ off -= info->gtt_start_offset;
+ ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
+ return ret;
+}
+
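+/*
+ * Shadow a guest GGTT entry write: translate the guest page frame to a host
+ * mfn with gtt_entry_p2m() and write the result into the shadow GGTT, while
+ * also keeping the guest view up to date.
+ */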
+static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
+ unsigned long gma;
+ struct intel_gvt_gtt_entry e, m;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ gma = g_gtt_index << GTT_PAGE_SHIFT;
+
+ /* the VM may configure the whole GM space when ballooning is used */
+ if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
+ "vgpu%d: found oob ggtt write, offset %x\n",
+ vgpu->id, off)) {
+ return 0;
+ }
+
+ ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
+ bytes);
+
+ if (ops->test_present(&e)) {
+ ret = gtt_entry_p2m(vgpu, &e, &m);
+ if (ret) {
+ gvt_err("vgpu%d: fail to translate guest gtt entry\n",
+ vgpu->id);
+ return ret;
+ }
+ } else {
+ m = e;
+ m.val64 = 0;
+ }
+
+ ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ return 0;
+}
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data from guest write
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register write
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ off -= info->gtt_start_offset;
+ ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
+ return ret;
+}
+
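+/*
+ * Allocate the scratch page table for one page table level. Non-leaf
+ * scratch tables are filled with entries pointing at the scratch table one
+ * level below, forming a scratch page table tree.
+ */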
+static int alloc_scratch_pages(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t type)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ int page_entry_num = GTT_PAGE_SIZE >>
+ vgpu->gvt->device_info.gtt_entry_size_shift;
+ struct page *scratch_pt;
+ unsigned long mfn;
+ int i;
+ void *p;
+
+ if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ return -EINVAL;
+
+ scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+ if (!scratch_pt) {
+ gvt_err("fail to allocate scratch page\n");
+ return -ENOMEM;
+ }
+
+ p = kmap_atomic(scratch_pt);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
+ kunmap_atomic(p);
+ __free_page(scratch_pt);
+ return -EFAULT;
+ }
+ gtt->scratch_pt[type].page_mfn = mfn;
+ gtt->scratch_pt[type].page = scratch_pt;
+ gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
+ vgpu->id, type, mfn);
+
+ /* Build the tree by fully filling the scratch pt with entries which
+ * point to the next level scratch pt or scratch page. The
+ * scratch_pt[type] indicates the scratch pt/scratch page used by the
+ * 'type' pt.
+ * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
+ * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
+ * is of type GTT_TYPE_PPGTT_PTE_PT and is fully filled with the
+ * scratch page mfn.
+ */
+ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ struct intel_gvt_gtt_entry se;
+
+ memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
+ se.type = get_entry_type(type - 1);
+ ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
+
+ /* The entry parameters like present/writeable/cache type
+ * are set to the same values as in i915's scratch page tree.
+ */
+ se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+ if (type == GTT_TYPE_PPGTT_PDE_PT)
+ se.val64 |= PPAT_CACHED_INDEX;
+
+ for (i = 0; i < page_entry_num; i++)
+ ops->set_entry(p, &se, i, false, 0, vgpu);
+ }
+
+ kunmap_atomic(p);
+
+ return 0;
+}
+
+static int release_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ if (vgpu->gtt.scratch_pt[i].page != NULL) {
+ __free_page(vgpu->gtt.scratch_pt[i].page);
+ vgpu->gtt.scratch_pt[i].page = NULL;
+ vgpu->gtt.scratch_pt[i].page_mfn = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int create_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+ int i, ret;
+
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ ret = alloc_scratch_pages(vgpu, i);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ release_scratch_page_tree(vgpu);
+ return ret;
+}
+
+/**
+ * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize per-vGPU graphics memory virtualization
+ * components.
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ struct intel_vgpu_mm *ggtt_mm;
+
+ hash_init(gtt->guest_page_hash_table);
+ hash_init(gtt->shadow_page_hash_table);
+
+ INIT_LIST_HEAD(&gtt->mm_list_head);
+ INIT_LIST_HEAD(&gtt->oos_page_list_head);
+ INIT_LIST_HEAD(&gtt->post_shadow_list_head);
+
+ ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
+ NULL, 1, 0);
+ if (IS_ERR(ggtt_mm)) {
+ gvt_err("fail to create mm for ggtt.\n");
+ return PTR_ERR(ggtt_mm);
+ }
+
+ gtt->ggtt_mm = ggtt_mm;
+
+ return create_scratch_page_tree(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up per-vGPU graphics memory virtualization
+ * components.
+ */
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ ppgtt_free_all_shadow_page(vgpu);
+ release_scratch_page_tree(vgpu);
+
+ list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ vgpu->gvt->gtt.mm_free_page_table(mm);
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+ kfree(mm);
+ }
+}
+
+static void clean_spt_oos(struct intel_gvt *gvt)
+{
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct list_head *pos, *n;
+ struct intel_vgpu_oos_page *oos_page;
+
+ WARN(!list_empty(&gtt->oos_page_use_list_head),
+ "someone is still using oos page\n");
+
+ list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
+ oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
+ list_del(&oos_page->list);
+ kfree(oos_page);
+ }
+}
+
+static int setup_spt_oos(struct intel_gvt *gvt)
+{
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_oos_page *oos_page;
+ int i;
+ int ret;
+
+ INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
+ INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
+
+ for (i = 0; i < preallocated_oos_pages; i++) {
+ oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
+ if (!oos_page) {
+ gvt_err("fail to pre-allocate oos page\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&oos_page->list);
+ INIT_LIST_HEAD(&oos_page->vm_list);
+ oos_page->id = i;
+ list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
+ }
+
+ gvt_dbg_mm("%d oos pages preallocated\n", i);
+
+ return 0;
+fail:
+ clean_spt_oos(gvt);
+ return ret;
+}
+
+/**
+ * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ * @root_entry: PPGTT page table root pointers
+ *
+ * This function is used to find a PPGTT mm object from the mm object pool.
+ *
+ * Returns:
+ * Pointer to the mm object on success, NULL if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry)
+{
+ struct list_head *pos;
+ struct intel_vgpu_mm *mm;
+ u64 *src, *dst;
+
+ list_for_each(pos, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ if (mm->type != INTEL_GVT_MM_PPGTT)
+ continue;
+
+ if (mm->page_table_level != page_table_level)
+ continue;
+
+ src = root_entry;
+ dst = mm->virtual_page_table;
+
+ if (page_table_level == 3) {
+ if (src[0] == dst[0]
+ && src[1] == dst[1]
+ && src[2] == dst[2]
+ && src[3] == dst[3])
+ return mm;
+ } else {
+ if (src[0] == dst[0])
+ return mm;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to create a PPGTT mm object from a guest to GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ struct intel_vgpu_mm *mm;
+
+ if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+ return -EINVAL;
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+ mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_err("fail to create mm\n");
+ return PTR_ERR(mm);
+ }
+ }
+ return 0;
+}
+
+/**
+ * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to destroy a PPGTT mm object from a guest to GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ struct intel_vgpu_mm *mm;
+
+ if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+ return -EINVAL;
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ if (!mm) {
+ gvt_err("fail to find ppgtt instance.\n");
+ return -EINVAL;
+ }
+ intel_gvt_mm_unreference(mm);
+ return 0;
+}
+
+/**
+ * intel_gvt_init_gtt - initialize mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage, to initialize
+ * the mm components of a GVT device.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_gvt_init_gtt(struct intel_gvt *gvt)
+{
+ int ret;
+
+ gvt_dbg_core("init gtt\n");
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+ gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
+ gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
+ gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
+ } else {
+ return -ENODEV;
+ }
+
+ if (enable_out_of_sync) {
+ ret = setup_spt_oos(gvt);
+ if (ret) {
+ gvt_err("fail to initialize SPT oos\n");
+ return ret;
+ }
+ }
+ INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+ return 0;
+}
+
+/**
+ * intel_gvt_clean_gtt - clean up mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage, to clean up the
+ * mm components of a GVT device.
+ *
+ */
+void intel_gvt_clean_gtt(struct intel_gvt *gvt)
+{
+ if (enable_out_of_sync)
+ clean_spt_oos(gvt);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
new file mode 100644
index 000000000000..d250013bc37b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef _GVT_GTT_H_
+#define _GVT_GTT_H_
+
+#define GTT_PAGE_SHIFT 12
+#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
+#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
+
+struct intel_vgpu_mm;
+
+#define INTEL_GVT_GTT_HASH_BITS 8
+#define INTEL_GVT_INVALID_ADDR (~0UL)
+
+struct intel_gvt_gtt_entry {
+ u64 val64;
+ int type;
+};
+
+struct intel_gvt_gtt_pte_ops {
+ struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ bool (*test_present)(struct intel_gvt_gtt_entry *e);
+ void (*clear_present)(struct intel_gvt_gtt_entry *e);
+ bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+ void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
+ unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
+};
+
+struct intel_gvt_gtt_gma_ops {
+ unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
+ unsigned long (*gma_to_pte_index)(unsigned long gma);
+ unsigned long (*gma_to_pde_index)(unsigned long gma);
+ unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
+ unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
+ unsigned long (*gma_to_pml4_index)(unsigned long gma);
+};
+
+struct intel_gvt_gtt {
+ struct intel_gvt_gtt_pte_ops *pte_ops;
+ struct intel_gvt_gtt_gma_ops *gma_ops;
+ int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
+ void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
+ struct list_head oos_page_use_list_head;
+ struct list_head oos_page_free_list_head;
+ struct list_head mm_lru_list_head;
+};
+
+enum {
+ INTEL_GVT_MM_GGTT = 0,
+ INTEL_GVT_MM_PPGTT,
+};
+
+typedef enum {
+ GTT_TYPE_INVALID = -1,
+
+ GTT_TYPE_GGTT_PTE,
+
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_ENTRY,
+
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+ GTT_TYPE_PPGTT_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PML4_PT,
+
+ GTT_TYPE_MAX,
+} intel_gvt_gtt_type_t;
+
+struct intel_vgpu_mm {
+ int type;
+ bool initialized;
+ bool shadowed;
+
+ int page_table_entry_type;
+ u32 page_table_entry_size;
+ u32 page_table_entry_cnt;
+ void *virtual_page_table;
+ void *shadow_page_table;
+
+ int page_table_level;
+ bool has_shadow_page_table;
+ u32 pde_base_index;
+
+ struct list_head list;
+ struct kref ref;
+ atomic_t pincount;
+ struct list_head lru_list;
+ struct intel_vgpu *vgpu;
+};
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+ struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index);
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+ struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index);
+
+#define ggtt_get_guest_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_set_guest_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_get_shadow_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ggtt_set_shadow_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_get_guest_root_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_set_guest_root_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_get_shadow_root_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_set_shadow_root_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
+extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+ int mm_type, void *virtual_page_table, int page_table_level,
+ u32 pde_base_index);
+extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+
+struct intel_vgpu_guest_page;
+
+struct intel_vgpu_scratch_pt {
+ struct page *page;
+ unsigned long page_mfn;
+};
+
+
+struct intel_vgpu_gtt {
+ struct intel_vgpu_mm *ggtt_mm;
+ unsigned long active_ppgtt_mm_bitmap;
+ struct list_head mm_list_head;
+ DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ atomic_t n_write_protected_guest_page;
+ struct list_head oos_page_list_head;
+ struct list_head post_shadow_list_head;
+ struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
+
+};
+
+extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+
+extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+
+extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry);
+
+struct intel_vgpu_oos_page;
+
+struct intel_vgpu_shadow_page {
+ void *vaddr;
+ struct page *page;
+ int type;
+ struct hlist_node node;
+ unsigned long mfn;
+};
+
+struct intel_vgpu_guest_page {
+ struct hlist_node node;
+ bool writeprotection;
+ unsigned long gfn;
+ int (*handler)(void *, u64, void *, int);
+ void *data;
+ unsigned long write_cnt;
+ struct intel_vgpu_oos_page *oos_page;
+};
+
+struct intel_vgpu_oos_page {
+ struct intel_vgpu_guest_page *guest_page;
+ struct list_head list;
+ struct list_head vm_list;
+ int id;
+ unsigned char mem[GTT_PAGE_SIZE];
+};
+
+#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
+
+struct intel_vgpu_ppgtt_spt {
+ struct intel_vgpu_shadow_page shadow_page;
+ struct intel_vgpu_guest_page guest_page;
+ int guest_page_type;
+ atomic_t refcount;
+ struct intel_vgpu *vgpu;
+ DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+ struct list_head post_shadow_list;
+};
+
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page,
+ unsigned long gfn,
+ int (*handler)(void *gp, u64, void *, int),
+ void *data);
+
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
+
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
+
+static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
+{
+ kref_get(&mm->ref);
+}
+
+static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
+{
+ kref_put(&mm->ref, intel_vgpu_destroy_mm);
+}
+
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
+
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
+
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
+ unsigned long gma);
+
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry);
+
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes);
+
+#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 927f4579f5b6..398877c3d2fd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -19,12 +19,23 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#include <linux/types.h>
#include <xen/xen.h>
+#include <linux/kthread.h>
#include "i915_drv.h"
+#include "gvt.h"
struct intel_gvt_host intel_gvt_host;
@@ -33,6 +44,16 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
+static const struct intel_gvt_ops intel_gvt_ops = {
+ .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
+ .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
+ .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
+ .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
+ .vgpu_create = intel_gvt_create_vgpu,
+ .vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_reset = intel_gvt_reset_vgpu,
+};
+
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
* @gvt: intel gvt device
@@ -47,6 +68,8 @@ static const char * const supported_hypervisors[] = {
*/
int intel_gvt_init_host(void)
{
+ int ret;
+
if (intel_gvt_host.initialized)
return 0;
@@ -61,10 +84,12 @@ int intel_gvt_init_host(void)
symbol_get(xengt_mpt), "xengt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
} else {
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
/* not in Xen. Try KVMGT */
intel_gvt_host.mpt = try_then_request_module(
- symbol_get(kvmgt_mpt), "kvm");
+ symbol_get(kvmgt_mpt), "kvmgt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+#endif
}
/* Fail to load MPT modules - bail out */
@@ -72,7 +97,8 @@ int intel_gvt_init_host(void)
return -EINVAL;
/* Try to detect if we're running in host instead of VM. */
- if (!intel_gvt_hypervisor_detect_host())
+ ret = intel_gvt_hypervisor_detect_host();
+ if (ret)
return -ENODEV;
gvt_dbg_core("Running with hypervisor %s in host mode\n",
@@ -84,9 +110,67 @@ int intel_gvt_init_host(void)
static void init_device_info(struct intel_gvt *gvt)
{
- if (IS_BROADWELL(gvt->dev_priv))
- gvt->device_info.max_support_vgpus = 8;
- /* This function will grow large in GVT device model patches. */
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = 256;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
+ }
+ info->msi_cap_offset = pdev->msi_cap;
+}
+
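+/*
+ * The service thread sleeps until a service request bit is set and handles
+ * the requested work (currently vblank emulation) under the gvt lock.
+ */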
+static int gvt_service_thread(void *data)
+{
+ struct intel_gvt *gvt = (struct intel_gvt *)data;
+ int ret;
+
+ gvt_dbg_core("service thread start\n");
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(gvt->service_thread_wq,
+ kthread_should_stop() || gvt->service_request);
+
+ if (kthread_should_stop())
+ break;
+
+ if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
+ continue;
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
+ (void *)&gvt->service_request)) {
+ mutex_lock(&gvt->lock);
+ intel_gvt_emulate_vblank(gvt);
+ mutex_unlock(&gvt->lock);
+ }
+ }
+
+ return 0;
+}
+
+static void clean_service_thread(struct intel_gvt *gvt)
+{
+ kthread_stop(gvt->service_thread);
+}
+
+static int init_service_thread(struct intel_gvt *gvt)
+{
+ init_waitqueue_head(&gvt->service_thread_wq);
+
+ gvt->service_thread = kthread_run(gvt_service_thread,
+ gvt, "gvt_service_thread");
+ if (IS_ERR(gvt->service_thread)) {
+ gvt_err("fail to start service thread.\n");
+ return PTR_ERR(gvt->service_thread);
+ }
+ return 0;
}
/**
@@ -99,14 +183,26 @@ static void init_device_info(struct intel_gvt *gvt)
*/
void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
- struct intel_gvt *gvt = &dev_priv->gvt;
+ struct intel_gvt *gvt = to_gvt(dev_priv);
- if (WARN_ON(!gvt->initialized))
+ if (WARN_ON(!gvt))
return;
- /* Other de-initialization of GVT components will be introduced. */
+ clean_service_thread(gvt);
+ intel_gvt_clean_cmd_parser(gvt);
+ intel_gvt_clean_sched_policy(gvt);
+ intel_gvt_clean_workload_scheduler(gvt);
+ intel_gvt_clean_opregion(gvt);
+ intel_gvt_clean_gtt(gvt);
+ intel_gvt_clean_irq(gvt);
+ intel_gvt_clean_mmio_info(gvt);
+ intel_gvt_free_firmware(gvt);
+
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_clean_vgpu_types(gvt);
- gvt->initialized = false;
+ kfree(dev_priv->gvt);
+ dev_priv->gvt = NULL;
}
/**
@@ -122,7 +218,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
*/
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
- struct intel_gvt *gvt = &dev_priv->gvt;
+ struct intel_gvt *gvt;
+ int ret;
+
/*
* Cannot initialize GVT device without intel_gvt_host gets
* initialized first.
@@ -130,16 +228,91 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!intel_gvt_host.initialized))
return -EINVAL;
- if (WARN_ON(gvt->initialized))
+ if (WARN_ON(dev_priv->gvt))
return -EEXIST;
+ gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
+ if (!gvt)
+ return -ENOMEM;
+
gvt_dbg_core("init gvt device\n");
+ mutex_init(&gvt->lock);
+ gvt->dev_priv = dev_priv;
+
init_device_info(gvt);
- /*
- * Other initialization of GVT components will be introduce here.
- */
- gvt_dbg_core("gvt device creation is done\n");
- gvt->initialized = true;
+
+ ret = intel_gvt_setup_mmio_info(gvt);
+ if (ret)
+ return ret;
+
+ ret = intel_gvt_load_firmware(gvt);
+ if (ret)
+ goto out_clean_mmio_info;
+
+ ret = intel_gvt_init_irq(gvt);
+ if (ret)
+ goto out_free_firmware;
+
+ ret = intel_gvt_init_gtt(gvt);
+ if (ret)
+ goto out_clean_irq;
+
+ ret = intel_gvt_init_opregion(gvt);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_gvt_init_workload_scheduler(gvt);
+ if (ret)
+ goto out_clean_opregion;
+
+ ret = intel_gvt_init_sched_policy(gvt);
+ if (ret)
+ goto out_clean_workload_scheduler;
+
+ ret = intel_gvt_init_cmd_parser(gvt);
+ if (ret)
+ goto out_clean_sched_policy;
+
+ ret = init_service_thread(gvt);
+ if (ret)
+ goto out_clean_cmd_parser;
+
+ ret = intel_gvt_init_vgpu_types(gvt);
+ if (ret)
+ goto out_clean_thread;
+
+ ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
+ &intel_gvt_ops);
+ if (ret) {
+ gvt_err("failed to register gvt-g host device: %d\n", ret);
+ goto out_clean_types;
+ }
+
+ gvt_dbg_core("gvt device initialization is done\n");
+ dev_priv->gvt = gvt;
return 0;
+
+out_clean_types:
+ intel_gvt_clean_vgpu_types(gvt);
+out_clean_thread:
+ clean_service_thread(gvt);
+out_clean_cmd_parser:
+ intel_gvt_clean_cmd_parser(gvt);
+out_clean_sched_policy:
+ intel_gvt_clean_sched_policy(gvt);
+out_clean_workload_scheduler:
+ intel_gvt_clean_workload_scheduler(gvt);
+out_clean_opregion:
+ intel_gvt_clean_opregion(gvt);
+out_clean_gtt:
+ intel_gvt_clean_gtt(gvt);
+out_clean_irq:
+ intel_gvt_clean_irq(gvt);
+out_free_firmware:
+ intel_gvt_free_firmware(gvt);
+out_clean_mmio_info:
+ intel_gvt_clean_mmio_info(gvt);
+ kfree(gvt);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index fb619a6e519d..3d4223e8ebe3 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_H_
@@ -26,6 +35,17 @@
#include "debug.h"
#include "hypercall.h"
+#include "mmio.h"
+#include "reg.h"
+#include "interrupt.h"
+#include "gtt.h"
+#include "display.h"
+#include "edid.h"
+#include "execlist.h"
+#include "scheduler.h"
+#include "sched_policy.h"
+#include "render.h"
+#include "cmd_parser.h"
#define GVT_MAX_VGPU 8
@@ -45,25 +65,379 @@ extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
- /* This data structure will grow bigger in GVT device model patches */
+ u32 cfg_space_size;
+ u32 mmio_size;
+ u32 mmio_bar;
+ unsigned long msi_cap_offset;
+ u32 gtt_start_offset;
+ u32 gtt_entry_size;
+ u32 gtt_entry_size_shift;
+ int gmadr_bytes_in_cmd;
+ u32 max_surface_size;
+};
+
+/* GM resources owned by a vGPU */
+struct intel_vgpu_gm {
+ u64 aperture_sz;
+ u64 hidden_sz;
+ struct drm_mm_node low_gm_node;
+ struct drm_mm_node high_gm_node;
+};
+
+#define INTEL_GVT_MAX_NUM_FENCES 32
+
+/* Fences owned by a vGPU */
+struct intel_vgpu_fence {
+ struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+ u32 base;
+ u32 size;
+};
+
+struct intel_vgpu_mmio {
+ void *vreg;
+ void *sreg;
+ bool disable_warn_untrack;
+};
+
+#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
+#define INTEL_GVT_MAX_BAR_NUM 4
+
+struct intel_vgpu_pci_bar {
+ u64 size;
+ bool tracked;
+};
+
+struct intel_vgpu_cfg_space {
+ unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
+ struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+};
+
+#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
+
+#define INTEL_GVT_MAX_PIPE 4
+
+struct intel_vgpu_irq {
+ bool irq_warn_once[INTEL_GVT_EVENT_MAX];
+ DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ INTEL_GVT_EVENT_MAX);
+};
+
+struct intel_vgpu_opregion {
+ void *va;
+ u32 gfn[INTEL_GVT_OPREGION_PAGES];
+ struct page *pages[INTEL_GVT_OPREGION_PAGES];
+};
+
+#define vgpu_opregion(vgpu) (&(vgpu->opregion))
+
+#define INTEL_GVT_MAX_PORT 5
+
+struct intel_vgpu_display {
+ struct intel_vgpu_i2c_edid i2c_edid;
+ struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+ struct intel_vgpu_sbi sbi;
};
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+ bool active;
+ bool resetting;
+ void *sched_data;
+
+ struct intel_vgpu_fence fence;
+ struct intel_vgpu_gm gm;
+ struct intel_vgpu_cfg_space cfg_space;
+ struct intel_vgpu_mmio mmio;
+ struct intel_vgpu_irq irq;
+ struct intel_vgpu_gtt gtt;
+ struct intel_vgpu_opregion opregion;
+ struct intel_vgpu_display display;
+ struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+ struct list_head workload_q_head[I915_NUM_ENGINES];
+ struct kmem_cache *workloads;
+ atomic_t running_workload_num;
+ DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+ struct i915_gem_context *shadow_ctx;
+ struct notifier_block shadow_ctx_notifier_block;
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+ struct {
+ struct device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+ struct rb_root cache;
+ struct mutex cache_lock;
+ void *vfio_group;
+ struct notifier_block iommu_notifier;
+ } vdev;
+#endif
+};
+
+struct intel_gvt_gm {
+ unsigned long vgpu_allocated_low_gm_size;
+ unsigned long vgpu_allocated_high_gm_size;
+};
+
+struct intel_gvt_fence {
+ unsigned long vgpu_allocated_fence_num;
+};
+
+#define INTEL_GVT_MMIO_HASH_BITS 9
+
+struct intel_gvt_mmio {
+ u32 *mmio_attribute;
+ DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
+};
+
+struct intel_gvt_firmware {
+ void *cfg_space;
+ void *mmio;
+ bool firmware_loaded;
+};
+
+struct intel_gvt_opregion {
+ void __iomem *opregion_va;
+ u32 opregion_pa;
+};
+
+#define NR_MAX_INTEL_VGPU_TYPES 20
+struct intel_vgpu_type {
+ char name[16];
+ unsigned int max_instance;
+ unsigned int avail_instance;
+ unsigned int low_gm_size;
+ unsigned int high_gm_size;
+ unsigned int fence;
};
struct intel_gvt {
struct mutex lock;
- bool initialized;
-
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
+ struct intel_gvt_gm gm;
+ struct intel_gvt_fence fence;
+ struct intel_gvt_mmio mmio;
+ struct intel_gvt_firmware firmware;
+ struct intel_gvt_irq irq;
+ struct intel_gvt_gtt gtt;
+ struct intel_gvt_opregion opregion;
+ struct intel_gvt_workload_scheduler scheduler;
+ DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct intel_vgpu_type *types;
+ unsigned int num_types;
+
+ struct task_struct *service_thread;
+ wait_queue_head_t service_thread_wq;
+ unsigned long service_request;
+};
+
+static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
+{
+ return i915->gvt;
+}
+
+enum {
+ INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};
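+/* Kick the GVT service thread to handle the given request bit */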
+static inline void intel_gvt_request_service(struct intel_gvt *gvt,
+ int service)
+{
+ set_bit(service, (void *)&gvt->service_request);
+ wake_up(&gvt->service_thread_wq);
+}
+
+void intel_gvt_free_firmware(struct intel_gvt *gvt);
+int intel_gvt_load_firmware(struct intel_gvt *gvt);
+
+/* Size conversion helpers and GM/fence resources reserved for the host */
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
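+/* Size of the GGTT itself in bytes: one 8-byte PTE per GGTT page */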
+#define gvt_ggtt_sz(gvt) \
+ ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+
+#define gvt_aperture_gmadr_base(gvt) (0)
+#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
+ + gvt_aperture_sz(gvt) - 1)
+
+#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
+ + gvt_aperture_sz(gvt))
+#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ + gvt_hidden_sz(gvt) - 1)
+
+#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+
+/* Aperture/GM space definitions for vGPU */
+#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
+#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
+#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
+#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_base(vgpu) \
+ (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
+
+#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_end(vgpu) \
+ (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
+#define vgpu_aperture_gmadr_end(vgpu) \
+ (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
+#define vgpu_hidden_gmadr_end(vgpu) \
+ (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
+
+#define vgpu_fence_base(vgpu) (vgpu->fence.base)
+#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+
+struct intel_vgpu_creation_params {
+ __u64 handle;
+ __u64 low_gm_sz; /* in MB */
+ __u64 high_gm_sz; /* in MB */
+ __u64 fence_sz;
+ __s32 primary;
+ __u64 vgpu_id;
+};
+
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param);
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+ u32 fence, u64 value);
+
+/* Macros for easily accessing vGPU virtual/shadow register */
+#define vgpu_vreg(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg8(vgpu, reg) \
+ (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg16(vgpu, reg) \
+ (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg64(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg8(vgpu, reg) \
+ (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg16(vgpu, reg) \
+ (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg64(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+
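+/* Iterate over all vGPUs in the IDR pool, skipping those not marked active */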
+#define for_each_active_vgpu(gvt, vgpu, id) \
+ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
+ for_each_if(vgpu->active)
+
+static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
+ u32 offset, u32 val, bool low)
+{
+ u32 *pval;
+
+ /* BAR offset should be 32-bit aligned */
+ offset = rounddown(offset, 4);
+ pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+
+ if (low) {
+ /*
+ * only update bit 31 - bit 4,
+ * leave the bit 3 - bit 0 unchanged.
+ */
+ *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
+ }
+}
+
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
+
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type);
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
+
+
+/* validating GM functions */
+#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
+ ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
+ (gmadr <= vgpu_aperture_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
+ ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
+ (gmadr <= vgpu_hidden_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_valid(vgpu, gmadr) \
+ ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
+ (vgpu_gmadr_is_hidden(vgpu, gmadr))))
+
+#define gvt_gmadr_is_aperture(gvt, gmadr) \
+ ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
+ (gmadr <= gvt_aperture_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_hidden(gvt, gmadr) \
+ ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
+ (gmadr <= gvt_hidden_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_valid(gvt, gmadr) \
+ (gvt_gmadr_is_aperture(gvt, gmadr) || \
+ gvt_gmadr_is_hidden(gvt, gmadr))
+
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+ unsigned long *h_index);
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+ unsigned long *g_index);
+
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+void intel_gvt_clean_opregion(struct intel_gvt *gvt);
+int intel_gvt_init_opregion(struct intel_gvt *gvt);
+
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+int setup_vgpu_mmio(struct intel_vgpu *vgpu);
+void populate_pvinfo_page(struct intel_vgpu *vgpu);
+
+struct intel_gvt_ops {
+ int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
+ struct intel_vgpu_type *);
+ void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_reset)(struct intel_vgpu *);
+};
+
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
new file mode 100644
index 000000000000..522809710312
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -0,0 +1,2848 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Pei Zhang <pei.zhang@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+/* XXX FIXME i915 has changed PP_XXX definition */
+#define PCH_PP_STATUS _MMIO(0xc7200)
+#define PCH_PP_CONTROL _MMIO(0xc7204)
+#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
+#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
+#define PCH_PP_DIVISOR _MMIO(0xc7210)
+
+/* Register contains RO bits */
+#define F_RO (1 << 0)
+/* Register contains graphics address */
+#define F_GMADR (1 << 1)
+/* Mode mask registers with high 16 bits as the mask bits */
+#define F_MODE_MASK (1 << 2)
+/* This reg can be accessed by GPU commands */
+#define F_CMD_ACCESS (1 << 3)
+/* This reg has been accessed by a VM */
+#define F_ACCESSED (1 << 4)
+/* This reg has been accessed through GPU commands */
+#define F_CMD_ACCESSED (1 << 5)
+/* This reg could be accessed by unaligned address */
+#define F_UNALIGN (1 << 6)
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
+{
+ if (IS_BROADWELL(gvt->dev_priv))
+ return D_BDW;
+ else if (IS_SKYLAKE(gvt->dev_priv))
+ return D_SKL;
+
+ return 0;
+}
+
+bool intel_gvt_match_device(struct intel_gvt *gvt,
+ unsigned long device)
+{
+ return intel_gvt_get_device_type(gvt) & device;
+}
+
+static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+}
+
+static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+}
+
+static int new_mmio_info(struct intel_gvt *gvt,
+ u32 offset, u32 flags, u32 size,
+ u32 addr_mask, u32 ro_mask, u32 device,
+ void *read, void *write)
+{
+ struct intel_gvt_mmio_info *info, *p;
+ u32 start, end, i;
+
+ if (!intel_gvt_match_device(gvt, device))
+ return 0;
+
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+
+ start = offset;
+ end = offset + size;
+
+ for (i = start; i < end; i += 4) {
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->offset = i;
+ p = intel_gvt_find_mmio_info(gvt, info->offset);
+ if (p)
+ gvt_err("dup mmio definition offset %x\n",
+ info->offset);
+ info->size = size;
+ info->length = (i + 4) < end ? 4 : (end - i);
+ info->addr_mask = addr_mask;
+ info->device = device;
+ info->read = read ? read : intel_vgpu_default_mmio_read;
+ info->write = write ? write : intel_vgpu_default_mmio_write;
+ gvt->mmio.mmio_attribute[info->offset / 4] = flags;
+ INIT_HLIST_NODE(&info->node);
+ hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+ }
+ return 0;
+}
+
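+/* Map an engine MMIO offset to its ring id by matching the engine's mmio_base */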
+static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+{
+ enum intel_engine_id id;
+ struct intel_engine_cs *engine;
+
+ reg &= ~GENMASK(11, 0);
+ for_each_engine(engine, gvt->dev_priv, id) {
+ if (engine->mmio_base == reg)
+ return id;
+ }
+ return -1;
+}
+
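+/*
+ * Each GEN6+ fence register is an 8-byte LO/HI pair, so fence numbers map to
+ * 8-byte strides relative to FENCE_REG_GEN6_LO(0).
+ */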
+#define offset_to_fence_num(offset) \
+ ((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
+
+#define fence_num_to_offset(num) \
+ (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
+
+static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
+ unsigned int fence_num, void *p_data, unsigned int bytes)
+{
+ if (fence_num >= vgpu_fence_sz(vgpu)) {
+ gvt_err("vgpu%d: found oob fence register access\n",
+ vgpu->id);
+ gvt_err("vgpu%d: total fence num %d access fence num %d\n",
+ vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+ memset(p_data, 0, bytes);
+ }
+ return 0;
+}
+
+static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ int ret;
+
+ ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
+ p_data, bytes);
+ if (ret)
+ return ret;
+ read_vreg(vgpu, off, p_data, bytes);
+ return 0;
+}
+
+static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int fence_num = offset_to_fence_num(off);
+ int ret;
+
+ ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
+ if (ret)
+ return ret;
+ write_vreg(vgpu, off, p_data, bytes);
+
+ intel_vgpu_write_fence(vgpu, fence_num,
+ vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
+ return 0;
+}
+
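+/*
+ * For "mode mask" registers the high 16 bits of the written value act as a
+ * write mask for the low 16 bits: only bits whose mask bit is set take the
+ * new value, the remaining bits keep their old value.
+ */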
+#define CALC_MODE_MASK_REG(old, new) \
+ (((new) & GENMASK(31, 16)) \
+ | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
+ | ((new) & ((new) >> 16))))
+
+static int mul_force_wake_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 old, new;
+ uint32_t ack_reg_offset;
+
+ old = vgpu_vreg(vgpu, offset);
+ new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ switch (offset) {
+ case FORCEWAKE_RENDER_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
+ break;
+ case FORCEWAKE_BLITTER_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
+ break;
+ case FORCEWAKE_MEDIA_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
+ break;
+ default:
+ /* should not hit here */
+ gvt_err("invalid forcewake offset 0x%x\n", offset);
+ return 1;
+ }
+ } else {
+ ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
+ }
+
+ vgpu_vreg(vgpu, offset) = new;
+ vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
+ return 0;
+}
+
+static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes, unsigned long bitmap)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+
+ vgpu->resetting = true;
+
+ intel_vgpu_stop_schedule(vgpu);
+ /*
+ * The current_vgpu will be set to NULL after the scheduler
+ * is stopped, when the reset is triggered by the current vgpu.
+ */
+ if (scheduler->current_vgpu == NULL) {
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ }
+
+ intel_vgpu_reset_execlist(vgpu, bitmap);
+
+ /* full GPU reset */
+ if (bitmap == 0xff) {
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_vgpu_clean_gtt(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ setup_vgpu_mmio(vgpu);
+ populate_pvinfo_page(vgpu);
+ intel_vgpu_init_gtt(vgpu);
+ }
+
+ vgpu->resetting = false;
+
+ return 0;
+}
+
+static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+ u64 bitmap = 0;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & GEN6_GRDOM_FULL) {
+ gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
+ bitmap = 0xff;
+ }
+ if (data & GEN6_GRDOM_RENDER) {
+ gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+ bitmap |= (1 << RCS);
+ }
+ if (data & GEN6_GRDOM_MEDIA) {
+ gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+ bitmap |= (1 << VCS);
+ }
+ if (data & GEN6_GRDOM_BLT) {
+ gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+ bitmap |= (1 << BCS);
+ }
+ if (data & GEN6_GRDOM_VECS) {
+ gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+ bitmap |= (1 << VECS);
+ }
+ if (data & GEN8_GRDOM_MEDIA2) {
+ gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+ if (HAS_BSD2(vgpu->gvt->dev_priv))
+ bitmap |= (1 << VCS2);
+ }
+ return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+}
+
+static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
+}
+
+static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
+}
+
+static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
+ vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+
+ } else
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+ ~(PP_ON | PP_SEQUENCE_POWER_DOWN
+ | PP_CYCLE_DELAY_ACTIVE);
+ return 0;
+}
+
+static int transconf_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
+ vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
+ return 0;
+}
+
+static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
+ vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
+ else
+ vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
+
+ if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
+ vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
+
+ return 0;
+}
+
+static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = (1 << 17);
+ return 0;
+}
+
+static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = 3;
+ return 0;
+}
+
+static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = (0x2f << 16);
+ return 0;
+}
+
+static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & PIPECONF_ENABLE)
+ vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ intel_gvt_check_vblank_emulation(vgpu->gvt);
+ return 0;
+}
+
+static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
+ vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
+ } else {
+ vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
+ if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
+ vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+ &= ~DP_TP_STATUS_AUTOTRAIN_DONE;
+ }
+ return 0;
+}
+
+static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
+ return 0;
+}
+
+#define FDI_LINK_TRAIN_PATTERN1 0
+#define FDI_LINK_TRAIN_PATTERN2 1
+
+static int fdi_auto_training_started(struct intel_vgpu *vgpu)
+{
+ u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+ u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
+ u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+
+ if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
+ (rx_ctl & FDI_RX_ENABLE) &&
+ (rx_ctl & FDI_AUTO_TRAINING) &&
+ (tx_ctl & DP_TP_CTL_ENABLE) &&
+ (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
+ return 1;
+ else
+ return 0;
+}
+
+static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
+ enum pipe pipe, unsigned int train_pattern)
+{
+ i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
+ unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
+ unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
+ unsigned int fdi_iir_check_bits;
+
+ fdi_rx_imr = FDI_RX_IMR(pipe);
+ fdi_tx_ctl = FDI_TX_CTL(pipe);
+ fdi_rx_ctl = FDI_RX_CTL(pipe);
+
+ if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
+ fdi_iir_check_bits = FDI_RX_BIT_LOCK;
+ } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
+ fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
+ } else {
+ gvt_err("Invalid train pattern %d\n", train_pattern);
+ return -EINVAL;
+ }
+
+ fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
+ fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
+
+ /* If the interrupt bit is masked in IMR, do not report training done */
+ if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+ return 0;
+
+ if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+ == fdi_tx_check_bits)
+ && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+ == fdi_rx_check_bits))
+ return 1;
+ else
+ return 0;
+}
+
+#define INVALID_INDEX (~0U)
+
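+/*
+ * Convert a register offset into a pipe/port index, given the offsets of the
+ * first two instances (which define the per-instance stride) and the offset
+ * of the last instance. Returns INVALID_INDEX for offsets outside the range.
+ */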
+static unsigned int calc_index(unsigned int offset, unsigned int start,
+ unsigned int next, unsigned int end, i915_reg_t i915_end)
+{
+ unsigned int range = next - start;
+
+ if (!end)
+ end = i915_mmio_reg_offset(i915_end);
+ if (offset < start || offset > end)
+ return INVALID_INDEX;
+ offset -= start;
+ return offset / range;
+}
+
+#define FDI_RX_CTL_TO_PIPE(offset) \
+ calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
+
+#define FDI_TX_CTL_TO_PIPE(offset) \
+ calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
+
+#define FDI_RX_IMR_TO_PIPE(offset) \
+ calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
+
+static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ i915_reg_t fdi_rx_iir;
+ unsigned int index;
+ int ret;
+
+ if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_CTL_TO_PIPE(offset);
+ else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_TX_CTL_TO_PIPE(offset);
+ else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_IMR_TO_PIPE(offset);
+ else {
+ gvt_err("Unsupport registers %x\n", offset);
+ return -EINVAL;
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ fdi_rx_iir = FDI_RX_IIR(index);
+
+ ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+
+ ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+
+ if (offset == _FDI_RXA_CTL)
+ if (fdi_auto_training_started(vgpu))
+ vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+ DP_TP_STATUS_AUTOTRAIN_DONE;
+ return 0;
+}
+
+#define DP_TP_CTL_TO_PORT(offset) \
+ calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
+
+static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ i915_reg_t status_reg;
+ unsigned int index;
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ index = DP_TP_CTL_TO_PORT(offset);
+ data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
+ if (data == 0x2) {
+ status_reg = DP_TP_STATUS(index);
+ vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+ }
+ return 0;
+}
+
+static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 reg_val;
+ u32 sticky_mask;
+
+ reg_val = *((u32 *)p_data);
+ sticky_mask = GENMASK(27, 26) | (1 << 24);
+
+ vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
+ (vgpu_vreg(vgpu, offset) & sticky_mask);
+ vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
+ return 0;
+}
+
+static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
+ vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ return 0;
+}
+
+static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & FDI_MPHY_IOSFSB_RESET_CTL)
+ vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
+ else
+ vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
+ return 0;
+}
+
+#define DSPSURF_TO_PIPE(offset) \
+ calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
+
+static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ unsigned int index = DSPSURF_TO_PIPE(offset);
+ i915_reg_t surflive_reg = DSPSURFLIVE(index);
+ int flip_event[] = {
+ [PIPE_A] = PRIMARY_A_FLIP_DONE,
+ [PIPE_B] = PRIMARY_B_FLIP_DONE,
+ [PIPE_C] = PRIMARY_C_FLIP_DONE,
+ };
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+ set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+ return 0;
+}
+
+#define SPRSURF_TO_PIPE(offset) \
+ calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
+
+static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int index = SPRSURF_TO_PIPE(offset);
+ i915_reg_t surflive_reg = SPRSURFLIVE(index);
+ int flip_event[] = {
+ [PIPE_A] = SPRITE_A_FLIP_DONE,
+ [PIPE_B] = SPRITE_B_FLIP_DONE,
+ [PIPE_C] = SPRITE_C_FLIP_DONE,
+ };
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+ set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+ return 0;
+}
+
+static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
+ unsigned int reg)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum intel_gvt_event_type event;
+
+ if (reg == _DPA_AUX_CH_CTL)
+ event = AUX_CHANNEL_A;
+ else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+ event = AUX_CHANNEL_B;
+ else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+ event = AUX_CHANNEL_C;
+ else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+ event = AUX_CHANNEL_D;
+ else {
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ return 0;
+}
+
+static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
+ unsigned int reg, int len, bool data_valid)
+{
+ /* mark transaction done */
+ value |= DP_AUX_CH_CTL_DONE;
+ value &= ~DP_AUX_CH_CTL_SEND_BUSY;
+ value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
+
+ if (data_valid)
+ value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
+ else
+ value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
+
+ /* message size */
+ value &= ~(0xf << 20);
+ value |= (len << 20);
+ vgpu_vreg(vgpu, reg) = value;
+
+ if (value & DP_AUX_CH_CTL_INTERRUPT)
+ return trigger_aux_channel_interrupt(vgpu, reg);
+ return 0;
+}
+
+static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
+ uint8_t t)
+{
+ if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
+ /* training pattern 1 for CR */
+ /* set LANE0_CR_DONE, LANE1_CR_DONE */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
+ /* set LANE2_CR_DONE, LANE3_CR_DONE */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_TRAINING_PATTERN_2) {
+ /* training pattern 2 for EQ */
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* set INTERLANE_ALIGN_DONE */
+ dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
+ DPCD_INTERLANE_ALIGN_DONE;
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_LINK_TRAINING_DISABLED) {
+ /* finish link training */
+ /* set sink status as synchronized */
+ dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
+ }
+}
+
+#define _REG_HSW_DP_AUX_CH_CTL(dp) \
+ ((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
+
+#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
+
+#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
+
+#define dpy_is_valid_port(port) \
+ (((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
+
+static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int msg, addr, ctrl, op, len;
+ int port_index = OFFSET_TO_DP_AUX_PORT(offset);
+ struct intel_vgpu_dpcd_data *dpcd = NULL;
+ struct intel_vgpu_port *port = NULL;
+ u32 data;
+
+ if (!dpy_is_valid_port(port_index)) {
+ gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+ return 0;
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
+ offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+ /* SKL DPB/C/D aux ctl register changed */
+ return 0;
+ } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+ offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
+ /* write to the data registers */
+ return 0;
+ }
+
+ if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* just want to clear the sticky bits */
+ vgpu_vreg(vgpu, offset) = 0;
+ return 0;
+ }
+
+ port = &display->ports[port_index];
+ dpcd = port->dpcd;
+
+ /* read out message from DATA1 register */
+ msg = vgpu_vreg(vgpu, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ len = msg & 0xff;
+ op = ctrl >> 4;
+
+ if (op == GVT_AUX_NATIVE_WRITE) {
+ int t;
+ uint8_t buf[16];
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Write request exceeds what we support;
+ * DPCD spec: When a Source Device is writing a DPCD
+ * address not supported by the Sink Device, the Sink
+ * Device shall reply with AUX NACK and “M” equal to
+ * zero.
+ */
+
+ /* NAK the write */
+ vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
+ return 0;
+ }
+
+ /*
+ * Write request format: (command + address) occupies
+ * 3 bytes, followed by (len + 1) bytes of data.
+ */
+ if (WARN_ON((len + 4) > AUX_BURST_SIZE))
+ return -EINVAL;
+
+ /* unpack data from vreg to buf */
+ for (t = 0; t < 4; t++) {
+ u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
+
+ buf[t * 4] = (r >> 24) & 0xff;
+ buf[t * 4 + 1] = (r >> 16) & 0xff;
+ buf[t * 4 + 2] = (r >> 8) & 0xff;
+ buf[t * 4 + 3] = r & 0xff;
+ }
+
+ /* write to virtual DPCD */
+ if (dpcd && dpcd->data_valid) {
+ for (t = 0; t <= len; t++) {
+ int p = addr + t;
+
+ dpcd->data[p] = buf[t];
+ /* check for link training */
+ if (p == DPCD_TRAINING_PATTERN_SET)
+ dp_aux_ch_ctl_link_training(dpcd,
+ buf[t]);
+ }
+ }
+
+ /* ACK the write */
+ vgpu_vreg(vgpu, offset + 4) = 0;
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
+ dpcd && dpcd->data_valid);
+ return 0;
+ }
+
+ if (op == GVT_AUX_NATIVE_READ) {
+ int idx, i, ret = 0;
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Read request exceeds what we support;
+ * DPCD spec: A Sink Device receiving a Native AUX CH
+ * read request for an unsupported DPCD address must
+ * reply with an AUX ACK and read data set equal to
+ * zero instead of replying with AUX NACK.
+ */
+
+ /* ACK the read */
+ vgpu_vreg(vgpu, offset + 4) = 0;
+ vgpu_vreg(vgpu, offset + 8) = 0;
+ vgpu_vreg(vgpu, offset + 12) = 0;
+ vgpu_vreg(vgpu, offset + 16) = 0;
+ vgpu_vreg(vgpu, offset + 20) = 0;
+
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+ true);
+ return 0;
+ }
+
+ for (idx = 1; idx <= 5; idx++) {
+ /* clear the data registers */
+ vgpu_vreg(vgpu, offset + 4 * idx) = 0;
+ }
+
+ /*
+ * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
+ */
+ if (WARN_ON((len + 2) > AUX_BURST_SIZE))
+ return -EINVAL;
+
+ /* read from virtual DPCD to vreg */
+ /* first 4 bytes: [ACK][addr][addr+1][addr+2] */
+ if (dpcd && dpcd->data_valid) {
+ for (i = 1; i <= (len + 1); i++) {
+ int t;
+
+ t = dpcd->data[addr + i - 1];
+ t <<= (24 - 8 * (i % 4));
+ ret |= t;
+
+ if ((i % 4 == 3) || (i == (len + 1))) {
+ vgpu_vreg(vgpu, offset +
+ (i / 4 + 1) * 4) = ret;
+ ret = 0;
+ }
+ }
+ }
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+ dpcd && dpcd->data_valid);
+ return 0;
+ }
+
+ /* i2c transaction starts */
+ intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
+
+ if (data & DP_AUX_CH_CTL_INTERRUPT)
+ trigger_aux_channel_interrupt(vgpu, offset);
+ return 0;
+}
+
+static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool vga_disable;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
+
+ gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
+ vga_disable ? "Disable" : "Enable");
+ return 0;
+}
+
+static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
+ unsigned int sbi_offset)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int num = display->sbi.number;
+ int i;
+
+ for (i = 0; i < num; ++i)
+ if (display->sbi.registers[i].offset == sbi_offset)
+ break;
+
+ if (i == num)
+ return 0;
+
+ return display->sbi.registers[i].value;
+}
+
+static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
+ unsigned int offset, u32 value)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int num = display->sbi.number;
+ int i;
+
+ for (i = 0; i < num; ++i) {
+ if (display->sbi.registers[i].offset == offset)
+ break;
+ }
+
+ if (i == num) {
+ if (num == SBI_REG_MAX) {
+ gvt_err("vgpu%d: SBI caching meets maximum limits\n",
+ vgpu->id);
+ return;
+ }
+ display->sbi.number++;
+ }
+
+ display->sbi.registers[i].offset = offset;
+ display->sbi.registers[i].value = value;
+}
+
+static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
+ unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
+ sbi_offset);
+ }
+ read_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
+ data |= SBI_READY;
+
+ data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+ data |= SBI_RESPONSE_SUCCESS;
+
+ vgpu_vreg(vgpu, offset) = data;
+
+ if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
+ unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+
+ write_virtual_sbi_register(vgpu, sbi_offset,
+ vgpu_vreg(vgpu, SBI_DATA));
+ }
+ return 0;
+}
+
+#define _vgtif_reg(x) \
+ (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
+
+static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool invalid_read = false;
+
+ read_vreg(vgpu, offset, p_data, bytes);
+
+ switch (offset) {
+ case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
+ if (offset + bytes > _vgtif_reg(vgt_id) + 4)
+ invalid_read = true;
+ break;
+ case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
+ _vgtif_reg(avail_rs.fence_num):
+ if (offset + bytes >
+ _vgtif_reg(avail_rs.fence_num) + 4)
+ invalid_read = true;
+ break;
+ case 0x78010: /* vgt_caps */
+ case 0x7881c:
+ break;
+ default:
+ invalid_read = true;
+ break;
+ }
+ if (invalid_read)
+ gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+ offset, bytes, *(u32 *)p_data);
+ return 0;
+}
+
+static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
+{
+ int ret = 0;
+
+ switch (notification) {
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
+ ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
+ break;
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
+ ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
+ break;
+ case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
+ ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
+ break;
+ case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
+ ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
+ break;
+ case VGT_G2V_EXECLIST_CONTEXT_CREATE:
+ case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
+ case 1: /* Remove this in guest driver. */
+ break;
+ default:
+ gvt_err("Invalid PV notification %d\n", notification);
+ }
+ return ret;
+}
+
+static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+ char *env[3] = {NULL, NULL, NULL};
+ char vmid_str[20];
+ char display_ready_str[20];
+
+ snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+ env[0] = display_ready_str;
+
+ snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
+ env[1] = vmid_str;
+
+ return kobject_uevent_env(kobj, KOBJ_ADD, env);
+}
+
+static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+ int ret;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ switch (offset) {
+ case _vgtif_reg(display_ready):
+ send_display_ready_uevent(vgpu, data ? 1 : 0);
+ break;
+ case _vgtif_reg(g2v_notify):
+ ret = handle_g2v_notification(vgpu, data);
+ break;
+ /* add xhot and yhot to the handled list to avoid error logs */
+ case 0x78830:
+ case 0x78834:
+ case _vgtif_reg(pdp[0].lo):
+ case _vgtif_reg(pdp[0].hi):
+ case _vgtif_reg(pdp[1].lo):
+ case _vgtif_reg(pdp[1].hi):
+ case _vgtif_reg(pdp[2].lo):
+ case _vgtif_reg(pdp[2].hi):
+ case _vgtif_reg(pdp[3].lo):
+ case _vgtif_reg(pdp[3].hi):
+ case _vgtif_reg(execlist_context_descriptor_lo):
+ case _vgtif_reg(execlist_context_descriptor_hi):
+ break;
+ default:
+ gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+ offset, bytes, data);
+ break;
+ }
+ return 0;
+}
+
+static int pf_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 val = *(u32 *)p_data;
+
+ if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
+ offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
+ offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
+ WARN_ONCE(true, "VM(%d): guest is trying to scale a plane\n",
+ vgpu->id);
+ return 0;
+ }
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
+}
+
+static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
+ vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
+ else
+ vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
+ return 0;
+}
+
+static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
+ vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
+ return 0;
+}
+
+static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mode;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ mode = vgpu_vreg(vgpu, offset);
+
+ if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
+ WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+ vgpu->id);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 trtte = *(u32 *)p_data;
+
+ if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
+ WARN(1, "VM(%d): Use physical address for TRTT!\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+ write_vreg(vgpu, offset, p_data, bytes);
+ /* TRTTE is not per-context */
+ I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+
+ return 0;
+}
+
+static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 val = *(u32 *)p_data;
+
+ if (val & 1) {
+ /* unblock hw logic */
+ I915_WRITE(_MMIO(offset), val);
+ }
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 v = 0;
+
+ if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
+ v |= (1 << 0);
+
+ if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
+ v |= (1 << 8);
+
+ if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
+ v |= (1 << 16);
+
+ if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
+ v |= (1 << 24);
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = *(u32 *)p_data;
+ u32 cmd = value & 0xff;
+ u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+
+ switch (cmd) {
+ case 0x6:
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below were read
+ * from a Skylake platform.
+ */
+ if (!*data0)
+ *data0 = 0x1e1a1100;
+ else
+ *data0 = 0x61514b3d;
+ break;
+ case 0x5:
+ *data0 |= 0x1;
+ break;
+ }
+
+ gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
+ vgpu->id, value, *data0);
+
+ value &= ~(1 << 31);
+ return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
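+/*
+ * Keep only the power-well "request" bits written by the guest and mirror
+ * each of them into the adjacent "state" bit, so a request immediately reads
+ * back as granted.
+ */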
+static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ v &= (1 << 31) | (1 << 29) | (1 << 9) |
+ (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+ v |= (v >> 1);
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
+}
+
+static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t reg = {.reg = offset};
+
+ switch (offset) {
+ case 0x4ddc:
+ vgpu_vreg(vgpu, offset) = 0x8000003c;
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ break;
+ case 0x42080:
+ vgpu_vreg(vgpu, offset) = 0x8000;
+ /* WaCompressedResourceDisplayNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ /* other bits are MBZ. */
+ v &= (1 << 31) | (1 << 30);
+ v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ struct intel_vgpu_execlist *execlist;
+ u32 data = *(u32 *)p_data;
+ int ret = 0;
+
+ if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+ return -EINVAL;
+
+ execlist = &vgpu->execlist[ring_id];
+
+ execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+ if (execlist->elsp_dwords.index == 3) {
+ ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ if (ret)
+ gvt_err("failed to submit workload on ring %d\n", ring_id);
+ }
+
+ ++execlist->elsp_dwords.index;
+ execlist->elsp_dwords.index &= 0x3;
+ return ret;
+}
+
+static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data = *(u32 *)p_data;
+ int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ bool enable_execlist;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
+ || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+ enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
+
+ gvt_dbg_core("EXECLIST %s on ring %d\n",
+ (enable_execlist ? "enabling" : "disabling"),
+ ring_id);
+
+ if (enable_execlist)
+ intel_vgpu_start_schedule(vgpu);
+ }
+ return 0;
+}
+
+static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ int rc = 0;
+ unsigned int id = 0;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, offset) = 0;
+
+ switch (offset) {
+ case 0x4260:
+ id = RCS;
+ break;
+ case 0x4264:
+ id = VCS;
+ break;
+ case 0x4268:
+ id = VCS2;
+ break;
+ case 0x426c:
+ id = BCS;
+ break;
+ case 0x4270:
+ id = VECS;
+ break;
+ default:
+ /* don't flag any engine for an unknown offset */
+ return -EINVAL;
+ }
+ set_bit(id, (void *)vgpu->tlb_handle_pending);
+
+ return rc;
+}
+
+static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ data |= RESET_CTL_READY_TO_RESET;
+ else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+ data &= ~RESET_CTL_READY_TO_RESET;
+
+ vgpu_vreg(vgpu, offset) = data;
+ return 0;
+}
+
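+/*
+ * Helpers for registering MMIO handlers. Arguments are: register (or
+ * ring-base prefix for the _RING_ variants), size in bytes, flags (F_*),
+ * address mask, read-only mask, device type (D_*), and the read/write
+ * handler callbacks.
+ */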
+#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
+ ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
+ f, s, am, rm, d, r, w); \
+ if (ret) \
+ return ret; \
+} while (0)
+
+#define MMIO_D(reg, d) \
+ MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_DH(reg, d, r, w) \
+ MMIO_F(reg, 4, 0, 0, 0, d, r, w)
+
+#define MMIO_DFH(reg, d, f, r, w) \
+ MMIO_F(reg, 4, f, 0, 0, d, r, w)
+
+#define MMIO_GM(reg, d, r, w) \
+ MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
+
+#define MMIO_RO(reg, d, f, rm, r, w) \
+ MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
+
+#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
+ MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
+} while (0)
+
+#define MMIO_RING_D(prefix, d) \
+ MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_RING_DFH(prefix, d, f, r, w) \
+ MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
+
+#define MMIO_RING_GM(prefix, d, r, w) \
+ MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
+
+#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
+ MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
+
+static int init_generic_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+
+ MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(SDEISR, D_ALL);
+
+ MMIO_RING_D(RING_HWSTAM, D_ALL);
+
+ MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+
+#define RING_REG(base) (base + 0x28)
+ MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x134)
+ MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+ MMIO_GM(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM(CCID, D_ALL, NULL, NULL);
+ MMIO_GM(0x12198, D_ALL, NULL, NULL);
+ MMIO_D(GEN7_CXT_SIZE, D_ALL);
+
+ MMIO_RING_D(RING_TAIL, D_ALL);
+ MMIO_RING_D(RING_HEAD, D_ALL);
+ MMIO_RING_D(RING_CTL, D_ALL);
+ MMIO_RING_D(RING_ACTHD, D_ALL);
+ MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+
+ /* RING MODE */
+#define RING_REG(base) (base + 0x29c)
+ MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+#undef RING_REG
+
+ MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+ MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+
+ MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_D(GAM_ECOCHK, D_ALL);
+ MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0x9030, D_ALL);
+ MMIO_D(0x20a0, D_ALL);
+ MMIO_D(0x2420, D_ALL);
+ MMIO_D(0x2430, D_ALL);
+ MMIO_D(0x2434, D_ALL);
+ MMIO_D(0x2438, D_ALL);
+ MMIO_D(0x243c, D_ALL);
+ MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+
+ /* display */
+ MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_D(0x602a0, D_ALL);
+
+ MMIO_D(0x65050, D_ALL);
+ MMIO_D(0x650b4, D_ALL);
+
+ MMIO_D(0xc4040, D_ALL);
+ MMIO_D(DERRMR, D_ALL);
+
+ MMIO_D(PIPEDSL(PIPE_A), D_ALL);
+ MMIO_D(PIPEDSL(PIPE_B), D_ALL);
+ MMIO_D(PIPEDSL(PIPE_C), D_ALL);
+ MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
+
+ MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+
+ MMIO_D(PIPESTAT(PIPE_A), D_ALL);
+ MMIO_D(PIPESTAT(PIPE_B), D_ALL);
+ MMIO_D(PIPESTAT(PIPE_C), D_ALL);
+ MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
+
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+ MMIO_D(CURCNTR(PIPE_A), D_ALL);
+ MMIO_D(CURCNTR(PIPE_B), D_ALL);
+ MMIO_D(CURCNTR(PIPE_C), D_ALL);
+
+ MMIO_D(CURPOS(PIPE_A), D_ALL);
+ MMIO_D(CURPOS(PIPE_B), D_ALL);
+ MMIO_D(CURPOS(PIPE_C), D_ALL);
+
+ MMIO_D(CURBASE(PIPE_A), D_ALL);
+ MMIO_D(CURBASE(PIPE_B), D_ALL);
+ MMIO_D(CURBASE(PIPE_C), D_ALL);
+
+ MMIO_D(0x700ac, D_ALL);
+ MMIO_D(0x710ac, D_ALL);
+ MMIO_D(0x720ac, D_ALL);
+
+ MMIO_D(0x70090, D_ALL);
+ MMIO_D(0x70094, D_ALL);
+ MMIO_D(0x70098, D_ALL);
+ MMIO_D(0x7009c, D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_A), D_ALL);
+ MMIO_D(DSPADDR(PIPE_A), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
+ MMIO_D(DSPPOS(PIPE_A), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_A), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_B), D_ALL);
+ MMIO_D(DSPADDR(PIPE_B), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
+ MMIO_D(DSPPOS(PIPE_B), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_B), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_C), D_ALL);
+ MMIO_D(DSPADDR(PIPE_C), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
+ MMIO_D(DSPPOS(PIPE_C), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_C), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_A), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
+ MMIO_D(SPRPOS(PIPE_A), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_A), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_A), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_A), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_B), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
+ MMIO_D(SPRPOS(PIPE_B), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_B), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_B), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_B), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_C), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
+ MMIO_D(SPRPOS(PIPE_C), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_C), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_C), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_C), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+
+ MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_A), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_B), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_C), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
+
+ MMIO_D(WM0_PIPEA_ILK, D_ALL);
+ MMIO_D(WM0_PIPEB_ILK, D_ALL);
+ MMIO_D(WM0_PIPEC_IVB, D_ALL);
+ MMIO_D(WM1_LP_ILK, D_ALL);
+ MMIO_D(WM2_LP_ILK, D_ALL);
+ MMIO_D(WM3_LP_ILK, D_ALL);
+ MMIO_D(WM1S_LP_ILK, D_ALL);
+ MMIO_D(WM2S_LP_IVB, D_ALL);
+ MMIO_D(WM3S_LP_IVB, D_ALL);
+
+ MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
+ MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
+ MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
+ MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
+
+ MMIO_D(0x48268, D_ALL);
+
+ MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
+ gmbus_mmio_write);
+ MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+
+ MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+
+ MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
+
+ MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+
+ MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
+ MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
+ MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
+
+ MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
+ MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
+ MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
+
+ MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
+
+ MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
+ MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
+ MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
+
+ MMIO_D(_FDI_RXA_MISC, D_ALL);
+ MMIO_D(_FDI_RXB_MISC, D_ALL);
+ MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
+ MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
+ MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
+ MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
+
+ MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
+ MMIO_D(PCH_PP_DIVISOR, D_ALL);
+ MMIO_D(PCH_PP_STATUS, D_ALL);
+ MMIO_D(PCH_LVDS, D_ALL);
+ MMIO_D(_PCH_DPLL_A, D_ALL);
+ MMIO_D(_PCH_DPLL_B, D_ALL);
+ MMIO_D(_PCH_FPA0, D_ALL);
+ MMIO_D(_PCH_FPA1, D_ALL);
+ MMIO_D(_PCH_FPB0, D_ALL);
+ MMIO_D(_PCH_FPB1, D_ALL);
+ MMIO_D(PCH_DREF_CONTROL, D_ALL);
+ MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
+ MMIO_D(PCH_DPLL_SEL, D_ALL);
+
+ MMIO_D(0x61208, D_ALL);
+ MMIO_D(0x6120c, D_ALL);
+ MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
+ MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
+
+ MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
+ MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+
+ MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
+ PORTA_HOTPLUG_STATUS_MASK
+ | PORTB_HOTPLUG_STATUS_MASK
+ | PORTC_HOTPLUG_STATUS_MASK
+ | PORTD_HOTPLUG_STATUS_MASK,
+ NULL, NULL);
+
+ MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
+ MMIO_D(FUSE_STRAP, D_ALL);
+ MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
+
+ MMIO_D(DISP_ARB_CTL, D_ALL);
+ MMIO_D(DISP_ARB_CTL2, D_ALL);
+
+ MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
+ MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
+ MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
+
+ MMIO_D(SOUTH_CHICKEN1, D_ALL);
+ MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
+ MMIO_D(_TRANSA_CHICKEN1, D_ALL);
+ MMIO_D(_TRANSB_CHICKEN1, D_ALL);
+ MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
+ MMIO_D(_TRANSA_CHICKEN2, D_ALL);
+ MMIO_D(_TRANSB_CHICKEN2, D_ALL);
+
+ MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
+ MMIO_D(ILK_DPFC_CONTROL, D_ALL);
+ MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
+ MMIO_D(ILK_DPFC_STATUS, D_ALL);
+ MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
+ MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
+ MMIO_D(ILK_FBC_RT_BASE, D_ALL);
+
+ MMIO_D(IPS_CTL, D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(0x60110, D_ALL);
+ MMIO_D(0x61110, D_ALL);
+ MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+
+ MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+ MMIO_D(SPLL_CTL, D_ALL);
+ MMIO_D(_WRPLL_CTL1, D_ALL);
+ MMIO_D(_WRPLL_CTL2, D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
+
+ MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
+ MMIO_D(0x46508, D_ALL);
+
+ MMIO_D(0x49080, D_ALL);
+ MMIO_D(0x49180, D_ALL);
+ MMIO_D(0x49280, D_ALL);
+
+ MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
+ MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
+ MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
+
+ MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
+ MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
+ MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
+
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
+
+ MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
+ MMIO_D(SBI_ADDR, D_ALL);
+ MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
+ MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
+ MMIO_D(PIXCLK_GATE, D_ALL);
+
+ MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+
+ MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+
+ MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
+
+ MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
+
+ MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64eC0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
+ MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
+
+ MMIO_D(_TRANSA_MSA_MISC, D_ALL);
+ MMIO_D(_TRANSB_MSA_MISC, D_ALL);
+ MMIO_D(_TRANSC_MSA_MISC, D_ALL);
+ MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
+
+ MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
+ MMIO_D(FORCEWAKE_ACK, D_ALL);
+ MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
+ MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
+ MMIO_D(GTFIFODBG, D_ALL);
+ MMIO_D(GTFIFOCTL, D_ALL);
+ MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
+ MMIO_D(ECOBUS, D_ALL);
+ MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
+ MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
+ MMIO_D(GEN6_RPNSWREQ, D_ALL);
+ MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
+ MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
+ MMIO_D(GEN6_RPSTAT1, D_ALL);
+ MMIO_D(GEN6_RP_CONTROL, D_ALL);
+ MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
+ MMIO_D(GEN6_RP_CUR_UP, D_ALL);
+ MMIO_D(GEN6_RP_PREV_UP, D_ALL);
+ MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
+ MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
+ MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
+ MMIO_D(GEN6_RP_UP_EI, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
+ MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
+ MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
+ MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
+ MMIO_D(GEN6_RC_SLEEP, D_ALL);
+ MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_PMINTRMSK, D_ALL);
+ MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+
+ MMIO_D(RSTDBYCTL, D_ALL);
+
+ MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
+ MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
+ MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
+ MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
+
+ MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(TILECTL, D_ALL);
+
+ MMIO_D(GEN6_UCGCTL1, D_ALL);
+ MMIO_D(GEN6_UCGCTL2, D_ALL);
+
+ MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+ MMIO_D(GEN6_PCODE_DATA, D_ALL);
+ MMIO_D(0x13812c, D_ALL);
+ MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
+ MMIO_D(HSW_EDRAM_CAP, D_ALL);
+ MMIO_D(HSW_IDICR, D_ALL);
+ MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
+
+ MMIO_D(0x3c, D_ALL);
+ MMIO_D(0x860, D_ALL);
+ MMIO_D(ECOSKPD, D_ALL);
+ MMIO_D(0x121d0, D_ALL);
+ MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
+ MMIO_D(0x41d0, D_ALL);
+ MMIO_D(GAC_ECO_BITS, D_ALL);
+ MMIO_D(0x6200, D_ALL);
+ MMIO_D(0x6204, D_ALL);
+ MMIO_D(0x6208, D_ALL);
+ MMIO_D(0x7118, D_ALL);
+ MMIO_D(0x7180, D_ALL);
+ MMIO_D(0x7408, D_ALL);
+ MMIO_D(0x7c00, D_ALL);
+ MMIO_D(GEN6_MBCTL, D_ALL);
+ MMIO_D(0x911c, D_ALL);
+ MMIO_D(0x9120, D_ALL);
+ MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(GAB_CTL, D_ALL);
+ MMIO_D(0x48800, D_ALL);
+ MMIO_D(0xce044, D_ALL);
+ MMIO_D(0xe6500, D_ALL);
+ MMIO_D(0xe6504, D_ALL);
+ MMIO_D(0xe6600, D_ALL);
+ MMIO_D(0xe6604, D_ALL);
+ MMIO_D(0xe6700, D_ALL);
+ MMIO_D(0xe6704, D_ALL);
+ MMIO_D(0xe6800, D_ALL);
+ MMIO_D(0xe6804, D_ALL);
+ MMIO_D(PCH_GMBUS4, D_ALL);
+ MMIO_D(PCH_GMBUS5, D_ALL);
+
+ MMIO_D(0x902c, D_ALL);
+ MMIO_D(0xec008, D_ALL);
+ MMIO_D(0xec00c, D_ALL);
+ MMIO_D(0xec008 + 0x18, D_ALL);
+ MMIO_D(0xec00c + 0x18, D_ALL);
+ MMIO_D(0xec008 + 0x18 * 2, D_ALL);
+ MMIO_D(0xec00c + 0x18 * 2, D_ALL);
+ MMIO_D(0xec008 + 0x18 * 3, D_ALL);
+ MMIO_D(0xec00c + 0x18 * 3, D_ALL);
+ MMIO_D(0xec408, D_ALL);
+ MMIO_D(0xec40c, D_ALL);
+ MMIO_D(0xec408 + 0x18, D_ALL);
+ MMIO_D(0xec40c + 0x18, D_ALL);
+ MMIO_D(0xec408 + 0x18 * 2, D_ALL);
+ MMIO_D(0xec40c + 0x18 * 2, D_ALL);
+ MMIO_D(0xec408 + 0x18 * 3, D_ALL);
+ MMIO_D(0xec40c + 0x18 * 3, D_ALL);
+ MMIO_D(0xfc810, D_ALL);
+ MMIO_D(0xfc81c, D_ALL);
+ MMIO_D(0xfc828, D_ALL);
+ MMIO_D(0xfc834, D_ALL);
+ MMIO_D(0xfcc00, D_ALL);
+ MMIO_D(0xfcc0c, D_ALL);
+ MMIO_D(0xfcc18, D_ALL);
+ MMIO_D(0xfcc24, D_ALL);
+ MMIO_D(0xfd000, D_ALL);
+ MMIO_D(0xfd00c, D_ALL);
+ MMIO_D(0xfd018, D_ALL);
+ MMIO_D(0xfd024, D_ALL);
+ MMIO_D(0xfd034, D_ALL);
+
+ MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
+ MMIO_D(0x2054, D_ALL);
+ MMIO_D(0x12054, D_ALL);
+ MMIO_D(0x22054, D_ALL);
+ MMIO_D(0x1a054, D_ALL);
+
+ MMIO_D(0x44070, D_ALL);
+
+ MMIO_D(0x215c, D_HSW_PLUS);
+ MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
+ MMIO_D(OACONTROL, D_HSW);
+ MMIO_D(0x2b00, D_BDW_PLUS);
+ MMIO_D(0x2360, D_BDW_PLUS);
+ MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(BCS_SWCTRL, D_ALL);
+
+ MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ return 0;
+}
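+
+/*
+ * Note on the table-building macros used above: MMIO_D/MMIO_DH/MMIO_DFH are
+ * assumed to be thin wrappers around MMIO_F for a single 4-byte register,
+ * each registering one entry via new_mmio_info(), roughly:
+ *
+ *	MMIO_DH(reg, d, r, w)
+ *		-> new_mmio_info(gvt, i915_mmio_reg_offset(reg),
+ *				 0, 4, 0, 0, d, r, w)
+ *
+ * This is only a sketch of the expansion; see the macro definitions earlier
+ * in this file for the exact arguments.
+ */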
+
+static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+
+ MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
+ intel_vgpu_reg_master_irq_handler);
+
+ MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(0x1c134, D_BDW_PLUS);
+
+ MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+ MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
+ MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+
+ MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0xd0)
+ MMIO_RING_F(RING_REG, 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x230)
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
+ MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x234)
+ MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x244)
+ MMIO_RING_D(RING_REG, D_BDW_PLUS);
+ MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x370)
+ MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
+ NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x3a0)
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+#undef RING_REG
+
+ MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
+ MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
+ MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
+ MMIO_D(0x1c1d0, D_BDW_PLUS);
+ MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
+ MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
+ MMIO_D(0x1c054, D_BDW_PLUS);
+
+ MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+ MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
+
+ MMIO_D(GAMTARBMODE, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0x270)
+ MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+ MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+
+ MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+
+ MMIO_D(WM_MISC, D_BDW);
+ MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
+
+ MMIO_D(0x66c00, D_BDW_PLUS);
+ MMIO_D(0x66c04, D_BDW_PLUS);
+
+ MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
+
+ MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
+ MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
+ MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
+
+ MMIO_D(0xfdc, D_BDW);
+ MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
+ MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+
+ MMIO_D(0xb1f0, D_BDW);
+ MMIO_D(0xb1c0, D_BDW);
+ MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0xb100, D_BDW);
+ MMIO_D(0xb10c, D_BDW);
+ MMIO_D(0xb110, D_BDW);
+
+ MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(0x83a4, D_BDW);
+ MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
+
+ MMIO_D(0x8430, D_BDW);
+
+ MMIO_D(0x110000, D_BDW_PLUS);
+
+ MMIO_D(0x48400, D_BDW_PLUS);
+
+ MMIO_D(0x6e570, D_BDW_PLUS);
+ MMIO_D(0x65f10, D_BDW_PLUS);
+
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+
+ MMIO_D(0x2248, D_BDW);
+
+ return 0;
+}
+
+static int init_skl_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+
+ MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
+ MMIO_D(0xa210, D_SKL_PLUS);
+ MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
+ MMIO_D(0x45504, D_SKL);
+ MMIO_D(0x45520, D_SKL);
+ MMIO_D(0x46000, D_SKL);
+ MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
+ MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
+ MMIO_D(0x6C040, D_SKL);
+ MMIO_D(0x6C048, D_SKL);
+ MMIO_D(0x6C050, D_SKL);
+ MMIO_D(0x6C044, D_SKL);
+ MMIO_D(0x6C04C, D_SKL);
+ MMIO_D(0x6C054, D_SKL);
+ MMIO_D(0x6c058, D_SKL);
+ MMIO_D(0x6c05c, D_SKL);
+ MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
+
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
+
+ MMIO_D(0x70380, D_SKL);
+ MMIO_D(0x71380, D_SKL);
+ MMIO_D(0x72380, D_SKL);
+ MMIO_D(0x7039c, D_SKL);
+
+ MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_D(0x8f074, D_SKL);
+ MMIO_D(0x8f004, D_SKL);
+ MMIO_D(0x8f034, D_SKL);
+
+ MMIO_D(0xb11c, D_SKL);
+
+ MMIO_D(0x51000, D_SKL);
+ MMIO_D(0x6c00c, D_SKL);
+
+ MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_D(0xd08, D_SKL);
+ MMIO_D(0x20e0, D_SKL);
+ MMIO_D(0x20ec, D_SKL);
+
+ /* TRTT */
+ MMIO_D(0x4de0, D_SKL);
+ MMIO_D(0x4de4, D_SKL);
+ MMIO_D(0x4de8, D_SKL);
+ MMIO_D(0x4dec, D_SKL);
+ MMIO_D(0x4df0, D_SKL);
+ MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+ MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+
+ MMIO_D(0x45008, D_SKL);
+
+ MMIO_D(0x46430, D_SKL);
+
+ MMIO_D(0x46520, D_SKL);
+
+ MMIO_D(0xc403c, D_SKL);
+ MMIO_D(0xb004, D_SKL);
+ MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
+
+ MMIO_D(0x65900, D_SKL);
+ MMIO_D(0x1082c0, D_SKL);
+ MMIO_D(0x4068, D_SKL);
+ MMIO_D(0x67054, D_SKL);
+ MMIO_D(0x6e560, D_SKL);
+ MMIO_D(0x6e554, D_SKL);
+ MMIO_D(0x2b20, D_SKL);
+ MMIO_D(0x65f00, D_SKL);
+ MMIO_D(0x65f08, D_SKL);
+ MMIO_D(0x320f0, D_SKL);
+
+ MMIO_D(_REG_VCS2_EXCC, D_SKL);
+ MMIO_D(0x70034, D_SKL);
+ MMIO_D(0x71034, D_SKL);
+ MMIO_D(0x72034, D_SKL);
+
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
+
+ MMIO_D(0x44500, D_SKL);
+ return 0;
+}
+
+/**
+ * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
+ * @gvt: GVT device
+ * @offset: register offset
+ *
+ * This function is used to find the MMIO information entry in the hash table.
+ *
+ * Returns:
+ * Pointer to the MMIO information entry, or NULL if it does not exist.
+ */
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ struct intel_gvt_mmio_info *e;
+
+ WARN_ON(!IS_ALIGNED(offset, 4));
+
+ hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
+ if (e->offset == offset)
+ return e;
+ }
+ return NULL;
+}
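+
+/*
+ * Usage sketch (illustrative only, assuming the read/write callbacks of
+ * struct intel_gvt_mmio_info as used in this file): a caller emulating a
+ * trapped MMIO read would look up the aligned offset and fall back to the
+ * default handler when no entry or callback exists.
+ *
+ *	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ *	if (mmio && mmio->read)
+ *		ret = mmio->read(vgpu, offset, p_data, bytes);
+ *	else
+ *		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+ */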
+
+/**
+ * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the
+ * MMIO information table of the GVT device.
+ *
+ */
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
+{
+ struct hlist_node *tmp;
+ struct intel_gvt_mmio_info *e;
+ int i;
+
+ hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
+ kfree(e);
+
+ vfree(gvt->mmio.mmio_attribute);
+ gvt->mmio.mmio_attribute = NULL;
+}
+
+/**
+ * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage to set up the MMIO
+ * information table for the GVT device.
+ *
+ * Returns:
+ * zero on success, negative if failed.
+ */
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
+ if (!gvt->mmio.mmio_attribute)
+ return -ENOMEM;
+
+ ret = init_generic_mmio_info(gvt);
+ if (ret)
+ goto err;
+
+ if (IS_BROADWELL(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_skl_mmio_info(gvt);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ intel_gvt_clean_mmio_info(gvt);
+ return ret;
+}
+
+/**
+ * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |=
+ F_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_UNALIGN;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by a GPU command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |=
+ F_CMD_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO register has a mode mask in its upper 16 bits, false otherwise.
+ *
+ */
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_MODE_MASK;
+}
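+
+/*
+ * Worked example of the mode-mask convention checked above: for such
+ * registers the upper 16 bits select which of the lower 16 bits a write
+ * actually updates, i.e. _MASKED_BIT_ENABLE(bit) == (bit << 16) | bit.
+ *
+ *	write 0x00040004 -> bit 2 enabled, all other bits unchanged
+ *	write 0x00040000 -> bit 2 disabled, all other bits unchanged
+ */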
+
+/**
+ * intel_vgpu_default_mmio_read - default MMIO read handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ read_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+/**
+ * intel_vgpu_default_mmio_write - default MMIO write handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 254df8bf1f35..30e543f5a703 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Dexuan Cui
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_HYPERCALL_H_
@@ -30,6 +39,23 @@
*/
struct intel_gvt_mpt {
int (*detect_host)(void);
+ int (*host_init)(struct device *dev, void *gvt, const void *ops);
+ void (*host_exit)(struct device *dev, void *gvt);
+ int (*attach_vgpu)(void *vgpu, unsigned long *handle);
+ void (*detach_vgpu)(unsigned long handle);
+ int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
+ unsigned long (*from_virt_to_mfn)(void *p);
+ int (*set_wp_page)(unsigned long handle, u64 gfn);
+ int (*unset_wp_page)(unsigned long handle, u64 gfn);
+ int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ unsigned long len);
+ int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ unsigned long len);
+ unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+ int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
+ unsigned long mfn, unsigned int nr, bool map);
+ int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
+ bool map);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
new file mode 100644
index 000000000000..f7be02ac4be1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min he <min.he@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/* common offset among interrupt control registers */
+#define regbase_to_isr(base) (base)
+#define regbase_to_imr(base) (base + 0x4)
+#define regbase_to_iir(base) (base + 0x8)
+#define regbase_to_ier(base) (base + 0xC)
+
+#define iir_to_regbase(iir) (iir - 0x8)
+#define ier_to_regbase(ier) (ier - 0xC)
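+
+/*
+ * Example, assuming the usual gen8 GT0 interrupt block at 0x44300:
+ * ISR 0x44300, IMR 0x44304, IIR 0x44308, IER 0x4430c, so
+ * iir_to_regbase(0x44308) == 0x44300 and ier_to_regbase(0x4430c) == 0x44300.
+ */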
+
+#define get_event_virt_handler(irq, e) (irq->events[e].v_handler)
+#define get_irq_info(irq, e) (irq->events[e].info)
+
+#define irq_to_gvt(irq) \
+ container_of(irq, struct intel_gvt, irq)
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+ struct intel_gvt_irq_info *info);
+
+static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
+ [RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
+ [RCS_DEBUG] = "Render EU debug from SVG",
+ [RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
+ [RCS_CMD_STREAMER_ERR] = "Render CS error interrupt",
+ [RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify",
+ [RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded",
+ [RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults",
+ [RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt",
+
+ [VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT",
+ [VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status",
+ [VCS_CMD_STREAMER_ERR] = "Video CS error interrupt",
+ [VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify",
+ [VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded",
+ [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults",
+ [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt",
+ [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT",
+ [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify",
+ [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt",
+
+ [BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT",
+ [BCS_MMIO_SYNC_FLUSH] = "Billter MMIO sync flush status",
+ [BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt",
+ [BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify",
+ [BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults",
+ [BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt",
+
+ [VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify",
+ [VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt",
+
+ [PIPE_A_FIFO_UNDERRUN] = "Pipe A FIFO underrun",
+ [PIPE_A_CRC_ERR] = "Pipe A CRC error",
+ [PIPE_A_CRC_DONE] = "Pipe A CRC done",
+ [PIPE_A_VSYNC] = "Pipe A vsync",
+ [PIPE_A_LINE_COMPARE] = "Pipe A line compare",
+ [PIPE_A_ODD_FIELD] = "Pipe A odd field",
+ [PIPE_A_EVEN_FIELD] = "Pipe A even field",
+ [PIPE_A_VBLANK] = "Pipe A vblank",
+ [PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun",
+ [PIPE_B_CRC_ERR] = "Pipe B CRC error",
+ [PIPE_B_CRC_DONE] = "Pipe B CRC done",
+ [PIPE_B_VSYNC] = "Pipe B vsync",
+ [PIPE_B_LINE_COMPARE] = "Pipe B line compare",
+ [PIPE_B_ODD_FIELD] = "Pipe B odd field",
+ [PIPE_B_EVEN_FIELD] = "Pipe B even field",
+ [PIPE_B_VBLANK] = "Pipe B vblank",
+ [PIPE_C_VBLANK] = "Pipe C vblank",
+ [DPST_PHASE_IN] = "DPST phase in event",
+ [DPST_HISTOGRAM] = "DPST histogram event",
+ [GSE] = "GSE",
+ [DP_A_HOTPLUG] = "DP A Hotplug",
+ [AUX_CHANNEL_A] = "AUX Channel A",
+ [PERF_COUNTER] = "Performance counter",
+ [POISON] = "Poison",
+ [GTT_FAULT] = "GTT fault",
+ [PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done",
+ [PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done",
+ [PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done",
+ [SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done",
+ [SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done",
+ [SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done",
+
+ [PCU_THERMAL] = "PCU Thermal Event",
+ [PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event",
+
+ [FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A",
+ [AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A",
+ [AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A",
+ [FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B",
+ [AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B",
+ [AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B",
+ [FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
+ [AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
+ [AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
+ [ERR_AND_DBG] = "South Error and Debug Interupts Combined",
+ [GMBUS] = "Gmbus",
+ [SDVO_B_HOTPLUG] = "SDVO B hotplug",
+ [CRT_HOTPLUG] = "CRT Hotplug",
+ [DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug",
+ [DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug",
+ [DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug",
+ [AUX_CHANNEL_B] = "AUX Channel B",
+ [AUX_CHANNEL_C] = "AUX Channel C",
+ [AUX_CHANNEL_D] = "AUX Channel D",
+ [AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B",
+ [AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C",
+ [AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D",
+
+ [INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!",
+};
+
+static inline struct intel_gvt_irq_info *regbase_to_irq_info(
+ struct intel_gvt *gvt,
+ unsigned int reg)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ int i;
+
+ for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+ if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg)
+ return irq->info[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IMR register bit change
+ * behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ u32 changed, masked, unmasked;
+ u32 imr = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IMR %x with val %x\n",
+ reg, imr);
+
+ gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
+
+ /* figure out newly masked/unmasked bits */
+ changed = vgpu_vreg(vgpu, reg) ^ imr;
+ masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+ unmasked = masked ^ changed;
+
+ gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
+ changed, masked, unmasked);
+
+ vgpu_vreg(vgpu, reg) = imr;
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
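+
+/*
+ * Example of the bit arithmetic above (values chosen for illustration):
+ * with old vIMR = 0x0f and a guest write of imr = 0x33,
+ *	changed  = 0x0f ^ 0x33          = 0x3c
+ *	masked   = (0x0f & 0x3c) ^ 0x3c = 0x30	(bits 4-5 newly masked)
+ *	unmasked = 0x30 ^ 0x3c          = 0x0c	(bits 2-3 newly unmasked)
+ */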
+
+/**
+ * intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the master IRQ register on gen8+.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ u32 changed, enabled, disabled;
+ u32 ier = *(u32 *)p_data;
+ u32 virtual_ier = vgpu_vreg(vgpu, reg);
+
+ gvt_dbg_irq("write master irq reg %x with val %x\n",
+ reg, ier);
+
+ gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+
+ /*
+ * GEN8_MASTER_IRQ is a special irq register,
+ * only bit 31 is allowed to be modified
+ * and treated as an IER bit.
+ */
+ ier &= GEN8_MASTER_IRQ_CONTROL;
+ virtual_ier &= GEN8_MASTER_IRQ_CONTROL;
+ vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
+ vgpu_vreg(vgpu, reg) |= ier;
+
+	/* figure out newly enabled/disabled bits */
+ changed = virtual_ier ^ ier;
+ enabled = (virtual_ier & changed) ^ changed;
+ disabled = enabled ^ changed;
+
+ gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+ changed, enabled, disabled);
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
+
+/**
+ * intel_vgpu_reg_ier_handler - Generic IER write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IER register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ struct intel_gvt_irq_info *info;
+ u32 changed, enabled, disabled;
+ u32 ier = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IER %x with val %x\n",
+ reg, ier);
+
+ gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+
+	/* figure out newly enabled/disabled bits */
+ changed = vgpu_vreg(vgpu, reg) ^ ier;
+ enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+ disabled = enabled ^ changed;
+
+ gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+ changed, enabled, disabled);
+ vgpu_vreg(vgpu, reg) = ier;
+
+ info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ if (info->has_upstream_irq)
+ update_upstream_irq(vgpu, info);
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
+
+/**
+ * intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IIR register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
+ iir_to_regbase(reg));
+ u32 iir = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ vgpu_vreg(vgpu, reg) &= ~iir;
+
+ if (info->has_upstream_irq)
+ update_upstream_irq(vgpu, info);
+ return 0;
+}
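+
+/*
+ * Example of the write-one-to-clear behavior above: with vIIR = 0x7, a
+ * guest write of 0x5 clears bits 0 and 2 and leaves vIIR = 0x2.
+ */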
+
+static struct intel_gvt_irq_map gen8_irq_map[] = {
+ { INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 },
+ { -1, -1, ~0 },
+};
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+ struct intel_gvt_irq_info *info)
+{
+ struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+ struct intel_gvt_irq_map *map = irq->irq_map;
+ struct intel_gvt_irq_info *up_irq_info = NULL;
+ u32 set_bits = 0;
+ u32 clear_bits = 0;
+ int bit;
+ u32 val = vgpu_vreg(vgpu,
+ regbase_to_iir(i915_mmio_reg_offset(info->reg_base)))
+ & vgpu_vreg(vgpu,
+ regbase_to_ier(i915_mmio_reg_offset(info->reg_base)));
+
+ if (!info->has_upstream_irq)
+ return;
+
+ for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+ if (info->group != map->down_irq_group)
+ continue;
+
+ if (!up_irq_info)
+ up_irq_info = irq->info[map->up_irq_group];
+ else
+ WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+
+ bit = map->up_irq_bit;
+
+ if (val & map->down_irq_bitmask)
+ set_bits |= (1 << bit);
+ else
+ clear_bits |= (1 << bit);
+ }
+
+ WARN_ON(!up_irq_info);
+
+ if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
+ u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
+
+ vgpu_vreg(vgpu, isr) &= ~clear_bits;
+ vgpu_vreg(vgpu, isr) |= set_bits;
+ } else {
+ u32 iir = regbase_to_iir(
+ i915_mmio_reg_offset(up_irq_info->reg_base));
+ u32 imr = regbase_to_imr(
+ i915_mmio_reg_offset(up_irq_info->reg_base));
+
+ vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr));
+ }
+
+ if (up_irq_info->has_upstream_irq)
+ update_upstream_irq(vgpu, up_irq_info);
+}
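+
+/*
+ * Example walk of the propagation above, using gen8_irq_map: if pipe A has
+ * any bit set in both its vIIR and vIER, the map entry
+ * { MASTER, 16, DE_PIPE_A, ~0 } makes bit 16 of the virtual master register
+ * pending; the recursion stops there since the master group has no further
+ * upstream level.
+ */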
+
+static void init_irq_map(struct intel_gvt_irq *irq)
+{
+ struct intel_gvt_irq_map *map;
+ struct intel_gvt_irq_info *up_info, *down_info;
+ int up_bit;
+
+ for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+ up_info = irq->info[map->up_irq_group];
+ up_bit = map->up_irq_bit;
+ down_info = irq->info[map->down_irq_group];
+
+ set_bit(up_bit, up_info->downstream_irq_bitmap);
+ down_info->has_upstream_irq = true;
+
+ gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n",
+ up_info->group, up_bit,
+ down_info->group, map->down_irq_bitmask);
+ }
+}
+
+/* =======================vEvent injection===================== */
+static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+{
+ return intel_gvt_hypervisor_inject_msi(vgpu);
+}
+
+static void propagate_event(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_irq_info *info;
+ unsigned int reg_base;
+ int bit;
+
+ info = get_irq_info(irq, event);
+ if (WARN_ON(!info))
+ return;
+
+ reg_base = i915_mmio_reg_offset(info->reg_base);
+ bit = irq->events[event].bit;
+
+ if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
+ regbase_to_imr(reg_base)))) {
+ gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
+ bit, irq_name[event], vgpu->id);
+ set_bit(bit, (void *)&vgpu_vreg(vgpu,
+ regbase_to_iir(reg_base)));
+ }
+}
+
+/* =======================vEvent Handlers===================== */
+static void handle_default_event_virt(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+ if (!vgpu->irq.irq_warn_once[event]) {
+ gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n",
+ vgpu->id, event, irq_name[event]);
+ vgpu->irq.irq_warn_once[event] = true;
+ }
+ propagate_event(irq, event, vgpu);
+}
+
+/* =====================GEN specific logic======================= */
+/* GEN8 interrupt routines. */
+
+#define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \
+static struct intel_gvt_irq_info gen8_##regname##_info = { \
+ .name = #regname"-IRQ", \
+ .reg_base = (regbase), \
+ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
+ INTEL_GVT_EVENT_RESERVED}, \
+}
+
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ);
+
+static struct intel_gvt_irq_info gvt_base_pch_info = {
+ .name = "PCH-IRQ",
+ .reg_base = SDEISR,
+ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
+ INTEL_GVT_EVENT_RESERVED},
+};
+
+static void gen8_check_pending_irq(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+ int i;
+
+ if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) &
+ GEN8_MASTER_IRQ_CONTROL))
+ return;
+
+ for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+ struct intel_gvt_irq_info *info = irq->info[i];
+ u32 reg_base;
+
+ if (!info->has_upstream_irq)
+ continue;
+
+ reg_base = i915_mmio_reg_offset(info->reg_base);
+ if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base))
+ & vgpu_vreg(vgpu, regbase_to_ier(reg_base))))
+ update_upstream_irq(vgpu, info);
+ }
+
+ if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ))
+ & ~GEN8_MASTER_IRQ_CONTROL)
+ inject_virtual_interrupt(vgpu);
+}
+
+static void gen8_init_irq(
+ struct intel_gvt_irq *irq)
+{
+ struct intel_gvt *gvt = irq_to_gvt(irq);
+
+#define SET_BIT_INFO(s, b, e, i) \
+ do { \
+ s->events[e].bit = b; \
+ s->events[e].info = s->info[i]; \
+ s->info[i]->bit_to_event[b] = e;\
+ } while (0)
+
+#define SET_IRQ_GROUP(s, g, i) \
+ do { \
+ s->info[g] = i; \
+ (i)->group = g; \
+ set_bit(g, s->irq_info_bitmap); \
+ } while (0)
+
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info);
+
+ /* GEN8 level 2 interrupts. */
+
+ /* GEN8 interrupt GT0 events */
+ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+ SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+ /* GEN8 interrupt GT1 events */
+ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
+
+ if (HAS_BSD2(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
+ INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
+ INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH,
+ INTEL_GVT_IRQ_INFO_GT1);
+ }
+
+ /* GEN8 interrupt GT3 events */
+ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3);
+ SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3);
+ SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3);
+
+ SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+ /* GEN8 interrupt DE PORT events */
+ SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+ /* GEN8 interrupt DE MISC events */
+ SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC);
+
+ /* PCH events */
+ SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+
+ if (IS_BROADWELL(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ } else if (IS_SKYLAKE(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+ SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ }
+
+ /* GEN8 interrupt PCU events */
+ SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU);
+ SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
+}
+
+static struct intel_gvt_irq_ops gen8_irq_ops = {
+ .init_irq = gen8_init_irq,
+ .check_pending_irq = gen8_check_pending_irq,
+};
+
+/**
+ * intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
+ * @vgpu: a vGPU
+ * @event: interrupt event
+ *
+ * This function is used to trigger a virtual interrupt event for a vGPU.
+ * The caller provides the event to be triggered, and the framework itself
+ * emulates the IRQ register bit changes.
+ *
+ */
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+ enum intel_gvt_event_type event)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq *irq = &gvt->irq;
+ gvt_event_virt_handler_t handler;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+
+ handler = get_event_virt_handler(irq, event);
+ WARN_ON(!handler);
+
+ handler(irq, event, vgpu);
+
+ ops->check_pending_irq(vgpu);
+}
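+
+/*
+ * Example (editorial sketch, not part of the original patch): a display
+ * emulation path would typically raise a per-pipe vblank through the helper
+ * above; emulate_vblank_on_pipe() below is a hypothetical caller.
+ *
+ *	static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
+ *	{
+ *		if (pipe == PIPE_A)
+ *			intel_vgpu_trigger_virtual_event(vgpu, PIPE_A_VBLANK);
+ *	}
+ *
+ * The framework sets the matching IIR bit and, if it is unmasked all the way
+ * up to the master register, injects an MSI into the guest.
+ */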
+
+static void init_events(
+ struct intel_gvt_irq *irq)
+{
+ int i;
+
+ for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) {
+ irq->events[i].info = NULL;
+ irq->events[i].v_handler = handle_default_event_virt;
+ }
+}
+
+static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
+{
+ struct intel_gvt_vblank_timer *vblank_timer;
+ struct intel_gvt_irq *irq;
+ struct intel_gvt *gvt;
+
+ vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
+ irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
+ gvt = container_of(irq, struct intel_gvt, irq);
+
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
+ hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
+ return HRTIMER_RESTART;
+}
+
+/**
+ * intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the
+ * GVT-g IRQ emulation subsystem.
+ *
+ */
+void intel_gvt_clean_irq(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+
+ hrtimer_cancel(&irq->vblank_timer.timer);
+}
+
+#define VBLANK_TIMER_PERIOD 16000000
+
+/**
+ * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at the driver loading stage to initialize the
+ * GVT-g IRQ emulation subsystem.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_irq(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
+
+ gvt_dbg_core("init irq framework\n");
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
+ } else {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ /* common event initialization */
+ init_events(irq);
+
+ /* gen specific initialization */
+ irq->ops->init_irq(irq);
+
+ init_irq_map(irq);
+
+ hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ vblank_timer->timer.function = vblank_timer_fn;
+ vblank_timer->period = VBLANK_TIMER_PERIOD;
+
+ return 0;
+}
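+
+/*
+ * Minimal usage sketch (assumption, not part of this patch): the GVT device
+ * init path is expected to pair these entry points:
+ *
+ *	ret = intel_gvt_init_irq(gvt);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	intel_gvt_clean_irq(gvt);
+ *
+ * Note that the vblank hrtimer is only initialized here; starting it with
+ * hrtimer_start() is left to the vblank emulation code.
+ */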
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
new file mode 100644
index 000000000000..5313fb1b33e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min he <min.he@intel.com>
+ *
+ */
+
+#ifndef _GVT_INTERRUPT_H_
+#define _GVT_INTERRUPT_H_
+
+enum intel_gvt_event_type {
+ RCS_MI_USER_INTERRUPT = 0,
+ RCS_DEBUG,
+ RCS_MMIO_SYNC_FLUSH,
+ RCS_CMD_STREAMER_ERR,
+ RCS_PIPE_CONTROL,
+ RCS_L3_PARITY_ERR,
+ RCS_WATCHDOG_EXCEEDED,
+ RCS_PAGE_DIRECTORY_FAULT,
+ RCS_AS_CONTEXT_SWITCH,
+ RCS_MONITOR_BUFF_HALF_FULL,
+
+ VCS_MI_USER_INTERRUPT,
+ VCS_MMIO_SYNC_FLUSH,
+ VCS_CMD_STREAMER_ERR,
+ VCS_MI_FLUSH_DW,
+ VCS_WATCHDOG_EXCEEDED,
+ VCS_PAGE_DIRECTORY_FAULT,
+ VCS_AS_CONTEXT_SWITCH,
+
+ VCS2_MI_USER_INTERRUPT,
+ VCS2_MI_FLUSH_DW,
+ VCS2_AS_CONTEXT_SWITCH,
+
+ BCS_MI_USER_INTERRUPT,
+ BCS_MMIO_SYNC_FLUSH,
+ BCS_CMD_STREAMER_ERR,
+ BCS_MI_FLUSH_DW,
+ BCS_PAGE_DIRECTORY_FAULT,
+ BCS_AS_CONTEXT_SWITCH,
+
+ VECS_MI_USER_INTERRUPT,
+ VECS_MI_FLUSH_DW,
+ VECS_AS_CONTEXT_SWITCH,
+
+ PIPE_A_FIFO_UNDERRUN,
+ PIPE_B_FIFO_UNDERRUN,
+ PIPE_A_CRC_ERR,
+ PIPE_B_CRC_ERR,
+ PIPE_A_CRC_DONE,
+ PIPE_B_CRC_DONE,
+ PIPE_A_ODD_FIELD,
+ PIPE_B_ODD_FIELD,
+ PIPE_A_EVEN_FIELD,
+ PIPE_B_EVEN_FIELD,
+ PIPE_A_LINE_COMPARE,
+ PIPE_B_LINE_COMPARE,
+ PIPE_C_LINE_COMPARE,
+ PIPE_A_VBLANK,
+ PIPE_B_VBLANK,
+ PIPE_C_VBLANK,
+ PIPE_A_VSYNC,
+ PIPE_B_VSYNC,
+ PIPE_C_VSYNC,
+ PRIMARY_A_FLIP_DONE,
+ PRIMARY_B_FLIP_DONE,
+ PRIMARY_C_FLIP_DONE,
+ SPRITE_A_FLIP_DONE,
+ SPRITE_B_FLIP_DONE,
+ SPRITE_C_FLIP_DONE,
+
+ PCU_THERMAL,
+ PCU_PCODE2DRIVER_MAILBOX,
+
+ DPST_PHASE_IN,
+ DPST_HISTOGRAM,
+ GSE,
+ DP_A_HOTPLUG,
+ AUX_CHANNEL_A,
+ PERF_COUNTER,
+ POISON,
+ GTT_FAULT,
+ ERROR_INTERRUPT_COMBINED,
+
+ FDI_RX_INTERRUPTS_TRANSCODER_A,
+ AUDIO_CP_CHANGE_TRANSCODER_A,
+ AUDIO_CP_REQUEST_TRANSCODER_A,
+ FDI_RX_INTERRUPTS_TRANSCODER_B,
+ AUDIO_CP_CHANGE_TRANSCODER_B,
+ AUDIO_CP_REQUEST_TRANSCODER_B,
+ FDI_RX_INTERRUPTS_TRANSCODER_C,
+ AUDIO_CP_CHANGE_TRANSCODER_C,
+ AUDIO_CP_REQUEST_TRANSCODER_C,
+ ERR_AND_DBG,
+ GMBUS,
+ SDVO_B_HOTPLUG,
+ CRT_HOTPLUG,
+ DP_B_HOTPLUG,
+ DP_C_HOTPLUG,
+ DP_D_HOTPLUG,
+ AUX_CHANNEL_B,
+ AUX_CHANNEL_C,
+ AUX_CHANNEL_D,
+ AUDIO_POWER_STATE_CHANGE_B,
+ AUDIO_POWER_STATE_CHANGE_C,
+ AUDIO_POWER_STATE_CHANGE_D,
+
+ INTEL_GVT_EVENT_RESERVED,
+ INTEL_GVT_EVENT_MAX,
+};
+
+struct intel_gvt_irq;
+struct intel_gvt;
+
+typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
+
+struct intel_gvt_irq_ops {
+ void (*init_irq)(struct intel_gvt_irq *irq);
+ void (*check_pending_irq)(struct intel_vgpu *vgpu);
+};
+
+/* the list of physical interrupt control register groups */
+enum intel_gvt_irq_type {
+ INTEL_GVT_IRQ_INFO_GT,
+ INTEL_GVT_IRQ_INFO_DPY,
+ INTEL_GVT_IRQ_INFO_PCH,
+ INTEL_GVT_IRQ_INFO_PM,
+
+ INTEL_GVT_IRQ_INFO_MASTER,
+ INTEL_GVT_IRQ_INFO_GT0,
+ INTEL_GVT_IRQ_INFO_GT1,
+ INTEL_GVT_IRQ_INFO_GT2,
+ INTEL_GVT_IRQ_INFO_GT3,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_A,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_B,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_C,
+ INTEL_GVT_IRQ_INFO_DE_PORT,
+ INTEL_GVT_IRQ_INFO_DE_MISC,
+ INTEL_GVT_IRQ_INFO_AUD,
+ INTEL_GVT_IRQ_INFO_PCU,
+
+ INTEL_GVT_IRQ_INFO_MAX,
+};
+
+#define INTEL_GVT_IRQ_BITWIDTH 32
+
+/* device specific interrupt bit definitions */
+struct intel_gvt_irq_info {
+ char *name;
+ i915_reg_t reg_base;
+ enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
+ unsigned long warned;
+ int group;
+ DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
+ bool has_upstream_irq;
+};
+
+/* per-event information */
+struct intel_gvt_event_info {
+ int bit; /* map to register bit */
+ int policy; /* forwarding policy */
+ struct intel_gvt_irq_info *info; /* register info */
+ gvt_event_virt_handler_t v_handler; /* for v_event */
+};
+
+struct intel_gvt_irq_map {
+ int up_irq_group;
+ int up_irq_bit;
+ int down_irq_group;
+ u32 down_irq_bitmask;
+};
+
+struct intel_gvt_vblank_timer {
+ struct hrtimer timer;
+ u64 period;
+};
+
+/* structure containing device specific IRQ state */
+struct intel_gvt_irq {
+ struct intel_gvt_irq_ops *ops;
+ struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
+ DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
+ struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
+ DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+ struct intel_gvt_irq_map *irq_map;
+ struct intel_gvt_vblank_timer vblank_timer;
+};
+
+int intel_gvt_init_irq(struct intel_gvt *gvt);
+void intel_gvt_clean_irq(struct intel_gvt *gvt);
+
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+ enum intel_gvt_event_type event);
+
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+
+int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
+int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
+int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
+
+#endif /* _GVT_INTERRUPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
new file mode 100644
index 000000000000..dc0365033157
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -0,0 +1,597 @@
+/*
+ * KVMGT - the implementation of Intel mediated pass-through framework for KVM
+ *
+ * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Jike Song <jike.song@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/eventfd.h>
+#include <linux/uuid.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn,
+ long npage, int prot, unsigned long *phys_pfn)
+{
+ return 0;
+}
+static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn,
+ long npage)
+{
+ return 0;
+}
+
+static const struct intel_gvt_ops *intel_gvt_ops;
+
+
+/* helper macros copied from vfio-pci */
+#define VFIO_PCI_OFFSET_SHIFT 40
+#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+
+struct vfio_region {
+ u32 type;
+ u32 subtype;
+ size_t size;
+ u32 flags;
+};
+
+struct kvmgt_pgfn {
+ gfn_t gfn;
+ struct hlist_node hnode;
+};
+
+struct kvmgt_guest_info {
+ struct kvm *kvm;
+ struct intel_vgpu *vgpu;
+ struct kvm_page_track_notifier_node track_node;
+#define NR_BKT (1 << 18)
+ struct hlist_head ptable[NR_BKT];
+#undef NR_BKT
+};
+
+struct gvt_dma {
+ struct rb_node node;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+};
+
+static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct rb_node *node = vgpu->vdev.cache.rb_node;
+ struct gvt_dma *ret = NULL;
+
+ while (node) {
+ struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
+
+ if (gfn < itr->gfn)
+ node = node->rb_left;
+ else if (gfn > itr->gfn)
+ node = node->rb_right;
+ else {
+ ret = itr;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct gvt_dma *entry;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find(vgpu, gfn);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+
+ return entry == NULL ? 0 : entry->pfn;
+}
+
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+{
+ struct gvt_dma *new, *itr;
+ struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
+
+ new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
+ if (!new)
+ return;
+
+ new->gfn = gfn;
+ new->pfn = pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while (*link) {
+ parent = *link;
+ itr = rb_entry(parent, struct gvt_dma, node);
+
+ if (gfn == itr->gfn)
+ goto out;
+ else if (gfn < itr->gfn)
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &vgpu->vdev.cache);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+
+out:
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ kfree(new);
+}
+
+static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
+ struct gvt_dma *entry)
+{
+ rb_erase(&entry->node, &vgpu->vdev.cache);
+ kfree(entry);
+}
+
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct device *dev = vgpu->vdev.mdev;
+ struct gvt_dma *this;
+ unsigned long pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ this = __gvt_cache_find(vgpu, gfn);
+ if (!this) {
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+ }
+
+ pfn = this->pfn;
+ WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1));
+ __gvt_cache_remove_entry(vgpu, this);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_init(struct intel_vgpu *vgpu)
+{
+ vgpu->vdev.cache = RB_ROOT;
+ mutex_init(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_destroy(struct intel_vgpu *vgpu)
+{
+ struct gvt_dma *dma;
+ struct rb_node *node = NULL;
+ struct device *dev = vgpu->vdev.mdev;
+ unsigned long pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while ((node = rb_first(&vgpu->vdev.cache))) {
+ dma = rb_entry(node, struct gvt_dma, node);
+ pfn = dma->pfn;
+
+ kvmgt_unpin_pages(dev, &pfn, 1);
+ __gvt_cache_remove_entry(vgpu, dma);
+ }
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
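+
+/*
+ * Lifecycle sketch (editorial note): the per-vGPU cache above memoizes
+ * gfn -> pfn translations obtained by pinning guest pages.  Assuming the
+ * kvmgt_pin_pages() stub is later backed by VFIO, a typical lookup is:
+ *
+ *	pfn = gvt_cache_find(vgpu, gfn);
+ *	if (!pfn) {
+ *		kvmgt_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
+ *		gvt_cache_add(vgpu, gfn, pfn);
+ *	}
+ *
+ * with gvt_cache_remove()/gvt_cache_destroy() unpinning on teardown.
+ */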
+
+static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
+ const char *name)
+{
+ int i;
+ struct intel_vgpu_type *t;
+ const char *driver_name = dev_driver_string(
+ &gvt->dev_priv->drm.pdev->dev);
+
+ for (i = 0; i < gvt->num_types; i++) {
+ t = &gvt->types[i];
+ if (!strncmp(t->name, name + strlen(driver_name) + 1,
+ sizeof(t->name)))
+ return t;
+ }
+
+ return NULL;
+}
+
+static struct attribute *type_attrs[] = {
+ NULL,
+};
+
+static struct attribute_group *intel_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (WARN_ON(!group))
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = type_attrs;
+ intel_vgpu_type_groups[i] = group;
+ }
+
+ return true;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = intel_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return false;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = intel_vgpu_type_groups[i];
+ kfree(group);
+ }
+}
+
+static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
+{
+ hash_init(info->ptable);
+}
+
+static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
+{
+ struct kvmgt_pgfn *p;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static struct kvmgt_pgfn *
+__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p, *res = NULL;
+
+ hash_for_each_possible(info->ptable, p, hnode, gfn) {
+ if (gfn == p->gfn) {
+ res = p;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ return !!p;
+}
+
+static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ return;
+
+ p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
+ if (WARN(!p, "gfn: 0x%llx\n", gfn))
+ return;
+
+ p->gfn = gfn;
+ hash_add(info->ptable, &p->hnode, gfn);
+}
+
+static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ if (p) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
+{
+ if (!intel_gvt_init_vgpu_type_groups(gvt))
+ return -EFAULT;
+
+ intel_gvt_ops = ops;
+
+ /* MDEV is not yet available */
+ return -ENODEV;
+}
+
+static void kvmgt_host_exit(struct device *dev, void *gvt)
+{
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+}
+
+static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvm *kvm = info->kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_add(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvm *kvm = info->kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (!kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node)
+{
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
+ intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
+ (void *)val, len);
+}
+
+static void kvmgt_page_track_flush_slot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ int i;
+ gfn_t gfn;
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i < slot->npages; i++) {
+ gfn = slot->base_gfn + i;
+ if (kvmgt_gfn_is_write_protected(info, gfn)) {
+ kvm_slot_page_track_remove_page(kvm, slot, gfn,
+ KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+}
+
+static bool kvmgt_check_guest(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ char s[12];
+ unsigned int *i;
+
+ eax = KVM_CPUID_SIGNATURE;
+ ebx = ecx = edx = 0;
+
+ asm volatile ("cpuid"
+ : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ :
+ : "cc", "memory");
+ i = (unsigned int *)s;
+ i[0] = ebx;
+ i[1] = ecx;
+ i[2] = edx;
+
+ return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
+}
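+
+/*
+ * For reference (editorial note): KVM_CPUID_SIGNATURE returns the hypervisor
+ * signature "KVMKVMKVM\0\0\0" split across EBX/ECX/EDX ("KVMK", "VMKV",
+ * "M\0\0\0"), which is what the strncmp() above matches against.
+ */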
+
+/**
+ * NOTE:
+ * It's actually impossible to check whether we are running in a KVM host,
+ * since the "KVM host" is simply native, so we only detect the guest here.
+ */
+static int kvmgt_detect_host(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
+ return -ENODEV;
+ }
+#endif
+ return kvmgt_check_guest() ? -ENODEV : 0;
+}
+
+static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+static void kvmgt_detach_vgpu(unsigned long handle)
+{
+ /* nothing to do here */
+}
+
+static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct intel_vgpu *vgpu = info->vgpu;
+
+ if (vgpu->vdev.msi_trigger)
+ return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1;
+
+ return false;
+}
+
+static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
+{
+ unsigned long pfn;
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ int rc;
+
+ pfn = gvt_cache_find(info->vgpu, gfn);
+ if (pfn != 0)
+ return pfn;
+
+ rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (rc != 1) {
+ gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn);
+ return 0;
+ }
+
+ gvt_cache_add(info->vgpu, gfn, pfn);
+ return pfn;
+}
+
+static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa)
+{
+ unsigned long pfn;
+ gfn_t gfn = gpa_to_gfn(gpa);
+
+ pfn = kvmgt_gfn_to_pfn(handle, gfn);
+ if (!pfn)
+ return NULL;
+
+ return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa);
+}
+
+static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len, bool write)
+{
+ void *hva = NULL;
+
+ hva = kvmgt_gpa_to_hva(handle, gpa);
+ if (!hva)
+ return -EFAULT;
+
+ if (write)
+ memcpy(hva, buf, len);
+ else
+ memcpy(buf, hva, len);
+
+ return 0;
+}
+
+static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, false);
+}
+
+static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, true);
+}
+
+static unsigned long kvmgt_virt_to_pfn(void *addr)
+{
+ return PFN_DOWN(__pa(addr));
+}
+
+struct intel_gvt_mpt kvmgt_mpt = {
+ .detect_host = kvmgt_detect_host,
+ .host_init = kvmgt_host_init,
+ .host_exit = kvmgt_host_exit,
+ .attach_vgpu = kvmgt_attach_vgpu,
+ .detach_vgpu = kvmgt_detach_vgpu,
+ .inject_msi = kvmgt_inject_msi,
+ .from_virt_to_mfn = kvmgt_virt_to_pfn,
+ .set_wp_page = kvmgt_write_protect_add,
+ .unset_wp_page = kvmgt_write_protect_remove,
+ .read_gpa = kvmgt_read_gpa,
+ .write_gpa = kvmgt_write_gpa,
+ .gfn_to_mfn = kvmgt_gfn_to_pfn,
+};
+EXPORT_SYMBOL_GPL(kvmgt_mpt);
+
+static int __init kvmgt_init(void)
+{
+ return 0;
+}
+
+static void __exit kvmgt_exit(void)
+{
+}
+
+module_init(kvmgt_init);
+module_exit(kvmgt_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
new file mode 100644
index 000000000000..09c9450a1946
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Tina Zhang <tina.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/**
+ * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ *
+ * Returns:
+ * The MMIO offset of the given GPA
+ */
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
+{
+ u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
+ ~GENMASK(3, 0);
+ return gpa - gttmmio_gpa;
+}
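+
+/*
+ * Worked example (editorial sketch): with the virtual GTTMMIO BAR programmed
+ * to 0xf0000000 in the vGPU's PCI config space, a guest access to GPA
+ * 0xf0002000 translates to MMIO offset 0x2000, which is then dispatched by
+ * the emulation entry points below.
+ */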
+
+#define reg_is_mmio(gvt, reg) \
+ (reg >= 0 && reg < gvt->device_info.mmio_size)
+
+#define reg_is_gtt(gvt, reg) \
+ (reg >= gvt->device_info.gtt_start_offset \
+ && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+
+/**
+ * intel_vgpu_emulate_mmio_read - emulate MMIO read
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_mmio_info *mmio;
+ unsigned int offset = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(&gvt->lock);
+
+ if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
+ p_data, bytes);
+ if (ret) {
+ gvt_err("vgpu%d: guest page read error %d, "
+ "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+ vgpu->id, ret,
+ gp->gfn, pa, *(u32 *)p_data, bytes);
+ }
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+ }
+
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+ if (WARN_ON(bytes > 8))
+ goto err;
+
+ if (reg_is_gtt(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ goto err;
+ if (WARN_ON(bytes != 4 && bytes != 8))
+ goto err;
+ if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ goto err;
+
+ ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+ p_data, bytes);
+ if (ret)
+ goto err;
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+ goto err;
+
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ if (!mmio && !vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (offset == 0x206c) {
+ gvt_err("------------------------------------------\n");
+ gvt_err("vgpu%d: likely triggers a gfx reset\n",
+ vgpu->id);
+ gvt_err("------------------------------------------\n");
+ vgpu->mmio.disable_warn_untrack = true;
+ }
+ }
+
+ if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ goto err;
+ }
+
+ if (mmio) {
+ if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+ if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+ goto err;
+ if (WARN_ON(mmio->offset != offset))
+ goto err;
+ }
+ ret = mmio->read(vgpu, offset, p_data, bytes);
+ } else
+ ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+
+ if (ret)
+ goto err;
+
+ intel_gvt_mmio_set_accessed(gvt, offset);
+ mutex_unlock(&gvt->lock);
+ return 0;
+err:
+ gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
+ vgpu->id, offset, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
+
+/**
+ * intel_vgpu_emulate_mmio_write - emulate MMIO write
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_mmio_info *mmio;
+ unsigned int offset = 0;
+ u32 old_vreg = 0, old_sreg = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(&gvt->lock);
+
+ if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ ret = gp->handler(gp, pa, p_data, bytes);
+ if (ret) {
+ gvt_err("vgpu%d: guest page write error %d, "
+ "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+ vgpu->id, ret,
+ gp->gfn, pa, *(u32 *)p_data, bytes);
+ }
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+ }
+
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+ if (WARN_ON(bytes > 8))
+ goto err;
+
+ if (reg_is_gtt(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ goto err;
+ if (WARN_ON(bytes != 4 && bytes != 8))
+ goto err;
+ if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ goto err;
+
+ ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+ p_data, bytes);
+ if (ret)
+ goto err;
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ if (!mmio && !vgpu->mmio.disable_warn_untrack)
+ gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ goto err;
+ }
+
+ if (mmio) {
+ u64 ro_mask = mmio->ro_mask;
+
+ if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+ if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+ goto err;
+ if (WARN_ON(mmio->offset != offset))
+ goto err;
+ }
+
+ if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+ old_vreg = vgpu_vreg(vgpu, offset);
+ old_sreg = vgpu_sreg(vgpu, offset);
+ }
+
+ if (!ro_mask) {
+ ret = mmio->write(vgpu, offset, p_data, bytes);
+ } else {
+ /* Protect RO bits like HW */
+ u64 data = 0;
+
+ /* all register bits are RO. */
+ if (ro_mask == ~(u64)0) {
+ gvt_err("vgpu%d: try to write RO reg %x\n",
+ vgpu->id, offset);
+ ret = 0;
+ goto out;
+ }
+ /* keep the RO bits in the virtual register */
+ memcpy(&data, p_data, bytes);
+ data &= ~mmio->ro_mask;
+ data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
+ ret = mmio->write(vgpu, offset, &data, bytes);
+ }
+
+ /* higher 16bits of mode ctl regs are mask bits for change */
+ if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+ u32 mask = vgpu_vreg(vgpu, offset) >> 16;
+
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
+ | (vgpu_vreg(vgpu, offset) & mask);
+ vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
+ | (vgpu_sreg(vgpu, offset) & mask);
+ }
+ } else
+ ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ if (ret)
+ goto err;
+out:
+ intel_gvt_mmio_set_accessed(gvt, offset);
+ mutex_unlock(&gvt->lock);
+ return 0;
+err:
+ gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
+ vgpu->id, offset, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
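+
+/*
+ * Worked example (editorial sketch) of the mode-control mask handling above:
+ * a guest write of 0x80008000 to a masked register selects only bit 15 for
+ * update (its mask bit, bit 31, is set), so bit 15 takes the new value while
+ * every other bit keeps its previous vreg/sreg contents.
+ */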
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
new file mode 100644
index 000000000000..87d5b5e366a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Tina Zhang <tina.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_MMIO_H_
+#define _GVT_MMIO_H_
+
+struct intel_gvt;
+struct intel_vgpu;
+
+#define D_SNB (1 << 0)
+#define D_IVB (1 << 1)
+#define D_HSW (1 << 2)
+#define D_BDW (1 << 3)
+#define D_SKL (1 << 4)
+
+#define D_GEN9PLUS (D_SKL)
+#define D_GEN8PLUS (D_BDW | D_SKL)
+#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
+#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_SKL_PLUS (D_SKL)
+#define D_BDW_PLUS (D_BDW | D_SKL)
+#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
+#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
+#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
+#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+
+struct intel_gvt_mmio_info {
+ u32 offset;
+ u32 size;
+ u32 length;
+ u32 addr_mask;
+ u64 ro_mask;
+ u32 device;
+ int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+ int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+ u32 addr_range;
+ struct hlist_node node;
+};
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
+bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
+
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
+
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset);
+#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
+ typeof(reg) __reg = reg; \
+ u32 *offset = (u32 *)&__reg; \
+ *offset; \
+})
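+
+/*
+ * Usage sketch (assumption): the macro above lets callers pass either a raw
+ * u32 offset or an i915_reg_t, e.g.
+ *
+ *	u32 offset = INTEL_GVT_MMIO_OFFSET(RING_TAIL(RENDER_RING_BASE));
+ *
+ * since it simply reinterprets the first 32 bits of its argument.
+ */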
+
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
+
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+ unsigned int offset);
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+ unsigned int offset);
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 03601e3ffa7c..1af5830c0a56 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Dexuan Cui
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_MPT_H_
@@ -46,4 +55,254 @@ static inline int intel_gvt_hypervisor_detect_host(void)
return intel_gvt_host.mpt->detect_host();
}
+/**
+ * intel_gvt_hypervisor_host_init - init GVT-g host side
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+static inline int intel_gvt_hypervisor_host_init(struct device *dev,
+ void *gvt, const void *ops)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_init)
+ return 0;
+
+ return intel_gvt_host.mpt->host_init(dev, gvt, ops);
+}
+
+/**
+ * intel_gvt_hypervisor_host_exit - exit GVT-g host side
+ */
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
+ void *gvt)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_exit)
+ return;
+
+ intel_gvt_host.mpt->host_exit(dev, gvt);
+}
+
+/**
+ * intel_gvt_hypervisor_attach_vgpu - call the hypervisor to initialize
+ * vGPU-related state inside the hypervisor.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->attach_vgpu)
+ return 0;
+
+ return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
+}
+
+/**
+ * intel_gvt_hypervisor_detach_vgpu - call the hypervisor to release
+ * vGPU-related state inside the hypervisor.
+ */
+static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->detach_vgpu)
+ return;
+
+ intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+}
+
+#define MSI_CAP_CONTROL(offset) (offset + 2)
+#define MSI_CAP_ADDRESS(offset) (offset + 4)
+#define MSI_CAP_DATA(offset) (offset + 8)
+#define MSI_CAP_EN 0x1
+
+/**
+ * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
+{
+ unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+ u16 control, data;
+ u32 addr;
+ int ret;
+
+ control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
+ addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
+ data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
+
+ /* Do not generate an MSI if MSIEN is disabled */
+ if (!(control & MSI_CAP_EN))
+ return 0;
+
+ if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
+ return -EINVAL;
+
+ gvt_dbg_irq("vgpu%d: inject msi address %x data %x\n", vgpu->id, addr,
+ data);
+
+ ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into an MFN
+ * @p: host kernel virtual address
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
+{
+ return intel_gvt_host.mpt->from_virt_to_mfn(p);
+}
+
+/**
+ * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ int ret;
+
+ if (p->writeprotection)
+ return 0;
+
+ ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
+ if (ret)
+ return ret;
+ p->writeprotection = true;
+ atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
+ * guest page
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ int ret;
+
+ if (!p->writeprotection)
+ return 0;
+
+ ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
+ if (ret)
+ return ret;
+ p->writeprotection = false;
+ atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
+}
+
+/**
+ * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ * @mfn: host PFN
+ * @nr: number of PFNs
+ * @map: map or unmap
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
+ struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long mfn, unsigned int nr,
+ bool map)
+{
+ /* an MPT implementation could have MMIO mapped elsewhere */
+ if (!intel_gvt_host.mpt->map_gfn_to_mfn)
+ return 0;
+
+ return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
+ map);
+}
+
+/**
+ * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
+ * @vgpu: a vGPU
+ * @start: the beginning of the guest physical address region
+ * @end: the end of the guest physical address region
+ * @map: map or unmap
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_trap_area(
+ struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
+{
+ /* an MPT implementation could have MMIO trapped elsewhere */
+ if (!intel_gvt_host.mpt->set_trap_area)
+ return 0;
+
+ return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
+}
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
new file mode 100644
index 000000000000..d2a0fbc896c3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+ void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
+ u8 *buf;
+ int i;
+
+ if (WARN((vgpu_opregion(vgpu)->va),
+ "vgpu%d: opregion has been initialized already.\n",
+ vgpu->id))
+ return -EINVAL;
+
+ vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
+ GFP_DMA32 | __GFP_ZERO,
+ INTEL_GVT_OPREGION_PORDER);
+
+ if (!vgpu_opregion(vgpu)->va)
+ return -ENOMEM;
+
+ memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
+ INTEL_GVT_OPREGION_SIZE);
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+
+ /* For an unknown reason, the value in the LID field is incorrect,
+ * which blocks the Windows guest, so work around it by forcing it
+ * to "OPEN".
+ */
+ buf = (u8 *)vgpu_opregion(vgpu)->va;
+ buf[INTEL_GVT_OPREGION_CLID] = 0x3;
+
+ return 0;
+}
+
+static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
+{
+ u64 mfn;
+ int i, ret;
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
+ mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+ + i * PAGE_SIZE);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to get MFN from VA\n");
+ return -EINVAL;
+ }
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
+ vgpu_opregion(vgpu)->gfn[i],
+ mfn, 1, map);
+ if (ret) {
+ gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * intel_vgpu_clean_opregion - clean up the state used to emulate the vGPU OpRegion
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
+{
+ gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
+
+ if (!vgpu_opregion(vgpu)->va)
+ return;
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ map_vgpu_opregion(vgpu, false);
+ free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+ INTEL_GVT_OPREGION_PORDER);
+
+ vgpu_opregion(vgpu)->va = NULL;
+ }
+}
+
+/**
+ * intel_vgpu_init_opregion - initialize the state used to emulate the vGPU OpRegion
+ * @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+ int ret;
+
+ gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ gvt_dbg_core("emulate opregion from kernel\n");
+
+ ret = init_vgpu_opregion(vgpu, gpa);
+ if (ret)
+ return ret;
+
+ ret = map_vgpu_opregion(vgpu, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_gvt_clean_opregion - clean up host OpRegion-related state
+ * @gvt: a GVT device
+ *
+ */
+void intel_gvt_clean_opregion(struct intel_gvt *gvt)
+{
+ memunmap(gvt->opregion.opregion_va);
+ gvt->opregion.opregion_va = NULL;
+}
+
+/**
+ * intel_gvt_init_opregion - initialize host OpRegion-related state
+ * @gvt: a GVT device
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_opregion(struct intel_gvt *gvt)
+{
+ gvt_dbg_core("init host opregion\n");
+
+ pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
+ &gvt->opregion.opregion_pa);
+
+ gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
+ INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
+ if (!gvt->opregion.opregion_va) {
+ gvt_err("fail to map host opregion\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define GVT_OPREGION_FUNC(scic) \
+ ({ \
+ u32 __ret; \
+ __ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \
+ OPREGION_SCIC_FUNC_SHIFT; \
+ __ret; \
+ })
+
+#define GVT_OPREGION_SUBFUNC(scic) \
+ ({ \
+ u32 __ret; \
+ __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \
+ OPREGION_SCIC_SUBFUNC_SHIFT; \
+ __ret; \
+ })
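+
+/*
+ * Decoding example (editorial sketch): a SWSCI request with SCIC = 0x00000009
+ * decodes as func = (0x09 & 0x1E) >> 1 = 4 ("Get BIOS Data") and
+ * subfunc = (0x0009 & 0xFF00) >> 8 = 0 ("Supported Calls"), which
+ * querying_capabilities() below treats as a capability query.
+ */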
+
+static const char *opregion_func_name(u32 func)
+{
+ const char *name = NULL;
+
+ switch (func) {
+ case 0 ... 3:
+ case 5:
+ case 7 ... 15:
+ name = "Reserved";
+ break;
+
+ case 4:
+ name = "Get BIOS Data";
+ break;
+
+ case 6:
+ name = "System BIOS Callbacks";
+ break;
+
+ default:
+ name = "Unknown";
+ break;
+ }
+ return name;
+}
+
+static const char *opregion_subfunc_name(u32 subfunc)
+{
+ const char *name = NULL;
+
+ switch (subfunc) {
+ case 0:
+ name = "Supported Calls";
+ break;
+
+ case 1:
+ name = "Requested Callbacks";
+ break;
+
+ case 2 ... 3:
+ case 8 ... 9:
+ name = "Reserved";
+ break;
+
+ case 5:
+ name = "Boot Display";
+ break;
+
+ case 6:
+ name = "TV-Standard/Video-Connector";
+ break;
+
+ case 7:
+ name = "Internal Graphics";
+ break;
+
+ case 10:
+ name = "Spread Spectrum Clocks";
+ break;
+
+ case 11:
+ name = "Get AKSV";
+ break;
+
+ default:
+ name = "Unknown";
+ break;
+ }
+ return name;
+};
+
+static bool querying_capabilities(u32 scic)
+{
+ u32 func, subfunc;
+
+ func = GVT_OPREGION_FUNC(scic);
+ subfunc = GVT_OPREGION_SUBFUNC(scic);
+
+ if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
+ || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
+ || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
+ return true;
+ }
+ return false;
+}
+
+/**
+ * intel_vgpu_emulate_opregion_request - emulating OpRegion request
+ * @vgpu: a vGPU
+ * @swsci: SWSCI request
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
+{
+ u32 *scic, *parm;
+ u32 func, subfunc;
+
+ scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
+ parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+
+ if (!(swsci & SWSCI_SCI_SELECT)) {
+ gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+ return 0;
+ }
+ /* ignore non 0->1 transitions */
+ if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
+ & SWSCI_SCI_TRIGGER) ||
+ !(swsci & SWSCI_SCI_TRIGGER)) {
+ return 0;
+ }
+
+ func = GVT_OPREGION_FUNC(*scic);
+ subfunc = GVT_OPREGION_SUBFUNC(*scic);
+ if (!querying_capabilities(*scic)) {
+ gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+ " subfunc \"%s\"\n",
+ vgpu->id,
+ opregion_func_name(func),
+ opregion_subfunc_name(subfunc));
+ /*
+ * emulate exit status of function call, '0' means
+ * "failure, generic, unsupported or unknown cause"
+ */
+ *scic &= ~OPREGION_SCIC_EXIT_MASK;
+ return 0;
+ }
+
+ *scic = 0;
+ *parm = 0;
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
new file mode 100644
index 000000000000..0dfe789d8f02
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_REG_H
+#define _GVT_REG_H
+
+#define INTEL_GVT_PCI_CLASS_VGA_OTHER 0x80
+
+#define INTEL_GVT_PCI_GMCH_CONTROL 0x50
+#define BDW_GMCH_GMS_SHIFT 8
+#define BDW_GMCH_GMS_MASK 0xff
+
+#define INTEL_GVT_PCI_SWSCI 0xe8
+#define SWSCI_SCI_SELECT (1 << 15)
+#define SWSCI_SCI_TRIGGER 1
+
+#define INTEL_GVT_PCI_OPREGION 0xfc
+
+#define INTEL_GVT_OPREGION_CLID 0x1AC
+#define INTEL_GVT_OPREGION_SCIC 0x200
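+/*
+ * SCIC dword layout: function select in bits [4:1], exit result in
+ * bits [7:5], sub-function select in bits [15:8].
+ */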
+#define OPREGION_SCIC_FUNC_MASK 0x1E
+#define OPREGION_SCIC_FUNC_SHIFT 1
+#define OPREGION_SCIC_SUBFUNC_MASK 0xFF00
+#define OPREGION_SCIC_SUBFUNC_SHIFT 8
+#define OPREGION_SCIC_EXIT_MASK 0xE0
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA 4
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS 6
+#define INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS 0
+#define INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS 1
+#define INTEL_GVT_OPREGION_PARM 0x204
+
+#define INTEL_GVT_OPREGION_PAGES 2
+#define INTEL_GVT_OPREGION_PORDER 1
+#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
+
+#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
+
+#define _REG_VECS_EXCC 0x1A028
+#define _REG_VCS2_EXCC 0x1c028
+
+#define _REG_701C0(pipe, plane) (0x701c0 + (pipe) * 0x1000 + ((plane) - 1) * 0x100)
+#define _REG_701C4(pipe, plane) (0x701c4 + (pipe) * 0x1000 + ((plane) - 1) * 0x100)
+
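+/*
+ * For masked MMIO registers the upper 16 bits of a write act as a
+ * write-enable mask for the lower 16 bits; this checks whether a
+ * written value selects @bit for update.
+ */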
+#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
+ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+
+#define FORCEWAKE_RENDER_GEN9_REG 0xa278
+#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
+#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
+#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
+#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
+#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
+#define FORCEWAKE_ACK_HSW_REG 0x130044
+
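+/*
+ * Ring buffer head offset occupies bits [20:2], tail offset bits [20:3],
+ * and the buffer-size field of RING_CTL bits [20:12].
+ */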
+#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
+#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
+#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
new file mode 100644
index 000000000000..44136b1f3aab
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
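+/*
+ * Engine MMIO registers saved/restored around a vGPU context switch.
+ * A non-zero mask marks a masked register: the mask is shifted into
+ * the upper 16 bits of the write to act as a write-enable mask.
+ */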
+struct render_mmio {
+ int ring_id;
+ i915_reg_t reg;
+ u32 mask;
+ bool in_context;
+ u32 value;
+};
+
+static struct render_mmio gen8_render_mmio_list[] = {
+ {RCS, _MMIO(0x229c), 0xffff, false},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x2098), 0x0, false},
+ {RCS, _MMIO(0x20c0), 0xffff, true},
+ {RCS, _MMIO(0x24d0), 0, false},
+ {RCS, _MMIO(0x24d4), 0, false},
+ {RCS, _MMIO(0x24d8), 0, false},
+ {RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x7004), 0xffff, true},
+ {RCS, _MMIO(0x7008), 0xffff, true},
+ {RCS, _MMIO(0x7000), 0xffff, true},
+ {RCS, _MMIO(0x7010), 0xffff, true},
+ {RCS, _MMIO(0x7300), 0xffff, true},
+ {RCS, _MMIO(0x83a4), 0xffff, true},
+
+ {BCS, _MMIO(0x2229c), 0xffff, false},
+ {BCS, _MMIO(0x2209c), 0xffff, false},
+ {BCS, _MMIO(0x220c0), 0xffff, false},
+ {BCS, _MMIO(0x22098), 0x0, false},
+ {BCS, _MMIO(0x22028), 0x0, false},
+};
+
+static struct render_mmio gen9_render_mmio_list[] = {
+ {RCS, _MMIO(0x229c), 0xffff, false},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x2098), 0x0, false},
+ {RCS, _MMIO(0x20c0), 0xffff, true},
+ {RCS, _MMIO(0x24d0), 0, false},
+ {RCS, _MMIO(0x24d4), 0, false},
+ {RCS, _MMIO(0x24d8), 0, false},
+ {RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x7004), 0xffff, true},
+ {RCS, _MMIO(0x7008), 0xffff, true},
+ {RCS, _MMIO(0x7000), 0xffff, true},
+ {RCS, _MMIO(0x7010), 0xffff, true},
+ {RCS, _MMIO(0x7300), 0xffff, true},
+ {RCS, _MMIO(0x83a4), 0xffff, true},
+
+ {RCS, _MMIO(0x40e0), 0, false},
+ {RCS, _MMIO(0x40e4), 0, false},
+ {RCS, _MMIO(0x2580), 0xffff, true},
+ {RCS, _MMIO(0x7014), 0xffff, true},
+ {RCS, _MMIO(0x20ec), 0xffff, false},
+ {RCS, _MMIO(0xb118), 0, false},
+ {RCS, _MMIO(0xe100), 0xffff, true},
+ {RCS, _MMIO(0xe180), 0xffff, true},
+ {RCS, _MMIO(0xe184), 0xffff, true},
+ {RCS, _MMIO(0xe188), 0xffff, true},
+ {RCS, _MMIO(0xe194), 0xffff, true},
+ {RCS, _MMIO(0x4de0), 0, false},
+ {RCS, _MMIO(0x4de4), 0, false},
+ {RCS, _MMIO(0x4de8), 0, false},
+ {RCS, _MMIO(0x4dec), 0, false},
+ {RCS, _MMIO(0x4df0), 0, false},
+ {RCS, _MMIO(0x4df4), 0, false},
+
+ {BCS, _MMIO(0x2229c), 0xffff, false},
+ {BCS, _MMIO(0x2209c), 0xffff, false},
+ {BCS, _MMIO(0x220c0), 0xffff, false},
+ {BCS, _MMIO(0x22098), 0x0, false},
+ {BCS, _MMIO(0x22028), 0x0, false},
+
+ {VCS2, _MMIO(0x1c028), 0xffff, false},
+
+ {VECS, _MMIO(0x1a028), 0xffff, false},
+};
+
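+/*
+ * Host MOCS register values are parked here while a vGPU's values are
+ * loaded, and are written back when the vGPU is switched out.
+ */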
+static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
+static u32 gen9_render_mocs_L3[32];
+
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum forcewake_domains fw;
+ i915_reg_t reg;
+ u32 regs[] = {
+ [RCS] = 0x4260,
+ [VCS] = 0x4264,
+ [VCS2] = 0x4268,
+ [BCS] = 0x426c,
+ [VECS] = 0x4270,
+ };
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
+ return;
+
+ reg = _MMIO(regs[ring_id]);
+
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+ * We need to hold a forcewake while invalidating RCS TLB caches;
+ * otherwise the device can enter RC6 and interrupt the invalidation
+ * process.
+ */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+ fw |= FORCEWAKE_RENDER;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+
+ I915_WRITE_FW(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ else
+ vgpu_vreg(vgpu, regs[ring_id]) = 0;
+
+ intel_uncore_forcewake_put(dev_priv, fw);
+
+ gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+}
+
+static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!IS_SKYLAKE(dev_priv))
+ return;
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ gen9_render_mocs[ring_id][i] = I915_READ(offset);
+ I915_WRITE(offset, vgpu_vreg(vgpu, offset));
+ POSTING_READ(offset);
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ gen9_render_mocs_L3[i] = I915_READ(l3_offset);
+ I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
+ POSTING_READ(l3_offset);
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!IS_SKYLAKE(dev_priv))
+ return;
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ vgpu_vreg(vgpu, offset) = I915_READ(offset);
+ I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
+ POSTING_READ(offset);
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
+ I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
+ POSTING_READ(l3_offset);
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct render_mmio *mmio;
+ u32 v;
+ int i, array_size;
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ mmio = gen9_render_mmio_list;
+ array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ load_mocs(vgpu, ring_id);
+ } else {
+ mmio = gen8_render_mmio_list;
+ array_size = ARRAY_SIZE(gen8_render_mmio_list);
+ }
+
+ for (i = 0; i < array_size; i++, mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+
+ mmio->value = I915_READ(mmio->reg);
+ if (mmio->mask)
+ v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
+ else
+ v = vgpu_vreg(vgpu, mmio->reg);
+
+ I915_WRITE(mmio->reg, v);
+ POSTING_READ(mmio->reg);
+
+ gvt_dbg_render("load reg %x old %x new %x\n",
+ i915_mmio_reg_offset(mmio->reg),
+ mmio->value, v);
+ }
+ handle_tlb_pending_event(vgpu, ring_id);
+}
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct render_mmio *mmio;
+ u32 v;
+ int i, array_size;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ mmio = gen9_render_mmio_list;
+ array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ restore_mocs(vgpu, ring_id);
+ } else {
+ mmio = gen8_render_mmio_list;
+ array_size = ARRAY_SIZE(gen8_render_mmio_list);
+ }
+
+ for (i = 0; i < array_size; i++, mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+
+ vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+
+ if (mmio->mask) {
+ vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
+ v = mmio->value | (mmio->mask << 16);
+ } else
+ v = mmio->value;
+
+ I915_WRITE(mmio->reg, v);
+ POSTING_READ(mmio->reg);
+
+ gvt_dbg_render("restore reg %x old %x new %x\n",
+ i915_mmio_reg_offset(mmio->reg),
+ mmio->value, v);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
new file mode 100644
index 000000000000..dac1a3cc458b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef __GVT_RENDER_H__
+#define __GVT_RENDER_H__
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
new file mode 100644
index 000000000000..678b0be85376
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Anhua Xu
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
+{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ if (!list_empty(workload_q_head(vgpu, i)))
+ return true;
+ }
+
+ return false;
+}
+
+static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ /* no target to schedule */
+ if (!scheduler->next_vgpu)
+ return;
+
+ gvt_dbg_sched("try to schedule next vgpu %d\n",
+ scheduler->next_vgpu->id);
+
+ /*
+ * once this flag is set, the workload dispatch thread will
+ * stop dispatching workloads for the current vgpu
+ */
+ scheduler->need_reschedule = true;
+
+ /* still have uncompleted workload? */
+ for_each_engine(engine, gvt->dev_priv, i) {
+ if (scheduler->current_workload[i]) {
+ gvt_dbg_sched("still have running workload\n");
+ return;
+ }
+ }
+
+ gvt_dbg_sched("switch to next vgpu %d\n",
+ scheduler->next_vgpu->id);
+
+ /* switch current vgpu */
+ scheduler->current_vgpu = scheduler->next_vgpu;
+ scheduler->next_vgpu = NULL;
+
+ scheduler->need_reschedule = false;
+
+ /* wake up workload dispatch thread */
+ for_each_engine(engine, gvt->dev_priv, i)
+ wake_up(&scheduler->waitq[i]);
+}
+
+struct tbs_vgpu_data {
+ struct list_head list;
+ struct intel_vgpu *vgpu;
+ /* put some per-vgpu sched stats here */
+};
+
+struct tbs_sched_data {
+ struct intel_gvt *gvt;
+ struct delayed_work work;
+ unsigned long period;
+ struct list_head runq_head;
+};
+
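+/* default scheduling time slice: nominally 1 ms worth of jiffies */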
+#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+
+static void tbs_sched_func(struct work_struct *work)
+{
+ struct tbs_sched_data *sched_data = container_of(work,
+ struct tbs_sched_data, work.work);
+ struct tbs_vgpu_data *vgpu_data;
+
+ struct intel_gvt *gvt = sched_data->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ struct intel_vgpu *vgpu = NULL;
+ struct list_head *pos, *head;
+
+ mutex_lock(&gvt->lock);
+
+ /* no vgpu on the run queue, or a next vgpu has already been chosen */
+ if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+ goto out;
+
+ if (scheduler->current_vgpu) {
+ vgpu_data = scheduler->current_vgpu->sched_data;
+ head = &vgpu_data->list;
+ } else {
+ gvt_dbg_sched("no current vgpu search from q head\n");
+ head = &sched_data->runq_head;
+ }
+
+ /* search, round-robin from the current vgpu, for a vgpu with a pending workload */
+ list_for_each(pos, head) {
+ if (pos == &sched_data->runq_head)
+ continue;
+
+ vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+ if (!vgpu_has_pending_workload(vgpu_data->vgpu))
+ continue;
+
+ vgpu = vgpu_data->vgpu;
+ break;
+ }
+
+ if (vgpu) {
+ scheduler->next_vgpu = vgpu;
+ gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+ }
+out:
+ if (scheduler->next_vgpu) {
+ gvt_dbg_sched("try to schedule next vgpu %d\n",
+ scheduler->next_vgpu->id);
+ try_to_schedule_next_vgpu(gvt);
+ }
+
+ /*
+ * still have vgpus on the runq, or the last schedule hasn't
+ * finished due to a running workload
+ */
+ if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+
+ mutex_unlock(&gvt->lock);
+}
+
+static int tbs_sched_init(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &gvt->scheduler;
+
+ struct tbs_sched_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->runq_head);
+ INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ data->period = GVT_DEFAULT_TIME_SLICE;
+ data->gvt = gvt;
+
+ scheduler->sched_data = data;
+ return 0;
+}
+
+static void tbs_sched_clean(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &gvt->scheduler;
+ struct tbs_sched_data *data = scheduler->sched_data;
+
+ cancel_delayed_work(&data->work);
+ kfree(data);
+ scheduler->sched_data = NULL;
+}
+
+static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
+{
+ struct tbs_vgpu_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->vgpu = vgpu;
+ INIT_LIST_HEAD(&data->list);
+
+ vgpu->sched_data = data;
+ return 0;
+}
+
+static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
+{
+ kfree(vgpu->sched_data);
+ vgpu->sched_data = NULL;
+}
+
+static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
+{
+ struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+ struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+ if (!list_empty(&vgpu_data->list))
+ return;
+
+ list_add_tail(&vgpu_data->list, &sched_data->runq_head);
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+}
+
+static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
+{
+ struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+ list_del_init(&vgpu_data->list);
+}
+
+static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
+ .init = tbs_sched_init,
+ .clean = tbs_sched_clean,
+ .init_vgpu = tbs_sched_init_vgpu,
+ .clean_vgpu = tbs_sched_clean_vgpu,
+ .start_schedule = tbs_sched_start_schedule,
+ .stop_schedule = tbs_sched_stop_schedule,
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
+{
+ gvt->scheduler.sched_ops = &tbs_schedule_ops;
+
+ return gvt->scheduler.sched_ops->init(gvt);
+}
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
+{
+ gvt->scheduler.sched_ops->clean(gvt);
+}
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
+{
+ return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+}
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
+{
+ vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+}
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
+{
+ gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+
+ vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+}
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+
+ gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+
+ scheduler->sched_ops->stop_schedule(vgpu);
+
+ if (scheduler->next_vgpu == vgpu)
+ scheduler->next_vgpu = NULL;
+
+ if (scheduler->current_vgpu == vgpu) {
+ /* stop workload dispatching */
+ scheduler->need_reschedule = true;
+ scheduler->current_vgpu = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
new file mode 100644
index 000000000000..bb8b9097e41a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Anhua Xu
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef __GVT_SCHED_POLICY__
+#define __GVT_SCHED_POLICY__
+
+struct intel_gvt_sched_policy_ops {
+ int (*init)(struct intel_gvt *gvt);
+ void (*clean)(struct intel_gvt *gvt);
+ int (*init_vgpu)(struct intel_vgpu *vgpu);
+ void (*clean_vgpu)(struct intel_vgpu *vgpu);
+ void (*start_schedule)(struct intel_vgpu *vgpu);
+ void (*stop_schedule)(struct intel_vgpu *vgpu);
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
new file mode 100644
index 000000000000..f898df38dd9a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Chanbin Du <changbin.du@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
+static void set_context_pdp_root_pointer(
+ struct execlist_ring_context *ring_context,
+ u32 pdp[8])
+{
+ struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ pdp_pair[i].val = pdp[7 - i];
+}
+
+static int populate_shadow_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+ void *dst;
+ unsigned long context_gpa, context_page_num;
+ int i;
+
+ gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = intel_lr_context_size(
+ gvt->dev_priv->engine[ring_id]);
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
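+ /*
+ * copy the guest context from page 2 onward; the ring context page
+ * (LRC_STATE_PN) is populated field by field below
+ */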
+ i = 2;
+
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("Invalid guest context descriptor\n");
+ return -EINVAL;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ dst = kmap(page);
+ intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+ GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+
+#define COPY_REG(name) \
+ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+ COPY_REG(ctx_ctrl);
+ COPY_REG(ctx_timestamp);
+
+ if (ring_id == RCS) {
+ COPY_REG(bb_per_ctx_ptr);
+ COPY_REG(rcs_indirect_ctx);
+ COPY_REG(rcs_indirect_ctx_offset);
+ }
+#undef COPY_REG
+
+ set_context_pdp_root_pointer(shadow_ring_context,
+ workload->shadow_mm->shadow_page_table);
+
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ workload->ring_context_gpa +
+ sizeof(*shadow_ring_context),
+ (void *)shadow_ring_context +
+ sizeof(*shadow_ring_context),
+ GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+ kunmap(page);
+ return 0;
+}
+
+static int shadow_context_status_change(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu, shadow_ctx_notifier_block);
+ struct drm_i915_gem_request *req =
+ (struct drm_i915_gem_request *)data;
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+ struct intel_vgpu_workload *workload =
+ scheduler->current_workload[req->engine->id];
+
+ switch (action) {
+ case INTEL_CONTEXT_SCHEDULE_IN:
+ intel_gvt_load_render_mmio(workload->vgpu,
+ workload->ring_id);
+ atomic_set(&workload->shadow_ctx_active, 1);
+ break;
+ case INTEL_CONTEXT_SCHEDULE_OUT:
+ intel_gvt_restore_render_mmio(workload->vgpu,
+ workload->ring_id);
+ atomic_set(&workload->shadow_ctx_active, 0);
+ break;
+ default:
+ WARN_ON(1);
+ return NOTIFY_OK;
+ }
+ wake_up(&workload->shadow_ctx_status_wq);
+ return NOTIFY_OK;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_gem_request *rq;
+ int ret;
+
+ gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+ ring_id, workload);
+
+ shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_err("fail to allocate gem request\n");
+ workload->status = PTR_ERR(rq);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return workload->status;
+ }
+
+ gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+ workload->req = i915_gem_request_get(rq);
+
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ if (ret)
+ goto out;
+
+ ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret)
+ goto out;
+
+ ret = populate_shadow_context(workload);
+ if (ret)
+ goto out;
+
+ if (workload->prepare) {
+ ret = workload->prepare(workload);
+ if (ret)
+ goto out;
+ }
+
+ gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+ ring_id, workload->req);
+
+ ret = 0;
+ workload->dispatched = true;
+out:
+ if (ret)
+ workload->status = ret;
+
+ i915_add_request_no_flush(rq);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static struct intel_vgpu_workload *pick_next_workload(
+ struct intel_gvt *gvt, int ring_id)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload = NULL;
+
+ mutex_lock(&gvt->lock);
+
+ /*
+ * no current vgpu / will be scheduled out / no workload:
+ * bail out
+ */
+ if (!scheduler->current_vgpu) {
+ gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ goto out;
+ }
+
+ if (scheduler->need_reschedule) {
+ gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ goto out;
+ }
+
+ if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
+ gvt_dbg_sched("ring id %d stop - no available workload\n",
+ ring_id);
+ goto out;
+ }
+
+ /*
+ * still have a current workload, maybe the workload dispatcher
+ * failed to submit it for some reason; resubmit it.
+ */
+ if (scheduler->current_workload[ring_id]) {
+ workload = scheduler->current_workload[ring_id];
+ gvt_dbg_sched("ring id %d still have current workload %p\n",
+ ring_id, workload);
+ goto out;
+ }
+
+ /*
+ * pick a workload as the current workload.
+ * once the current workload is set, schedule policy routines
+ * will wait until it is finished when trying to
+ * schedule out a vgpu.
+ */
+ scheduler->current_workload[ring_id] = container_of(
+ workload_q_head(scheduler->current_vgpu, ring_id)->next,
+ struct intel_vgpu_workload, list);
+
+ workload = scheduler->current_workload[ring_id];
+
+ gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+
+ atomic_inc(&workload->vgpu->running_workload_num);
+out:
+ mutex_unlock(&gvt->lock);
+ return workload;
+}
+
+static void update_guest_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+ void *src;
+ unsigned long context_gpa, context_page_num;
+ int i;
+
+ gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = intel_lr_context_size(
+ gvt->dev_priv->engine[ring_id]);
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid guest context descriptor\n");
+ return;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ src = kmap(page);
+ intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
+ GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
+
+ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+
+#define COPY_REG(name) \
+ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+ COPY_REG(ctx_ctrl);
+ COPY_REG(ctx_timestamp);
+
+#undef COPY_REG
+
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ workload->ring_context_gpa +
+ sizeof(*shadow_ring_context),
+ (void *)shadow_ring_context +
+ sizeof(*shadow_ring_context),
+ GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+ kunmap(page);
+}
+
+static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload;
+ int event;
+
+ mutex_lock(&gvt->lock);
+
+ workload = scheduler->current_workload[ring_id];
+
+ if (!workload->status && !workload->vgpu->resetting) {
+ wait_event(workload->shadow_ctx_status_wq,
+ !atomic_read(&workload->shadow_ctx_active));
+
+ update_guest_context(workload);
+
+ for_each_set_bit(event, workload->pending_events,
+ INTEL_GVT_EVENT_MAX)
+ intel_vgpu_trigger_virtual_event(workload->vgpu,
+ event);
+ }
+
+ gvt_dbg_sched("ring id %d complete workload %p status %d\n",
+ ring_id, workload, workload->status);
+
+ scheduler->current_workload[ring_id] = NULL;
+
+ atomic_dec(&workload->vgpu->running_workload_num);
+
+ list_del_init(&workload->list);
+ workload->complete(workload);
+
+ wake_up(&scheduler->workload_complete_wq);
+ mutex_unlock(&gvt->lock);
+}
+
+struct workload_thread_param {
+ struct intel_gvt *gvt;
+ int ring_id;
+};
+
+static DEFINE_MUTEX(scheduler_mutex);
+
+static int workload_thread(void *priv)
+{
+ struct workload_thread_param *p = (struct workload_thread_param *)priv;
+ struct intel_gvt *gvt = p->gvt;
+ int ring_id = p->ring_id;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload = NULL;
+ long lret;
+ int ret;
+ bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ kfree(p);
+
+ gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+
+ while (!kthread_should_stop()) {
+ add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ do {
+ workload = pick_next_workload(gvt, ring_id);
+ if (workload)
+ break;
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ } while (!kthread_should_stop());
+ remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+ if (!workload)
+ break;
+
+ mutex_lock(&scheduler_mutex);
+
+ gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
+ workload->ring_id, workload,
+ workload->vgpu->id);
+
+ intel_runtime_pm_get(gvt->dev_priv);
+
+ gvt_dbg_sched("ring id %d will dispatch workload %p\n",
+ workload->ring_id, workload);
+
+ if (need_force_wake)
+ intel_uncore_forcewake_get(gvt->dev_priv,
+ FORCEWAKE_ALL);
+
+ mutex_lock(&gvt->lock);
+ ret = dispatch_workload(workload);
+ mutex_unlock(&gvt->lock);
+
+ if (ret) {
+ gvt_err("fail to dispatch workload, skip\n");
+ goto complete;
+ }
+
+ gvt_dbg_sched("ring id %d wait workload %p\n",
+ workload->ring_id, workload);
+
+ lret = i915_wait_request(workload->req,
+ 0, MAX_SCHEDULE_TIMEOUT);
+ if (lret < 0) {
+ workload->status = lret;
+ gvt_err("fail to wait workload, skip\n");
+ } else {
+ workload->status = 0;
+ }
+
+complete:
+ gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+ workload, workload->status);
+
+ complete_current_workload(gvt, ring_id);
+
+ i915_gem_request_put(fetch_and_zero(&workload->req));
+
+ if (need_force_wake)
+ intel_uncore_forcewake_put(gvt->dev_priv,
+ FORCEWAKE_ALL);
+
+ intel_runtime_pm_put(gvt->dev_priv);
+
+ mutex_unlock(&scheduler_mutex);
+
+ }
+ return 0;
+}
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ if (atomic_read(&vgpu->running_workload_num)) {
+ gvt_dbg_sched("wait vgpu idle\n");
+
+ wait_event(scheduler->workload_complete_wq,
+ !atomic_read(&vgpu->running_workload_num));
+ }
+}
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ int i;
+
+ gvt_dbg_core("clean workload scheduler\n");
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ if (scheduler->thread[i]) {
+ kthread_stop(scheduler->thread[i]);
+ scheduler->thread[i] = NULL;
+ }
+ }
+}
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct workload_thread_param *param = NULL;
+ int ret;
+ int i;
+
+ gvt_dbg_core("init workload scheduler\n");
+
+ init_waitqueue_head(&scheduler->workload_complete_wq);
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ /* check ring mask at init time */
+ if (!HAS_ENGINE(gvt->dev_priv, i))
+ continue;
+
+ init_waitqueue_head(&scheduler->waitq[i]);
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ param->gvt = gvt;
+ param->ring_id = i;
+
+ scheduler->thread[i] = kthread_run(workload_thread, param,
+ "gvt workload %d", i);
+ if (IS_ERR(scheduler->thread[i])) {
+ gvt_err("fail to create workload thread\n");
+ ret = PTR_ERR(scheduler->thread[i]);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ intel_gvt_clean_workload_scheduler(gvt);
+ kfree(param);
+ param = NULL;
+ return ret;
+}
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
+ &vgpu->shadow_ctx_notifier_block);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ /* a little hacky to mark as ctx closed */
+ vgpu->shadow_ctx->closed = true;
+ i915_gem_context_put(vgpu->shadow_ctx);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+{
+ atomic_set(&vgpu->running_workload_num, 0);
+
+ vgpu->shadow_ctx = i915_gem_context_create_gvt(
+ &vgpu->gvt->dev_priv->drm);
+ if (IS_ERR(vgpu->shadow_ctx))
+ return PTR_ERR(vgpu->shadow_ctx);
+
+ vgpu->shadow_ctx->engine[RCS].initialised = true;
+
+ vgpu->shadow_ctx_notifier_block.notifier_call =
+ shadow_context_status_change;
+
+ atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
+ &vgpu->shadow_ctx_notifier_block);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
new file mode 100644
index 000000000000..3b30c28bff51
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Chanbin Du <changbin.du@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#ifndef _GVT_SCHEDULER_H_
+#define _GVT_SCHEDULER_H_
+
+struct intel_gvt_workload_scheduler {
+ struct intel_vgpu *current_vgpu;
+ struct intel_vgpu *next_vgpu;
+ struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
+ bool need_reschedule;
+
+ wait_queue_head_t workload_complete_wq;
+ struct task_struct *thread[I915_NUM_ENGINES];
+ wait_queue_head_t waitq[I915_NUM_ENGINES];
+
+ void *sched_data;
+ struct intel_gvt_sched_policy_ops *sched_ops;
+};
+
+#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
+#define INDIRECT_CTX_SIZE_MASK 0x3f
+struct shadow_indirect_ctx {
+ struct drm_i915_gem_object *obj;
+ unsigned long guest_gma;
+ unsigned long shadow_gma;
+ void *shadow_va;
+ uint32_t size;
+};
+
+#define PER_CTX_ADDR_MASK 0xfffff000
+struct shadow_per_ctx {
+ unsigned long guest_gma;
+ unsigned long shadow_gma;
+};
+
+struct intel_shadow_wa_ctx {
+ struct intel_vgpu_workload *workload;
+ struct shadow_indirect_ctx indirect_ctx;
+ struct shadow_per_ctx per_ctx;
+};
+
+struct intel_vgpu_workload {
+ struct intel_vgpu *vgpu;
+ int ring_id;
+ struct drm_i915_gem_request *req;
+ /* has this workload been dispatched to i915? */
+ bool dispatched;
+ int status;
+
+ struct intel_vgpu_mm *shadow_mm;
+
+ /* different submission models may need different handlers */
+ int (*prepare)(struct intel_vgpu_workload *);
+ int (*complete)(struct intel_vgpu_workload *);
+ struct list_head list;
+
+ DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+ void *shadow_ring_buffer_va;
+
+ /* execlist context information */
+ struct execlist_ctx_descriptor_format ctx_desc;
+ struct execlist_ring_context *ring_context;
+ unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+ bool restore_inhibit;
+ struct intel_vgpu_elsp_dwords elsp_dwords;
+ bool emulate_schedule_in;
+ atomic_t shadow_ctx_active;
+ wait_queue_head_t shadow_ctx_status_wq;
+ u64 ring_context_gpa;
+
+ /* shadow batch buffer */
+ struct list_head shadow_bb;
+ struct intel_shadow_wa_ctx wa_ctx;
+};
+
+/* An Intel shadow batch buffer is an i915 gem object */
+struct intel_shadow_bb_entry {
+ struct list_head list;
+ struct drm_i915_gem_object *obj;
+ void *va;
+ unsigned long len;
+ void *bb_start_cmd_va;
+};
+
+#define workload_q_head(vgpu, ring_id) \
+ (&(vgpu->workload_q_head[ring_id]))
+
+#define queue_workload(workload) do { \
+ list_add_tail(&workload->list, \
+ workload_q_head(workload->vgpu, workload->ring_id)); \
+ wake_up(&workload->vgpu->gvt-> \
+ scheduler.waitq[workload->ring_id]); \
+} while (0)
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
new file mode 100644
index 000000000000..53a2d10cf3f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright © 2011-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _GVT_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+#include <asm/tsc.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gvt
+
+TRACE_EVENT(spt_alloc,
+ TP_PROTO(int id, void *spt, int type, unsigned long mfn,
+ unsigned long gpt_gfn),
+
+ TP_ARGS(id, spt, type, mfn, gpt_gfn),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(void *, spt)
+ __field(int, type)
+ __field(unsigned long, mfn)
+ __field(unsigned long, gpt_gfn)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->spt = spt;
+ __entry->type = type;
+ __entry->mfn = mfn;
+ __entry->gpt_gfn = gpt_gfn;
+ ),
+
+ TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
+ __entry->id,
+ __entry->spt,
+ __entry->type,
+ __entry->mfn,
+ __entry->gpt_gfn)
+);
+
+TRACE_EVENT(spt_free,
+ TP_PROTO(int id, void *spt, int type),
+
+ TP_ARGS(id, spt, type),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(void *, spt)
+ __field(int, type)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->spt = spt;
+ __entry->type = type;
+ ),
+
+ TP_printk("VM%u [free] spt %p type %d\n",
+ __entry->id,
+ __entry->spt,
+ __entry->type)
+);
+
+#define MAX_BUF_LEN 256
+
+TRACE_EVENT(gma_index,
+ TP_PROTO(const char *prefix, unsigned long gma,
+ unsigned long index),
+
+ TP_ARGS(prefix, gma, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gma_translate,
+ TP_PROTO(int id, char *type, int ring_id, int pt_level,
+ unsigned long gma, unsigned long gpa),
+
+ TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
+ id, type, ring_id, pt_level, gma, gpa);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_refcount,
+ TP_PROTO(int id, char *action, void *spt, int before, int after),
+
+ TP_ARGS(id, action, spt, before, after),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p before %d -> after %d\n",
+ id, action, spt, before, after);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_change,
+ TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
+ int type),
+
+ TP_ARGS(id, action, spt, gfn, type),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p gfn 0x%lx type %d\n",
+ id, action, spt, gfn, type);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gpt_change,
+ TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
+ unsigned long index),
+
+ TP_ARGS(id, tag, spt, type, v, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
+ id, tag, spt, type, v, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_change,
+ TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),
+
+ TP_ARGS(id, tag, page_id, gpt, type),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [oos %s] page id %d gpt %p type %d\n",
+ id, tag, page_id, gpt, type);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_sync,
+ TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
+ unsigned long index),
+
+ TP_ARGS(id, page_id, gpt, type, v, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
+ id, page_id, gpt, type, v, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+#define MAX_CMD_STR_LEN 256
+TRACE_EVENT(gvt_command,
+ TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
+
+ TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
+
+ TP_STRUCT__entry(
+ __field(u8, vm_id)
+ __field(u8, ring_id)
+ __field(int, i)
+ __array(char, tmp_buf, MAX_CMD_STR_LEN)
+ __array(char, cmd_str, MAX_CMD_STR_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vm_id = vm_id;
+ __entry->ring_id = ring_id;
+ __entry->cmd_str[0] = '\0';
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ __entry->i = 0;
+ while (cmd_len > 0) {
+ if (cmd_len >= 8) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
+ cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
+ cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
+ __entry->i += 8;
+ cmd_len -= 8;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len >= 4) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
+ cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
+ __entry->i += 4;
+ cmd_len -= 4;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len >= 2) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
+ __entry->i += 2;
+ cmd_len -= 2;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len == 1) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
+ __entry->i += 1;
+ cmd_len -= 1;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ }
+ }
+ strcat(__entry->cmd_str, "\n");
+ ),
+
+ TP_printk("%s", __entry->cmd_str)
+);
+#endif /* _GVT_TRACE_H_ */
+
+/* This part must be out of protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/gvt/trace_points.c
index 91315557e421..a3deed692b9c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.h
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Intel Corporation
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -16,30 +16,21 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
*
*/
-#ifndef _I915_GEM_DMABUF_H_
-#define _I915_GEM_DMABUF_H_
-
-#include <linux/dma-buf.h>
-
-static inline struct reservation_object *
-i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
-{
- struct dma_buf *dma_buf;
-
- if (obj->base.dma_buf)
- dma_buf = obj->base.dma_buf;
- else if (obj->base.import_attach)
- dma_buf = obj->base.import_attach->dmabuf;
- else
- return NULL;
-
- return dma_buf->resv;
-}
+#include "trace.h"
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
new file mode 100644
index 000000000000..4f64845d8a4c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+ vfree(vgpu->mmio.vreg);
+ vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
+
+int setup_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+
+ if (vgpu->mmio.vreg)
+ memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
+ else {
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+ }
+
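+ /* vreg and sreg share a single allocation of twice the MMIO size: the
+ * virtual registers seen by the guest come first, followed by the
+ * shadow copy; both start out as a snapshot of the host firmware MMIO.
+ */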
+ vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+ memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+ memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+ vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+ /* Set bits 0:2 (Core C-State) to C0 */
+ vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ return 0;
+}
+
+static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ u16 *gmch_ctl;
+ int i;
+
+ memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ info->cfg_space_size);
+
+ if (!param->primary) {
+ vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ }
+
+ /* Show the guest that there isn't any stolen memory. */
+ gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+ *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+ intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+ gvt_aperture_pa_base(gvt), true);
+
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER);
+ /*
+ * Clear the upper 32 bits of the 64-bit BARs and let the guest assign
+ * new values.
+ */
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
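+ /* The GPU BARs are 64-bit, so each occupies two PCI resource slots;
+ * resource index i * 2 therefore maps to vGPU BAR i.
+ */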
+ for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+ vgpu->cfg_space.bar[i].size = pci_resource_len(
+ gvt->dev_priv->drm.pdev, i * 2);
+ vgpu->cfg_space.bar[i].tracked = false;
+ }
+}
+
+void populate_pvinfo_page(struct intel_vgpu *vgpu)
+{
+ /* Set up the ballooning information */
+ vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+ vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
+ vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
+ vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
+ vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+ vgpu_aperture_gmadr_base(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+ vgpu_aperture_sz(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+ vgpu_hidden_gmadr_base(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+ vgpu_hidden_sz(vgpu);
+
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+
+ gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
+ gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
+ vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
+ gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
+ vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
+ gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
+
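+ /* The PVINFO structure must exactly fill the space reserved for the
+ * shared PVINFO page.
+ */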
+ WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+}
+
+/**
+ * intel_gvt_init_vgpu_types - initialize vGPU type list
+ * @gvt: GVT device
+ *
+ * Initialize the vGPU type list based on the available resources.
+ *
+ */
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
+{
+ unsigned int num_types;
+ unsigned int i, low_avail;
+ unsigned int min_low;
+
+ /* A vGPU type name has the form GVTg_Vx_y, where 'x' encodes the
+ * physical GPU generation and 'y' is the maximum number of vGPU
+ * instances of this type that a user can create on one physical GPU.
+ *
+ * Depending on the physical SKU resources, vGPU types such as
+ * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. may be exposed. Different
+ * types of vGPU can be created on the same physical GPU as long as
+ * resources allow. Each vGPU type carries "avail_instance", the
+ * number of vGPU instances that can still be created for that type.
+ *
+ * A static size is used here for now because the types are
+ * initialized early.
+ */
+ low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+ num_types = 4;
+
+ gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
+ GFP_KERNEL);
+ if (!gvt->types)
+ return -ENOMEM;
+
+ min_low = MB_TO_BYTES(32);
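+ /* Generate types with doubling low GM sizes (32MB, 64MB, 128MB, ...)
+ * until a single instance no longer fits in the available low GM.
+ * High GM is provisioned at three times the low GM size and each
+ * type gets four fence registers.
+ */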
+ for (i = 0; i < num_types; ++i) {
+ if (low_avail / min_low == 0)
+ break;
+ gvt->types[i].low_gm_size = min_low;
+ gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+ gvt->types[i].fence = 4;
+ gvt->types[i].max_instance = low_avail / min_low;
+ gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+ if (IS_GEN8(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V4_%u",
+ gvt->types[i].max_instance);
+ else if (IS_GEN9(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V5_%u",
+ gvt->types[i].max_instance);
+
+ min_low <<= 1;
+ gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance,
+ gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+
+ gvt->num_types = i;
+ return 0;
+}
+
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
+{
+ kfree(gvt->types);
+}
+
+static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+{
+ int i;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+ unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+
+ /* This should be derived from the maximum hardware resource size,
+ * but keep the static configuration for now.
+ */
+ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+
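+ /* Each instance needs low GM, high GM and fence registers, so the
+ * number of instances still creatable for a type is limited by
+ * whichever of those resources is most scarce.
+ */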
+ for (i = 0; i < gvt->num_types; i++) {
+ low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
+ high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
+ fence_min = fence_avail / gvt->types[i].fence;
+ total_min = min(min(low_gm_min, high_gm_min), fence_min);
+ gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
+ total_min);
+
+ gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when a user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ mutex_lock(&gvt->lock);
+
+ vgpu->active = false;
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+
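+ /* Wait for in-flight workloads to drain before tearing the vGPU down;
+ * the gvt lock is dropped while waiting so the scheduler can make
+ * progress.
+ */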
+ if (atomic_read(&vgpu->running_workload_num)) {
+ mutex_unlock(&gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&gvt->lock);
+ }
+
+ intel_vgpu_stop_schedule(vgpu);
+ intel_vgpu_clean_sched_policy(vgpu);
+ intel_vgpu_clean_gvt_context(vgpu);
+ intel_vgpu_clean_execlist(vgpu);
+ intel_vgpu_clean_display(vgpu);
+ intel_vgpu_clean_opregion(vgpu);
+ intel_vgpu_clean_gtt(vgpu);
+ intel_gvt_hypervisor_detach_vgpu(vgpu);
+ intel_vgpu_free_resource(vgpu);
+ clean_vgpu_mmio(vgpu);
+ vfree(vgpu);
+
+ intel_gvt_update_vgpu_types(gvt);
+ mutex_unlock(&gvt->lock);
+}
+
+static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_vgpu *vgpu;
+ int ret;
+
+ gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
+ param->handle, param->low_gm_sz, param->high_gm_sz,
+ param->fence_sz);
+
+ vgpu = vzalloc(sizeof(*vgpu));
+ if (!vgpu)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&gvt->lock);
+
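+ /* Allocate a vGPU ID starting from 1; 0 is kept as an invalid/reserved ID. */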
+ ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free_vgpu;
+
+ vgpu->id = ret;
+ vgpu->handle = param->handle;
+ vgpu->gvt = gvt;
+ bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
+
+ setup_vgpu_cfg_space(vgpu, param);
+
+ ret = setup_vgpu_mmio(vgpu);
+ if (ret)
+ goto out_free_vgpu;
+
+ ret = intel_vgpu_alloc_resource(vgpu, param);
+ if (ret)
+ goto out_clean_vgpu_mmio;
+
+ populate_pvinfo_page(vgpu);
+
+ ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
+ if (ret)
+ goto out_clean_vgpu_resource;
+
+ ret = intel_vgpu_init_gtt(vgpu);
+ if (ret)
+ goto out_detach_hypervisor_vgpu;
+
+ ret = intel_vgpu_init_display(vgpu);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_vgpu_init_execlist(vgpu);
+ if (ret)
+ goto out_clean_display;
+
+ ret = intel_vgpu_init_gvt_context(vgpu);
+ if (ret)
+ goto out_clean_execlist;
+
+ ret = intel_vgpu_init_sched_policy(vgpu);
+ if (ret)
+ goto out_clean_shadow_ctx;
+
+ vgpu->active = true;
+ mutex_unlock(&gvt->lock);
+
+ return vgpu;
+
+out_clean_shadow_ctx:
+ intel_vgpu_clean_gvt_context(vgpu);
+out_clean_execlist:
+ intel_vgpu_clean_execlist(vgpu);
+out_clean_display:
+ intel_vgpu_clean_display(vgpu);
+out_clean_gtt:
+ intel_vgpu_clean_gtt(vgpu);
+out_detach_hypervisor_vgpu:
+ intel_gvt_hypervisor_detach_vgpu(vgpu);
+out_clean_vgpu_resource:
+ intel_vgpu_free_resource(vgpu);
+out_clean_vgpu_mmio:
+ clean_vgpu_mmio(vgpu);
+out_free_vgpu:
+ vfree(vgpu);
+ mutex_unlock(&gvt->lock);
+ return ERR_PTR(ret);
+}
+
+/**
+ * intel_gvt_create_vgpu - create a virtual GPU
+ * @gvt: GVT device
+ * @type: type of the vGPU to create
+ *
+ * This function is called when a user wants to create a virtual GPU.
+ *
+ * Returns:
+ * pointer to the newly created intel_vgpu on success, or an error pointer
+ * on failure.
+ */
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type)
+{
+ struct intel_vgpu_creation_params param;
+ struct intel_vgpu *vgpu;
+
+ param.handle = 0;
+ param.low_gm_sz = type->low_gm_size;
+ param.high_gm_sz = type->high_gm_size;
+ param.fence_sz = type->fence;
+
+ /* XXX: creation parameters are currently expressed in MB */
+ param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
+ param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+
+ vgpu = __intel_gvt_create_vgpu(gvt, &param);
+ if (IS_ERR(vgpu))
+ return vgpu;
+
+ /* Recalculate how many instances of each type can still be created */
+ intel_gvt_update_vgpu_types(gvt);
+
+ return vgpu;
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when a user wants to reset a virtual GPU.
+ *
+ */
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
+{
+}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 70980f82a15b..f5039f4f988f 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}
if (ret == 0 && needs_clflush_after)
- drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+ drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
@@ -1308,10 +1308,11 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 27b0e34dadec..b7f42c448a44 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -79,10 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_SEMICOLON
return 0;
}
@@ -109,12 +107,12 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
+ return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
- return obj->mapping ? 'M' : ' ';
+ return obj->mm.mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -138,11 +136,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
unsigned int frontbuffer_bits;
int pin_count = 0;
- enum intel_engine_id id;
lockdep_assert_held(&obj->base.dev->struct_mutex);
- seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
+ seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
get_pin_flag(obj),
@@ -151,17 +148,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
- obj->base.write_domain);
- for_each_engine_id(engine, dev_priv, id)
- seq_printf(m, "%x ",
- i915_gem_active_get_seqno(&obj->last_read[id],
- &obj->base.dev->struct_mutex));
- seq_printf(m, "] %x %s%s%s",
- i915_gem_active_get_seqno(&obj->last_write,
- &obj->base.dev->struct_mutex),
+ obj->base.write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
- obj->dirty ? " dirty" : "",
- obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ obj->mm.dirty ? " dirty" : "",
+ obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
@@ -188,18 +178,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_display || obj->fault_mappable) {
- char s[3], *t = s;
- if (obj->pin_display)
- *t++ = 'p';
- if (obj->fault_mappable)
- *t++ = 'f';
- *t = '\0';
- seq_printf(m, " (%s mappable)", s);
- }
-
- engine = i915_gem_active_get_engine(&obj->last_write,
- &dev_priv->drm.struct_mutex);
+
+ engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
@@ -237,7 +217,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -247,7 +227,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -334,11 +314,12 @@ static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_gem_object *obj;
struct file_stats stats;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int j;
memset(&stats, 0, sizeof(stats));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
@@ -402,23 +383,23 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
- seq_printf(m, "%u objects, %zu bytes\n",
+ seq_printf(m, "%u objects, %llu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
size += obj->base.size;
++count;
- if (obj->madv == I915_MADV_DONTNEED) {
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
- if (obj->mapping) {
+ if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@@ -426,7 +407,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
size += obj->base.size;
++count;
@@ -435,12 +416,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
++dpy_count;
}
- if (obj->madv == I915_MADV_DONTNEED) {
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
- if (obj->mapping) {
+ if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@@ -512,7 +493,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (show_pin_display_only && !obj->pin_display)
continue;
@@ -566,12 +547,12 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->flip_queued_req) {
- struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
+ struct intel_engine_cs *engine = work->flip_queued_req->engine;
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
- i915_gem_request_get_seqno(work->flip_queued_req),
- dev_priv->next_seqno,
+ work->flip_queued_req->global_seqno,
+ atomic_read(&dev_priv->gt.global_timeline.next_seqno),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
} else
@@ -607,6 +588,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int total = 0;
int ret, j;
@@ -614,7 +596,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
if (ret)
return ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
@@ -645,12 +627,24 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
+static void print_request(struct seq_file *m,
+ struct drm_i915_gem_request *rq,
+ const char *prefix)
+{
+ seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
+ rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+ rq->priotree.priority,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ rq->timeline->common->name);
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -658,29 +652,18 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
return ret;
any = 0;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
int count;
count = 0;
- list_for_each_entry(req, &engine->request_list, link)
+ list_for_each_entry(req, &engine->timeline->requests, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->request_list, link) {
- struct pid *pid = req->ctx->pid;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
- seq_printf(m, " %x @ %d: %s [%d]\n",
- req->fence.seqno,
- (int) (jiffies - req->emitted_jiffies),
- task ? task->comm : "<unknown>",
- task ? task->pid : -1);
- rcu_read_unlock();
- }
+ list_for_each_entry(req, &engine->timeline->requests, link)
+ print_request(m, req, " ");
any++;
}
@@ -701,22 +684,23 @@ static void i915_ring_seqno_info(struct seq_file *m,
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_ring_seqno_info(m, engine);
return 0;
@@ -727,6 +711,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i, pipe;
intel_runtime_pm_get(dev_priv);
@@ -743,17 +728,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(dev_priv, pipe) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
+
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
+ intel_display_power_put(dev_priv, power_domain);
+ }
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -895,7 +895,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
@@ -943,7 +943,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
const u32 *hws;
int i;
- engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+ engine = dev_priv->engine[(uintptr_t)node->info_ent->data];
hws = engine->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -956,6 +956,8 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
static ssize_t
i915_error_state_write(struct file *filp,
const char __user *ubuf,
@@ -1038,19 +1040,14 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
+#endif
+
static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
-
- *val = dev_priv->next_seqno;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ *val = atomic_read(&dev_priv->gt.global_timeline.next_seqno);
return 0;
}
@@ -1065,7 +1062,7 @@ i915_next_seqno_set(void *data, u64 val)
if (ret)
return ret;
- ret = i915_gem_set_seqno(dev, val);
+ ret = i915_gem_set_global_seqno(dev, val);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1277,15 +1274,42 @@ out:
return ret;
}
+static void i915_instdone_info(struct drm_i915_private *dev_priv,
+ struct seq_file *m,
+ struct intel_instdone *instdone)
+{
+ int slice;
+ int subslice;
+
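+ /* Older generations only expose the basic INSTDONE register; newer
+ * ones add the slice-common register and per-slice/subslice sampler
+ * and row registers, printed below when available.
+ */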
+ seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
+ instdone->instdone);
+
+ if (INTEL_GEN(dev_priv) <= 3)
+ return;
+
+ seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
+ instdone->slice_common);
+
+ if (INTEL_GEN(dev_priv) <= 6)
+ return;
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
+
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct intel_instdone instdone;
enum intel_engine_id id;
- int j;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
seq_printf(m, "Wedged\n");
@@ -1303,12 +1327,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
- i915_get_extra_instdone(dev_priv, instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
intel_runtime_pm_put(dev_priv);
@@ -1319,16 +1343,27 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *rb;
+
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
- engine->hangcheck.seqno,
- seqno[id],
- engine->last_submitted_seqno);
+ engine->hangcheck.seqno, seqno[id],
+ intel_engine_last_submit(engine));
seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)));
+ spin_lock_irq(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock_irq(&b->lock);
+
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
@@ -1336,18 +1371,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
if (engine->id == RCS) {
- seq_puts(m, "\tinstdone read =");
-
- for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
- seq_printf(m, " 0x%08x", instdone[j]);
+ seq_puts(m, "\tinstdone read =\n");
- seq_puts(m, "\n\tinstdone accu =");
+ i915_instdone_info(dev_priv, m, &instdone);
- for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
- seq_printf(m, " 0x%08x",
- engine->hangcheck.instdone[j]);
+ seq_puts(m, "\tinstdone accu =\n");
- seq_puts(m, "\n");
+ i915_instdone_info(dev_priv, m,
+ &engine->hangcheck.instdone);
}
}
@@ -1357,14 +1388,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
rgvmodectl = I915_READ(MEMMODECTL);
@@ -1372,7 +1398,6 @@ static int ironlake_drpc_info(struct seq_file *m)
crstandvid = I915_READ16(CRSTANDVID);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
@@ -1635,10 +1660,13 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
- if (INTEL_GEN(dev_priv) >= 7)
+ if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
+ uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
+ BDW_FBC_COMPRESSION_MASK :
+ IVB_FBC_COMPRESSION_MASK;
seq_printf(m, "Compressing: %s\n",
- yesno(I915_READ(FBC_STATUS2) &
- FBC_COMPRESSION_MASK));
+ yesno(I915_READ(FBC_STATUS2) & mask));
+ }
mutex_unlock(&dev_priv->fbc.lock);
intel_runtime_pm_put(dev_priv);
@@ -1717,6 +1745,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
@@ -1730,10 +1759,10 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
- seq_printf(m, "self-refresh: %s\n",
- sr_enabled ? "enabled" : "disabled");
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
return 0;
}
@@ -1867,7 +1896,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.height,
fbdev_fb->base.depth,
fbdev_fb->base.bits_per_pixel,
- fbdev_fb->base.modifier[0],
+ fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
describe_obj(m, fbdev_fb->obj);
seq_putc(m, '\n');
@@ -1885,7 +1914,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
- fb->base.modifier[0],
+ fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1909,6 +1938,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1935,7 +1965,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
@@ -1974,7 +2004,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
seq_printf(m, "\tBound in GGTT at 0x%08x\n",
i915_ggtt_offset(vma));
- if (i915_gem_object_get_pages(vma->obj)) {
+ if (i915_gem_object_pin_pages(vma->obj)) {
seq_puts(m, "\tFailed to get pages for context object\n\n");
return;
}
@@ -1993,6 +2023,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
kunmap_atomic(reg_state);
}
+ i915_gem_object_unpin_pages(vma->obj);
seq_putc(m, '\n');
}
@@ -2002,6 +2033,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
if (!i915.enable_execlists) {
@@ -2014,7 +2046,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2022,84 +2054,6 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return 0;
}
-static int i915_execlists(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- u32 status_pointer;
- u8 read_pointer;
- u8 write_pointer;
- u32 status;
- u32 ctx_id;
- struct list_head *cursor;
- int i, ret;
-
- if (!i915.enable_execlists) {
- seq_puts(m, "Logical Ring Contexts are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- intel_runtime_pm_get(dev_priv);
-
- for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *head_req = NULL;
- int count = 0;
-
- seq_printf(m, "%s\n", engine->name);
-
- status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
- ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
- seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
- status, ctx_id);
-
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
- seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
-
- read_pointer = GEN8_CSB_READ_PTR(status_pointer);
- write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
- if (read_pointer > write_pointer)
- write_pointer += GEN8_CSB_ENTRIES;
- seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
- read_pointer, write_pointer);
-
- for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
- ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
-
- seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
- i, status, ctx_id);
- }
-
- spin_lock_bh(&engine->execlist_lock);
- list_for_each(cursor, &engine->execlist_queue)
- count++;
- head_req = list_first_entry_or_null(&engine->execlist_queue,
- struct drm_i915_gem_request,
- execlist_link);
- spin_unlock_bh(&engine->execlist_lock);
-
- seq_printf(m, "\t%d requests in queue\n", count);
- if (head_req) {
- seq_printf(m, "\tHead request context: %u\n",
- head_req->ctx->hw_id);
- seq_printf(m, "\tHead request tail: %u\n",
- head_req->tail);
- }
-
- seq_putc(m, '\n');
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2127,12 +2081,7 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
@@ -2172,7 +2121,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_puts(m, "L-shaped memory detected\n");
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -2201,14 +2149,15 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i;
if (!ppgtt)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -2223,11 +2172,12 @@ static void gen6_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
@@ -2296,9 +2246,10 @@ out_unlock:
static int count_irq_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int count = 0;
- for_each_engine(engine, i915)
+ for_each_engine(engine, i915, id)
count += intel_engine_has_waiter(engine);
return count;
@@ -2325,8 +2276,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
- seq_printf(m, "GPU busy? %s [%x]\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
+ seq_printf(m, "GPU busy? %s [%d requests]\n",
+ yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
@@ -2361,7 +2312,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
if (INTEL_GEN(dev_priv) >= 6 &&
dev_priv->rps.enabled &&
- dev_priv->gt.active_engines) {
+ dev_priv->gt.active_requests) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -2442,6 +2393,32 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
return 0;
}
+static void i915_guc_log_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ seq_puts(m, "\nGuC logging stats:\n");
+
+ seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_ISR_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
+
+ seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_DPC_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
+
+ seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
+
+ seq_printf(m, "\tTotal flush interrupt count: %u\n",
+ guc->log.flush_interrupt_count);
+
+ seq_printf(m, "\tCapture miss count: %u\n",
+ guc->log.capture_miss_count);
+}
+
static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
@@ -2461,7 +2438,7 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
tot += submissions;
seq_printf(m, "\tSubmissions: %llu %s\n",
@@ -2504,7 +2481,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = guc.submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
@@ -2515,6 +2492,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
i915_guc_client_info(m, dev_priv, &client);
+ i915_guc_log_info(m, dev_priv);
+
/* Add more as required ... */
return 0;
@@ -2526,10 +2505,10 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj;
int i = 0, pg;
- if (!dev_priv->guc.log_vma)
+ if (!dev_priv->guc.log.vma)
return 0;
- obj = dev_priv->guc.log_vma->obj;
+ obj = dev_priv->guc.log.vma->obj;
for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
@@ -2546,6 +2525,44 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
return 0;
}
+static int i915_guc_log_control_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (!dev_priv->guc.log.vma)
+ return -EINVAL;
+
+ *val = i915.guc_log_level;
+
+ return 0;
+}
+
+static int i915_guc_log_control_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+
+ if (!dev_priv->guc.log.vma)
+ return -EINVAL;
+
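+ /* Changing the GuC log level touches the device, so take struct_mutex
+ * and a runtime PM wakeref around the update.
+ */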
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(dev_priv);
+ ret = i915_guc_log_control(dev_priv, val);
+ intel_runtime_pm_put(dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
+ i915_guc_log_control_get, i915_guc_log_control_set,
+ "%lld\n");
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2575,11 +2592,22 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain))
+ continue;
+
stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
enabled = true;
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
@@ -3004,7 +3032,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane_state *state;
struct drm_plane *plane = &intel_plane->base;
- char *format_name;
+ struct drm_format_name_buf format_name;
if (!plane->state) {
seq_puts(m, "plane->state is NULL!\n");
@@ -3014,9 +3042,9 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
state = plane->state;
if (state->fb) {
- format_name = drm_get_format_name(state->fb->pixel_format);
+ drm_get_format_name(state->fb->pixel_format, &format_name);
} else {
- format_name = kstrdup("N/A", GFP_KERNEL);
+ sprintf(format_name.str, "N/A");
}
seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
@@ -3032,10 +3060,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
((state->src_w & 0xffff) * 15625) >> 10,
(state->src_h >> 16),
((state->src_h & 0xffff) * 15625) >> 10,
- format_name,
+ format_name.str,
plane_rotation(state->rotation));
-
- kfree(format_name);
}
}
@@ -3121,6 +3147,146 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_engine_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
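+ /* Engine state is read from hardware registers, so keep the device
+ * awake for the duration of the dump.
+ */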
+ intel_runtime_pm_get(dev_priv);
+
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct drm_i915_gem_request *rq;
+ struct rb_node *rb;
+ u64 addr;
+
+ seq_printf(m, "%s\n", engine->name);
+ seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine),
+ engine->hangcheck.seqno,
+ engine->hangcheck.score);
+
+ rcu_read_lock();
+
+ seq_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ seq_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ }
+
+ seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+ I915_READ(RING_START(engine->mmio_base)),
+ rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+ seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+ rq ? rq->ring->head : 0);
+ seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+ rq ? rq->ring->tail : 0);
+ seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
+ I915_READ(RING_CTL(engine->mmio_base)),
+ I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+ rcu_read_unlock();
+
+ addr = intel_engine_get_active_head(engine);
+ seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ addr = intel_engine_get_last_batch_head(engine);
+ seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+
+ if (i915.enable_execlists) {
+ u32 ptr, read, write;
+ struct rb_node *rb;
+
+ seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
+ I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+ I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+ ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+ read = GEN8_CSB_READ_PTR(ptr);
+ write = GEN8_CSB_WRITE_PTR(ptr);
+ seq_printf(m, "\tExeclist CSB read %d, write %d\n",
+ read, write);
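+ /* Normalize the CSB pointers and walk the entries between the read
+ * and write pointers, handling wrap-around of the ring.
+ */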
+ if (read >= GEN8_CSB_ENTRIES)
+ read = 0;
+ if (write >= GEN8_CSB_ENTRIES)
+ write = 0;
+ if (read > write)
+ write += GEN8_CSB_ENTRIES;
+ while (read < write) {
+ unsigned int idx = ++read % GEN8_CSB_ENTRIES;
+
+ seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+ idx,
+ I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+ I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->execlist_port[0].request);
+ if (rq)
+ print_request(m, rq, "\t\tELSP[0] ");
+ else
+ seq_printf(m, "\t\tELSP[0] idle\n");
+ rq = READ_ONCE(engine->execlist_port[1].request);
+ if (rq)
+ print_request(m, rq, "\t\tELSP[1] ");
+ else
+ seq_printf(m, "\t\tELSP[1] idle\n");
+ rcu_read_unlock();
+
+ spin_lock_irq(&engine->timeline->lock);
+ for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
+ rq = rb_entry(rb, typeof(*rq), priotree.node);
+ print_request(m, rq, "\t\tQ ");
+ }
+ spin_unlock_irq(&engine->timeline->lock);
+ } else if (INTEL_GEN(dev_priv) > 6) {
+ seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
+ }
+
+ spin_lock_irq(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock_irq(&b->lock);
+
+ seq_puts(m, "\n");
+ }
+
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+}
+
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3147,7 +3313,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
uint64_t offset;
seq_printf(m, "%s\n", engine->name);
@@ -3172,22 +3338,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
I915_READ(engine->semaphore.mbox.signal[j]));
seq_putc(m, '\n');
}
- seq_puts(m, "\nSync seqno:\n");
- for_each_engine(engine, dev_priv) {
- for (j = 0; j < num_rings; j++)
- seq_printf(m, " 0x%08x ",
- engine->semaphore.sync_seqno[j]);
- seq_putc(m, '\n');
- }
- seq_putc(m, '\n');
-
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -3236,7 +3393,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_engine_id(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
@@ -3280,7 +3437,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@@ -3914,8 +4071,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
int ret = 0;
@@ -3941,10 +4097,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
ret = drm_atomic_commit(state);
out:
- drm_modeset_unlock_all(dev);
WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
- if (ret)
- drm_atomic_state_free(state);
+ drm_modeset_unlock_all(dev);
+ drm_atomic_state_put(state);
}
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -3982,10 +4137,8 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source source)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -4056,15 +4209,15 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
drm_modeset_lock(&crtc->base.mutex, NULL);
if (crtc->base.state->active)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
@@ -4463,7 +4616,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
- num_levels = ilk_wm_max_level(dev) + 1;
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
drm_modeset_lock_all(dev);
@@ -4579,7 +4732,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
- num_levels = ilk_wm_max_level(dev) + 1;
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
if (len >= sizeof(tmp))
return -EINVAL;
@@ -4704,13 +4857,9 @@ i915_wedged_set(void *data, u64 val)
if (i915_reset_in_progress(&dev_priv->gpu_error))
return -EAGAIN;
- intel_runtime_pm_get(dev_priv);
-
i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
- intel_runtime_pm_put(dev_priv);
-
return 0;
}
@@ -4778,10 +4927,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
-#define DROP_ALL (DROP_UNBOUND | \
- DROP_BOUND | \
- DROP_RETIRE | \
- DROP_ACTIVE)
+#define DROP_FREED 0x10
+#define DROP_ALL (DROP_UNBOUND | \
+ DROP_BOUND | \
+ DROP_RETIRE | \
+ DROP_ACTIVE | \
+ DROP_FREED)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -4825,6 +4976,11 @@ i915_drop_caches_set(void *data, u64 val)
unlock:
mutex_unlock(&dev->struct_mutex);
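+ /* Freed objects are released via RCU and a dedicated worker; wait for
+ * both so that DROP_FREED actually returns the memory before returning.
+ */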
+ if (val & DROP_FREED) {
+ synchronize_rcu();
+ flush_work(&dev_priv->mm.free_work);
+ }
+
return ret;
}
@@ -4945,22 +5101,16 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
u32 snpcr;
- int ret;
if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -5275,7 +5425,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
- {"i915_execlists", i915_execlists, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -5287,6 +5436,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_engine_info", i915_engine_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
@@ -5309,7 +5459,9 @@ static const struct i915_debugfs_files {
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
+#endif
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
@@ -5318,7 +5470,8 @@ static const struct i915_debugfs_files {
{"i915_fbc_false_color", &i915_fbc_fc_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops}
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+ {"i915_guc_log_control", &i915_guc_log_control_fops}
};
void intel_display_crc_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bfb2efd8d4d4..445fec9c2841 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -114,7 +114,7 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
fmt, ##__VA_ARGS__)
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
enum intel_pch ret = PCH_NOP;
@@ -125,16 +125,16 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
ret = PCH_IBX;
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
- } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
 DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
@@ -150,7 +150,7 @@ static void intel_detect_pch(struct drm_device *dev)
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
*/
- if (INTEL_INFO(dev)->num_pipes == 0) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
return;
}
@@ -174,40 +174,46 @@ static void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev));
+ WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
- WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) ||
+ IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) &&
+ !IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev) &&
- !IS_KABYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev) &&
- !IS_KABYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
- WARN_ON(!IS_KABYLAKE(dev));
+ WARN_ON(!IS_KABYLAKE(dev_priv));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -215,7 +221,8 @@ static void intel_detect_pch(struct drm_device *dev)
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_type = intel_virt_detect_pch(dev);
+ dev_priv->pch_type =
+ intel_virt_detect_pch(dev_priv);
} else
continue;
@@ -255,16 +262,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
- value = intel_engine_initialized(&dev_priv->engine[VCS]);
+ value = !!dev_priv->engine[VCS];
break;
case I915_PARAM_HAS_BLT:
- value = intel_engine_initialized(&dev_priv->engine[BCS]);
+ value = !!dev_priv->engine[BCS];
break;
case I915_PARAM_HAS_VEBOX:
- value = intel_engine_initialized(&dev_priv->engine[VECS]);
+ value = !!dev_priv->engine[VECS];
break;
case I915_PARAM_HAS_BSD2:
- value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+ value = !!dev_priv->engine[VCS2];
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_GEN(dev_priv) >= 4;
@@ -316,6 +323,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = i915_gem_mmap_gtt_version();
break;
+ case I915_PARAM_HAS_SCHEDULER:
+ value = dev_priv->engine[RCS] &&
+ dev_priv->engine[RCS]->schedule;
+ break;
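
The I915_PARAM_HAS_SCHEDULER case added above reports whether the render engine provides a schedule() hook. A minimal userspace-side sketch of probing it through the existing GETPARAM ioctl (illustrative only; it assumes libdrm's drmIoctl() and a matching I915_PARAM_HAS_SCHEDULER define in the uapi header, and the helper name is made up):

#include <stdbool.h>
#include <xf86drm.h>
#include <i915_drm.h>

static bool example_has_scheduler(int fd)
{
	struct drm_i915_getparam gp = { .param = I915_PARAM_HAS_SCHEDULER };
	int value = 0;

	gp.value = &value;
	/* Older kernels reject unknown params; treat that as "no scheduler". */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return false;

	return value != 0;
}
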
case I915_PARAM_MMAP_VERSION:
/* Remember to bump this if the version changes! */
case I915_PARAM_HAS_GEM:
@@ -367,12 +378,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -399,7 +410,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return ret;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -413,16 +424,16 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return;
dev_priv->mchbar_need_disable = false;
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
@@ -440,7 +451,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = true;
/* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
@@ -453,10 +464,10 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
u32 deven_val;
pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
@@ -484,7 +495,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
struct drm_device *dev = cookie;
- intel_modeset_vga_set_state(dev, state);
+ intel_modeset_vga_set_state(to_i915(dev), state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -530,40 +541,17 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_device *dev)
+static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode. And the only
- * known way to disable logical contexts is through a GPU reset.
- *
- * So in order to leave the system in a known default configuration,
- * always reset the GPU upon unload. Afterwards we then clean up the
- * GEM state tracking, flushing off the requests and leaving the
- * system in a known idle state.
- *
- * Note that is of the upmost importance that the GPU is idle and
- * all stray writes are flushed *before* we dismantle the backing
- * storage for the pinned objects.
- *
- * However, since we are uncertain that reseting the GPU on older
- * machines is a good idea, we don't - just in case it leaves the
- * machine in an unusable condition.
- */
- if (HAS_HW_CONTEXTS(dev)) {
- int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
- WARN_ON(reset && reset != -ENODEV);
- }
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_cleanup_engines(&dev_priv->drm);
+ i915_gem_context_fini(&dev_priv->drm);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_engines(dev);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
+ rcu_barrier();
+ flush_work(&dev_priv->mm.free_work);
- WARN_ON(!list_empty(&to_i915(dev)->context_list));
+ WARN_ON(!list_empty(&dev_priv->context_list));
}
static int i915_load_modeset_init(struct drm_device *dev)
@@ -611,7 +599,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
- intel_modeset_init(dev);
+ ret = intel_modeset_init(dev);
+ if (ret)
+ goto cleanup_irq;
intel_guc_init(dev);
@@ -621,7 +611,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
ret = intel_fbdev_init(dev);
@@ -636,7 +626,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- i915_gem_fini(dev);
+ if (i915_gem_suspend(dev))
+ DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_fini(dev_priv);
cleanup_irq:
intel_guc_fini(dev);
drm_irq_uninstall(dev);
@@ -771,6 +763,19 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
destroy_workqueue(dev_priv->wq);
}
+/*
+ * We don't keep the workarounds for pre-production hardware, so we expect our
+ * driver to fail on these machines in one way or another. A little warning on
+ * dmesg may help both the user and the bug triagers.
+ */
+static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+{
+ if (IS_HSW_EARLY_SDV(dev_priv) ||
+ IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+ DRM_ERROR("This is a pre-production stepping. "
+ "It may not be fully functional.\n");
+}
+
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
@@ -829,25 +834,24 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
intel_irq_init(dev_priv);
+ intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- i915_gem_load_init(&dev_priv->drm);
+ ret = i915_gem_load_init(&dev_priv->drm);
+ if (ret < 0)
+ goto err_gvt;
intel_display_crc_init(dev_priv);
intel_device_info_dump(dev_priv);
- /* Not all pre-production machines fall into this category, only the
- * very first ones. Almost everything should work, except for maybe
- * suspend/resume. And we don't implement workarounds that affect only
- * pre-production machines. */
- if (IS_HSW_EARLY_SDV(dev_priv))
- DRM_INFO("This is an early pre-production Haswell machine. "
- "It may not be fully functional.\n");
+ intel_detect_preproduction_hw(dev_priv);
return 0;
+err_gvt:
+ intel_gvt_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -870,7 +874,7 @@ static int i915_mmio_setup(struct drm_device *dev)
int mmio_bar;
int mmio_size;
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -879,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
mmio_size = 512 * 1024;
else
mmio_size = 2 * 1024 * 1024;
@@ -982,7 +986,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
int ret;
if (i915_inject_load_failure())
@@ -1023,7 +1026,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@@ -1040,7 +1043,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+ if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1070,7 +1073,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* be lost or delayed, but we use them anyways to avoid
* stuck interrupts on some machines.
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+ if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
if (pci_enable_msi(pdev) < 0)
DRM_DEBUG_DRIVER("can't enable MSI");
}
@@ -1121,6 +1124,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
+ i915_guc_register(dev_priv);
i915_setup_sysfs(dev_priv);
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1159,6 +1163,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_opregion_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
+ i915_guc_unregister(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@@ -1167,8 +1172,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
/**
* i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * @pdev: PCI device
+ * @ent: matching PCI ID entry
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
@@ -1242,6 +1247,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ DRM_INFO("DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
intel_runtime_pm_put(dev_priv);
@@ -1309,7 +1318,7 @@ void i915_driver_unload(struct drm_device *dev)
drain_workqueue(dev_priv->wq);
intel_guc_fini(dev);
- i915_gem_fini(dev);
+ i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -1431,9 +1440,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev);
+ intel_suspend_hw(dev_priv);
- i915_gem_suspend_gtt_mappings(dev);
+ i915_gem_suspend_gtt_mappings(dev_priv);
i915_save_state(dev);
@@ -1447,8 +1456,6 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_display_set_init_power(dev_priv, false);
-
intel_csr_ucode_suspend(dev_priv);
out:
@@ -1466,6 +1473,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
+ intel_display_set_init_power(dev_priv, false);
+
fw_csr = !IS_BROXTON(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
@@ -1507,7 +1516,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
- if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+ if (!(hibernation && INTEL_GEN(dev_priv) < 6))
pci_set_power_state(pdev, PCI_D3hot);
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
@@ -1595,6 +1604,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_resume(dev);
+ drm_kms_helper_poll_enable(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
@@ -1602,8 +1613,6 @@ static int i915_drm_resume(struct drm_device *dev)
* notifications.
* */
intel_hpd_init(dev_priv);
- /* Config may have changed between suspend and resume */
- drm_helper_hpd_irq_event(dev);
intel_opregion_register(dev_priv);
@@ -1616,7 +1625,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);
- drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1721,6 +1729,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
+static void disable_engines_irq(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Ensure irq handler finishes, and not run again. */
+ disable_irq(dev_priv->drm.irq);
+ for_each_engine(engine, dev_priv, id)
+ tasklet_kill(&engine->irq_tasklet);
+}
+
+static void enable_engines_irq(struct drm_i915_private *dev_priv)
+{
+ enable_irq(dev_priv->drm.irq);
+}
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@@ -1754,7 +1778,11 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
+ disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ enable_engines_irq(dev_priv);
+
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -2240,7 +2268,6 @@ err1:
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
- struct drm_device *dev = &dev_priv->drm;
int err;
int ret;
@@ -2264,10 +2291,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
vlv_check_no_gt_access(dev_priv);
- if (rpm_resume) {
- intel_init_clock_gating(dev);
- i915_gem_restore_fences(dev);
- }
+ if (rpm_resume)
+ intel_init_clock_gating(dev_priv);
return ret;
}
@@ -2282,37 +2307,18 @@ static int intel_runtime_suspend(struct device *kdev)
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Suspending device\n");
- /*
- * We could deadlock here in case another thread holding struct_mutex
- * calls RPM suspend concurrently, since the RPM suspend will wait
- * first for this RPM suspend to finish. In this case the concurrent
- * RPM resume will be followed by its RPM suspend counterpart. Still
- * for consistency return -EAGAIN, which will reschedule this suspend.
- */
- if (!mutex_trylock(&dev->struct_mutex)) {
- DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
- /*
- * Bump the expiration timestamp, otherwise the suspend won't
- * be rescheduled.
- */
- pm_runtime_mark_last_busy(kdev);
-
- return -EAGAIN;
- }
-
disable_rpm_wakeref_asserts(dev_priv);
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
- i915_gem_release_all_mmaps(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_runtime_suspend(dev_priv);
intel_guc_suspend(dev);
@@ -2386,7 +2392,7 @@ static int intel_runtime_resume(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n");
@@ -2404,7 +2410,7 @@ static int intel_runtime_resume(struct device *kdev)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
@@ -2420,7 +2426,7 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2495,9 +2501,7 @@ static const struct file_operations i915_driver_fops = {
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
@@ -2577,7 +2581,7 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
.gem_close_object = i915_gem_close_object,
- .gem_free_object = i915_gem_free_object,
+ .gem_free_object_unlocked = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8b9ee4e390c0..56002a52936d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -41,6 +41,7 @@
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
+#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
@@ -59,9 +60,14 @@
#include "intel_ringbuffer.h"
#include "i915_gem.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
+
+#include "i915_vma.h"
#include "intel_gvt.h"
@@ -70,7 +76,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160919"
+#define DRIVER_DATE "20161121"
+#define DRIVER_TIMESTAMP 1479717903
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -122,6 +129,11 @@ static inline const char *onoff(bool v)
return v ? "on" : "off";
}
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
@@ -182,9 +194,10 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
enum port {
+ PORT_NONE = -1,
PORT_A = 0,
PORT_B,
PORT_C,
@@ -310,7 +323,7 @@ struct i915_hotplug {
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
for_each_if ((__mask) & (1 << (__p)))
-#define for_each_plane(__dev_priv, __pipe, __p) \
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)
@@ -455,23 +468,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-struct drm_i915_fence_reg {
- struct list_head link;
- struct drm_i915_private *i915;
- struct i915_vma *vma;
- int pin_count;
- int id;
- /**
- * Whether the tiling parameters for the currently
- * associated fence register have changed. Note that
- * for the purposes of tracking tiling changes we also
- * treat the unfenced register, the register slot that
- * the object occupies whilst it executes a fenced
- * command (such as BLT on gen2/3), as a "fence".
- */
- bool dirty;
-};
-
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
@@ -483,6 +479,7 @@ struct sdvo_device_mapping {
struct intel_connector;
struct intel_encoder;
+struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
@@ -490,16 +487,20 @@ struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
- int (*get_display_clock_speed)(struct drm_device *dev);
- int (*get_fifo_size)(struct drm_device *dev, int plane);
+ int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
+ int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate);
- void (*initial_watermarks)(struct intel_crtc_state *cstate);
- void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct drm_atomic_state *state);
- void (*update_wm)(struct drm_crtc *crtc);
+ void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
@@ -521,7 +522,7 @@ struct drm_i915_display_funcs {
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
- void (*init_clock_gating)(struct drm_device *dev);
+ void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
@@ -558,6 +559,18 @@ enum forcewake_domains {
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
+enum decoupled_power_domain {
+ GEN9_DECOUPLED_PD_BLITTER = 0,
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+enum decoupled_ops {
+ GEN9_DECOUPLED_OP_WRITE = 0,
+ GEN9_DECOUPLED_OP_READ
+};
+
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
i915_reg_t reg, unsigned int op);
@@ -581,13 +594,25 @@ struct intel_uncore_funcs {
uint32_t val, bool trace);
};
+struct intel_forcewake_range {
+ u32 start;
+ u32 end;
+
+ enum forcewake_domains domains;
+};
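
The new intel_forcewake_range type lets each platform describe its forcewake requirements as a table of register ranges that the uncore code can consult per register access. A sketch of what one such table could look like (the range values and array name here are hypothetical; only the struct and the FORCEWAKE_RENDER domain bit come from the driver):

static const struct intel_forcewake_range example_fw_ranges[] = {
	/* registers in these ranges need the render well held awake */
	{ .start = 0x2000, .end = 0x3fff, .domains = FORCEWAKE_RENDER },
	{ .start = 0xb000, .end = 0xb47f, .domains = FORCEWAKE_RENDER },
};
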
+
struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
+ const struct intel_forcewake_range *fw_domains_table;
+ unsigned int fw_domains_table_entries;
+
struct intel_uncore_funcs funcs;
unsigned fifo_count;
+
enum forcewake_domains fw_domains;
+ enum forcewake_domains fw_domains_active;
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
@@ -633,54 +658,55 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
- func(is_mobile) sep \
- func(is_i85x) sep \
- func(is_i915g) sep \
- func(is_i945gm) sep \
- func(is_g33) sep \
- func(hws_needs_physical) sep \
- func(is_g4x) sep \
- func(is_pineview) sep \
- func(is_broadwater) sep \
- func(is_crestline) sep \
- func(is_ivybridge) sep \
- func(is_valleyview) sep \
- func(is_cherryview) sep \
- func(is_haswell) sep \
- func(is_broadwell) sep \
- func(is_skylake) sep \
- func(is_broxton) sep \
- func(is_kabylake) sep \
- func(is_preliminary) sep \
- func(has_fbc) sep \
- func(has_psr) sep \
- func(has_runtime_pm) sep \
- func(has_csr) sep \
- func(has_resource_streamer) sep \
- func(has_rc6) sep \
- func(has_rc6p) sep \
- func(has_dp_mst) sep \
- func(has_gmbus_irq) sep \
- func(has_hw_contexts) sep \
- func(has_logical_ring_contexts) sep \
- func(has_l3_dpf) sep \
- func(has_gmch_display) sep \
- func(has_guc) sep \
- func(has_pipe_cxsr) sep \
- func(has_hotplug) sep \
- func(cursor_needs_physical) sep \
- func(has_overlay) sep \
- func(overlay_needs_physical) sep \
- func(supports_tv) sep \
- func(has_llc) sep \
- func(has_snoop) sep \
- func(has_ddi) sep \
- func(has_fpga_dbg) sep \
- func(has_pooled_eu)
-
-#define DEFINE_FLAG(name) u8 name:1
-#define SEP_SEMICOLON ;
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+ /* Keep is_* in chronological order */ \
+ func(is_mobile); \
+ func(is_i85x); \
+ func(is_i915g); \
+ func(is_i945gm); \
+ func(is_g33); \
+ func(is_g4x); \
+ func(is_pineview); \
+ func(is_broadwater); \
+ func(is_crestline); \
+ func(is_ivybridge); \
+ func(is_valleyview); \
+ func(is_cherryview); \
+ func(is_haswell); \
+ func(is_broadwell); \
+ func(is_skylake); \
+ func(is_broxton); \
+ func(is_kabylake); \
+ func(is_alpha_support); \
+ /* Keep has_* in alphabetical order */ \
+ func(has_64bit_reloc); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_fpga_dbg); \
+ func(has_gmbus_irq); \
+ func(has_gmch_display); \
+ func(has_guc); \
+ func(has_hotplug); \
+ func(has_hw_contexts); \
+ func(has_l3_dpf); \
+ func(has_llc); \
+ func(has_logical_ring_contexts); \
+ func(has_overlay); \
+ func(has_pipe_cxsr); \
+ func(has_pooled_eu); \
+ func(has_psr); \
+ func(has_rc6); \
+ func(has_rc6p); \
+ func(has_resource_streamer); \
+ func(has_runtime_pm); \
+ func(has_snoop); \
+ func(cursor_needs_physical); \
+ func(hws_needs_physical); \
+ func(overlay_needs_physical); \
+ func(supports_tv); \
+ func(has_decoupled_mmio)
struct sseu_dev_info {
u8 slice_mask;
@@ -709,7 +735,9 @@ struct intel_device_info {
u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
- DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
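
With the reworked DEV_INFO_FOR_EACH_FLAG() above, the separator is baked into each expansion and callers just supply a statement-like func(), as the local DEFINE_FLAG inside struct intel_device_info now does. A minimal sketch of dumping every flag with the same pattern (the function and PRINT_FLAG helper are made up for illustration; yesno() and seq_printf() are existing helpers):

static void example_dump_device_flags(struct seq_file *m,
				      const struct intel_device_info *info)
{
#define PRINT_FLAG(name) seq_printf(m, #name ": %s\n", yesno(info->name))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
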
@@ -726,14 +754,15 @@ struct intel_device_info {
} color;
};
-#undef DEFINE_FLAG
-#undef SEP_SEMICOLON
-
struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
struct timeval time;
+ struct timeval boottime;
+ struct timeval uptime;
+
+ struct drm_i915_private *i915;
char error_msg[128];
bool simulated;
@@ -759,11 +788,12 @@ struct drm_i915_error_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
- u32 extra_instdone[I915_NUM_INSTDONE_REG];
+
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore;
+ struct drm_i915_error_object *guc_log;
struct drm_i915_error_engine {
int engine_id;
@@ -775,12 +805,14 @@ struct drm_i915_error_state {
struct i915_address_space *vm;
int num_requests;
+ /* position of active request inside the ring */
+ u32 rq_head, rq_post, rq_tail;
+
/* our own tracking of ring head and tail */
u32 cpu_ring_head;
u32 cpu_ring_tail;
u32 last_seqno;
- u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */
u32 start;
@@ -791,7 +823,6 @@ struct drm_i915_error_state {
u32 hws;
u32 ipeir;
u32 ipehr;
- u32 instdone;
u32 bbstate;
u32 instpm;
u32 instps;
@@ -802,11 +833,13 @@ struct drm_i915_error_state {
u64 faddr;
u32 rc_psmi; /* sleep state */
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+ struct intel_instdone instdone;
struct drm_i915_error_object {
- int page_count;
u64 gtt_offset;
u64 gtt_size;
+ int page_count;
+ int unused;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
@@ -815,10 +848,11 @@ struct drm_i915_error_state {
struct drm_i915_error_request {
long jiffies;
pid_t pid;
+ u32 context;
u32 seqno;
u32 head;
u32 tail;
- } *requests;
+ } *requests, execlist[2];
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
@@ -914,6 +948,7 @@ struct i915_gem_context {
struct drm_i915_file_private *file_priv;
struct i915_hw_ppgtt *ppgtt;
struct pid *pid;
+ const char *name;
struct i915_ctx_hang_stats hang_stats;
@@ -924,6 +959,7 @@ struct i915_gem_context {
/* Unique identifier for this context, used by the hw for tracking */
unsigned int hw_id;
u32 user_handle;
+ int priority; /* greater priorities are serviced first */
u32 ggtt_alignment;
@@ -972,6 +1008,9 @@ struct intel_fbc {
bool enabled;
bool active;
+ bool underrun_detected;
+ struct work_struct underrun_work;
+
struct intel_fbc_state_cache {
struct {
unsigned int mode_flags;
@@ -1297,6 +1336,12 @@ struct i915_power_well {
/* cached hw enabled state */
bool hw_enabled;
unsigned long domains;
+ /* unique identifier for this power well */
+ unsigned long id;
+ /*
+ * Arbitrary data associated with this power well. Platform and power
+ * well specific.
+ */
unsigned long data;
const struct i915_power_well_ops *ops;
};
@@ -1334,11 +1379,22 @@ struct i915_gem_mm {
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU) but still have
- * (presumably uncached) pages still attached.
+ * are idle and not used by the GPU). These objects may or may
+ * not actually have any pages attached.
*/
struct list_head unbound_list;
+ /** List of all objects in gtt_space, currently mmaped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /**
+ * List of objects which are pending destruction.
+ */
+ struct llist_head free_list;
+ struct work_struct free_work;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1368,7 +1424,7 @@ struct i915_gem_mm {
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
- size_t object_memory;
+ u64 object_memory;
u32 object_count;
};
@@ -1387,6 +1443,9 @@ struct i915_error_state_file_priv {
struct drm_i915_error_state *error;
};
+#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
+#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1620,7 +1679,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- struct skl_ddb_entry pipe[I915_MAX_PIPES];
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};
@@ -1628,15 +1686,12 @@ struct skl_ddb_allocation {
struct skl_wm_values {
unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
- uint32_t wm_linetime[I915_MAX_PIPES];
- uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
- uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
};
struct skl_wm_level {
- bool plane_en[I915_MAX_PLANES];
- uint16_t plane_res_b[I915_MAX_PLANES];
- uint8_t plane_res_l[I915_MAX_PLANES];
+ bool plane_en;
+ uint16_t plane_res_b;
+ uint8_t plane_res_l;
};
/*
@@ -1664,7 +1719,6 @@ struct skl_wm_level {
*/
struct i915_runtime_pm {
atomic_t wakeref_count;
- atomic_t atomic_seq;
bool suspended;
bool irqs_enabled;
};
@@ -1748,6 +1802,7 @@ struct drm_i915_private {
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *requests;
+ struct kmem_cache *dependencies;
const struct intel_device_info info;
@@ -1759,7 +1814,7 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
- struct intel_gvt gvt;
+ struct intel_gvt *gvt;
struct intel_guc guc;
@@ -1787,9 +1842,8 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct i915_gem_context *kernel_context;
- struct intel_engine_cs engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
- u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
@@ -1814,8 +1868,10 @@ struct drm_i915_private {
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
- u32 pm_irq_mask;
+ u32 pm_imr;
+ u32 pm_ier;
u32 pm_rps_events;
+ u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1892,8 +1948,8 @@ struct drm_i915_private {
/* Kernel Modesetting */
- struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
- struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
+ struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+ struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
@@ -2009,13 +2065,6 @@ struct drm_i915_private {
*/
uint16_t skl_latency[8];
- /*
- * The skl_wm_values structure is a bit too big for stack
- * allocation, so we keep the staging struct where we store
- * intermediate results here instead.
- */
- struct skl_wm_values skl_results;
-
/* current hardware state */
union {
struct ilk_wm_values hw;
@@ -2047,6 +2096,10 @@ struct drm_i915_private {
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
+ struct list_head timelines;
+ struct i915_gem_timeline global_timeline;
+ u32 active_requests;
+
/**
* Is the GPU currently considered idle, or busy executing
* userspace requests? Whilst idle, we allow runtime power
@@ -2054,7 +2107,6 @@ struct drm_i915_private {
* In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
- unsigned int active_engines;
bool awake;
/**
@@ -2074,12 +2126,15 @@ struct drm_i915_private {
* off the idle_work.
*/
struct delayed_work idle_work;
+
+ ktime_t last_init_time;
} gt;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
- struct intel_encoder *dig_port_map[I915_MAX_PORTS];
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *av_enc_map[I915_MAX_PIPES];
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
@@ -2103,19 +2158,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
}
/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, dev_priv__) \
- for ((engine__) = &(dev_priv__)->engine[0]; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (intel_engine_initialized(engine__))
-
-/* Iterator with engine_id */
-#define for_each_engine_id(engine__, dev_priv__, id__) \
- for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (((id__) = (engine__)->id, \
- intel_engine_initialized(engine__)))
+#define for_each_engine(engine__, dev_priv__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_NUM_ENGINES; \
+ (id__)++) \
+ for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
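
With dev_priv->engine[] now holding pointers, the single for_each_engine() iterator skips unallocated slots and replaces the old for_each_engine_id() variant; callers carry an explicit enum intel_engine_id cursor, as disable_engines_irq() earlier in this patch already does. A tiny sketch of a caller under the new signature (the helper itself is hypothetical):

static unsigned int example_count_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int count = 0;

	/* NULL slots in dev_priv->engine[] are skipped by for_each_if() */
	for_each_engine(engine, dev_priv, id)
		count++;

	return count;
}
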
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
@@ -2126,7 +2173,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
- tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+ tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -2137,30 +2184,6 @@ enum hdmi_force_audio {
#define I915_GTT_OFFSET_NONE ((u32)-1)
-struct drm_i915_gem_object_ops {
- unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
-
- /* Interface between the GEM object and its backing storage.
- * get_pages() is called once prior to the use of the associated set
- * of pages before to binding them into the GTT, and put_pages() is
- * called after we no longer need them. As we expect there to be
- * associated cost with migrating pages between the backing storage
- * and making them available for the GPU (e.g. clflush), we may hold
- * onto the pages after they are no longer referenced by the GPU
- * in case they may be used again shortly (for example migrating the
- * pages to a different memory domain within the GTT). put_pages()
- * will therefore most likely be called when the object itself is
- * being released or under memory pressure (where we attempt to
- * reap pages for the shrinker).
- */
- int (*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *);
-
- int (*dmabuf_export)(struct drm_i915_gem_object *);
- void (*release)(struct drm_i915_gem_object *);
-};
-
/*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-wise. This
@@ -2182,232 +2205,6 @@ struct drm_i915_gem_object_ops {
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
-struct drm_i915_gem_object {
- struct drm_gem_object base;
-
- const struct drm_i915_gem_object_ops *ops;
-
- /** List of VMAs backed by this object */
- struct list_head vma_list;
-
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
- struct list_head global_list;
-
- /** Used in execbuf to temporarily hold a ref */
- struct list_head obj_exec_link;
-
- struct list_head batch_pool_link;
-
- unsigned long flags;
- /**
- * This is set if the object is on the active lists (has pending
- * rendering and so a non-zero seqno), and is not set if it is on
- * inactive (ready to be unbound) list.
- */
-#define I915_BO_ACTIVE_SHIFT 0
-#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
-#define __I915_BO_ACTIVE(bo) \
- ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
-
- /**
- * This is set if the object has been written to since last bound
- * to the GTT
- */
- unsigned int dirty:1;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * Whether the current gtt mapping needs to be mappable (and isn't just
- * mappable by accident). Track pin and fault separate for a more
- * accurate mappable working set.
- */
- unsigned int fault_mappable:1;
-
- /*
- * Is the object to be mapped as read-only to the GPU
- * Only honoured if hardware has relevant pte bit
- */
- unsigned long gt_ro:1;
- unsigned int cache_level:3;
- unsigned int cache_dirty:1;
-
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
-
- /** Current tiling stride for the object, if it's tiled. */
- unsigned int tiling_and_stride;
-#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
-#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
-#define STRIDE_MASK (~TILING_MASK)
-
- /** Count of VMA actually bound by this object */
- unsigned int bind_count;
- unsigned int pin_display;
-
- struct sg_table *pages;
- int pages_pin_count;
- struct get_page {
- struct scatterlist *sg;
- int last;
- } get_page;
- void *mapping;
-
- /** Breadcrumb of last rendering to the buffer.
- * There can only be one writer, but we allow for multiple readers.
- * If there is a writer that necessarily implies that all other
- * read requests are complete - but we may only be lazily clearing
- * the read requests. A read request is naturally the most recent
- * request on a ring, so we may have two different write and read
- * requests on one ring where the write request is older than the
- * read request. This allows for the CPU to read from an active
- * buffer by only waiting for the write to complete.
- */
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_write;
-
- /** References from framebuffers, locks out tiling changes. */
- unsigned long framebuffer_references;
-
- /** Record of address bit 17 of each page at last unbind. */
- unsigned long *bit_17;
-
- struct i915_gem_userptr {
- uintptr_t ptr;
- unsigned read_only :1;
- unsigned workers :4;
-#define I915_GEM_USERPTR_MAX_WORKERS 15
-
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
-};
-
-static inline struct drm_i915_gem_object *
-to_intel_bo(struct drm_gem_object *gem)
-{
- /* Assert that to_intel_bo(NULL) == NULL */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
-
- return container_of(gem, struct drm_i915_gem_object, base);
-}
-
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup(struct drm_file *file, u32 handle)
-{
- return to_intel_bo(drm_gem_object_lookup(file, handle));
-}
-
-__deprecated
-extern struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *file, u32 handle);
-
-__attribute__((nonnull))
-static inline struct drm_i915_gem_object *
-i915_gem_object_get(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_reference(&obj->base);
- return obj;
-}
-
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_unreference(&obj->base);
-}
-
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_unreference_unlocked(&obj->base);
-}
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
-}
-
-static inline unsigned long
-i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
-{
- return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
-}
-
-static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_active(obj);
-}
-
-static inline void
-i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
-{
- obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline void
-i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
-{
- obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline bool
-i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
- int engine)
-{
- return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline unsigned int
-i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & TILING_MASK;
-}
-
-static inline bool
-i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
-}
-
-static inline unsigned int
-i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & STRIDE_MASK;
-}
-
-static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
-{
- i915_gem_object_get(vma->obj);
- return vma;
-}
-
-static inline void i915_vma_put(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- i915_gem_object_put(vma->obj);
-}
-
/*
* Optimised SGL iterator for GEM objects
*/
@@ -2434,6 +2231,14 @@ static __always_inline struct sgt_iter {
return s;
}
+static inline struct scatterlist *____sg_next(struct scatterlist *sg)
+{
+ ++sg;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+ return sg;
+}
+
/**
* __sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
@@ -2448,9 +2253,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
- return sg_is_last(sg) ? NULL :
- likely(!sg_is_chain(++sg)) ? sg :
- sg_chain_ptr(sg);
+ return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
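
____sg_next() factors the chain hop out of __sg_next(), so the common case stays a plain increment. A short sketch of walking an object's backing sg_table with it (the helper is hypothetical):

static unsigned int example_count_sg_entries(struct sg_table *pages)
{
	struct scatterlist *sg;
	unsigned int count = 0;

	/* __sg_next() returns NULL once the last real entry is reached */
	for (sg = pages->sgl; sg; sg = __sg_next(sg))
		count++;

	return count;
}
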
/**
@@ -2574,23 +2377,19 @@ struct drm_i915_cmd_table {
int count;
};
-/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
-#define __I915__(p) ({ \
- struct drm_i915_private *__p; \
- if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
- __p = (struct drm_i915_private *)p; \
- else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
- __p = to_i915((struct drm_device *)p); \
- else \
- BUILD_BUG(); \
- __p; \
-})
-#define INTEL_INFO(p) (&__I915__(p)->info)
-#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
-#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+static inline const struct intel_device_info *
+intel_info(const struct drm_i915_private *dev_priv)
+{
+ return &dev_priv->info;
+}
+
+#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+
+#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
+#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
#define REVID_FOREVER 0xff
-#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
+#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
/*
@@ -2598,7 +2397,7 @@ struct drm_i915_cmd_table {
*
* Use GEN_FOREVER for unbound start and or end.
*/
-#define IS_GEN(p, s, e) ({ \
+#define IS_GEN(dev_priv, s, e) ({ \
unsigned int __s = (s), __e = (e); \
BUILD_BUG_ON(!__builtin_constant_p(s)); \
BUILD_BUG_ON(!__builtin_constant_p(e)); \
@@ -2608,7 +2407,7 @@ struct drm_i915_cmd_table {
__e = BITS_PER_LONG - 1; \
else \
__e = (e) - 1; \
- !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+ !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
})
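
IS_GEN(dev_priv, s, e) checks the gen_mask bitfield against an inclusive range, with GEN_FOREVER leaving either end open. Two illustrative conditions (the quirk helpers are placeholders, not from the patch):

static void example_apply_gen_quirks(struct drm_i915_private *dev_priv)
{
	/* true for any gen9-or-newer part: GEN_FOREVER leaves the end open */
	if (IS_GEN(dev_priv, 9, GEN_FOREVER))
		example_gen9_plus_setup(dev_priv);	/* placeholder */

	/* true only for gen6 and gen7 parts */
	if (IS_GEN(dev_priv, 6, 7))
		example_snb_ivb_quirk(dev_priv);	/* placeholder */
}
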
/*
@@ -2619,75 +2418,75 @@ struct drm_i915_cmd_table {
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
-#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
-#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
-#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
-#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
- INTEL_DEVID(dev) == 0x0152 || \
- INTEL_DEVID(dev) == 0x015a)
-#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
-#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
-#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
-#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
-#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
- (INTEL_DEVID(dev) & 0xf) == 0xb || \
- (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
+#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
+#define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x)
+#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
+#define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g)
+#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
+#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
+#define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm)
+#define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater)
+#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline)
+#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
+#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
+#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
+#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
+#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview)
+#define IS_G33(dev_priv) ((dev_priv)->info.is_g33)
+#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
+#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
+ INTEL_DEVID(dev_priv) == 0x0152 || \
+ INTEL_DEVID(dev_priv) == 0x015a)
+#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
+#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
+#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
+#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
+#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
+#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
+#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
+#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
+#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
+ ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
- (INTEL_DEVID(dev) & 0xf) == 0xe)
-#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
- INTEL_DEVID(dev) == 0x0A1E)
-#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
- INTEL_DEVID(dev) == 0x1913 || \
- INTEL_DEVID(dev) == 0x1916 || \
- INTEL_DEVID(dev) == 0x1921 || \
- INTEL_DEVID(dev) == 0x1926)
-#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
- INTEL_DEVID(dev) == 0x1915 || \
- INTEL_DEVID(dev) == 0x191E)
-#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
- INTEL_DEVID(dev) == 0x5913 || \
- INTEL_DEVID(dev) == 0x5916 || \
- INTEL_DEVID(dev) == 0x5921 || \
- INTEL_DEVID(dev) == 0x5926)
-#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
- INTEL_DEVID(dev) == 0x5915 || \
- INTEL_DEVID(dev) == 0x591E)
-#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
-
-#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
+ INTEL_DEVID(dev_priv) == 0x0A1E)
+#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
+ INTEL_DEVID(dev_priv) == 0x1913 || \
+ INTEL_DEVID(dev_priv) == 0x1916 || \
+ INTEL_DEVID(dev_priv) == 0x1921 || \
+ INTEL_DEVID(dev_priv) == 0x1926)
+#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
+ INTEL_DEVID(dev_priv) == 0x1915 || \
+ INTEL_DEVID(dev_priv) == 0x191E)
+#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
+ INTEL_DEVID(dev_priv) == 0x5913 || \
+ INTEL_DEVID(dev_priv) == 0x5916 || \
+ INTEL_DEVID(dev_priv) == 0x5921 || \
+ INTEL_DEVID(dev_priv) == 0x5926)
+#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
+ INTEL_DEVID(dev_priv) == 0x5915 || \
+ INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
+
+#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
@@ -2705,7 +2504,8 @@ struct drm_i915_cmd_table {
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
-#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define IS_BXT_REVID(dev_priv, since, until) \
+ (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
#define KBL_REVID_A0 0x0
#define KBL_REVID_B0 0x1
@@ -2713,8 +2513,8 @@ struct drm_i915_cmd_table {
#define KBL_REVID_D0 0x3
#define KBL_REVID_E0 0x4
-#define IS_KBL_REVID(p, since, until) \
- (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+#define IS_KBL_REVID(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
/*
* The genX designation typically refers to the render engine, so render
@@ -2722,14 +2522,14 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
-#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
-#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
-#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
-#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
-#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
-#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
-#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
-#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
+#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
+#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
+#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
+#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
+#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
+#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
+#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@@ -2740,31 +2540,34 @@ struct drm_i915_cmd_table {
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
- (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+ (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
-#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- HAS_EDRAM(dev))
-#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
+#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
+#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
+#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
+#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
+ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
+
+#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
-#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
-#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
-#define USES_PPGTT(dev) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
+#define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_contexts)
+#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
+#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
+ ((dev_priv)->info.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -2778,46 +2581,49 @@ struct drm_i915_cmd_table {
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
*/
-#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
+#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
+#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+ !(IS_I915G(dev_priv) || \
+ IS_I915GM(dev_priv)))
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
+#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
-#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
-#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
-#define HAS_RUNTIME_PM(dev) (INTEL_INFO(dev)->has_runtime_pm)
-#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
+#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
-#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
+
+#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
* properties, so we have separate macros to test them.
*/
-#define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc)
-#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
-#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
+#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
+#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
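/*
 * Illustrative sketch, not part of this patch: uCode loading and command
 * submission are tested independently even though both currently reduce to
 * HAS_GUC(). Hypothetical helper, assuming the i915_drv.h context and the
 * i915.enable_guc_submission module parameter.
 */
static inline bool example_can_submit_via_guc(struct drm_i915_private *dev_priv)
{
	/* firmware must be loadable before GuC submission can be considered */
	return HAS_GUC_UCODE(dev_priv) && HAS_GUC_SCHED(dev_priv) &&
	       i915.enable_guc_submission;
}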
-#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
+#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
-#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2832,26 +2638,33 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
-#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
-#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
-#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
+#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
-#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
+#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
+ 2 : HAS_L3_DPF(dev_priv))
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
+
#include "i915_trace.h"
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -2882,12 +2695,20 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#else
+#define i915_compat_ioctl NULL
#endif
+extern const struct dev_pm_ops i915_pm_ops;
+
+extern int i915_driver_load(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern void i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
+extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -2969,7 +2790,7 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
- return dev_priv->gvt.initialized;
+ return dev_priv->gvt;
}
static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
@@ -3071,7 +2892,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_gem_load_init(struct drm_device *dev);
+int i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
@@ -3082,7 +2903,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- size_t size);
+ u64 size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
@@ -3095,77 +2916,86 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
-int __must_check i915_vma_unbind(struct i915_vma *vma);
-void i915_vma_close(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
-
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __sg_page_count(struct scatterlist *sg)
+static inline int __sg_page_count(const struct scatterlist *sg)
{
return sg->length >> PAGE_SHIFT;
}
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
-static inline dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+ might_lock(&obj->mm.lock);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
+ if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+ return 0;
- return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+ return __i915_gem_object_get_pages(obj);
}
-static inline struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
- return NULL;
-
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+ GEM_BUG_ON(!obj->mm.pages);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
+ atomic_inc(&obj->mm.pages_pin_count);
+}
- return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->mm.pages_pin_count);
}
-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages == NULL);
- obj->pages_pin_count++;
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(!obj->mm.pages);
+
+ atomic_dec(&obj->mm.pages_pin_count);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
-static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages_pin_count == 0);
- obj->pages_pin_count--;
+ __i915_gem_object_unpin_pages(obj);
}
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+ I915_MM_NORMAL = 0,
+ I915_MM_SHRINKER
+};
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
+
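/*
 * Illustrative sketch, not part of this patch: obj->mm.pages_pin_count is now
 * an atomic reference, so the common pattern is pin, operate, unpin. The
 * function below is hypothetical and assumes the declarations above.
 */
static int example_touch_backing_store(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_pin_pages(obj);	/* may populate obj->mm.pages */
	if (err)
		return err;

	/* obj->mm.pages is guaranteed to remain allocated while pinned */

	i915_gem_object_unpin_pages(obj);	/* drop the atomic pin reference */
	return 0;
}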
enum i915_map_type {
I915_MAP_WB = 0,
I915_MAP_WC,
@@ -3181,8 +3011,8 @@ enum i915_map_type {
* the kernel address space. Based on the @type of mapping, the PTE will be
* set to either WriteBack or WriteCombine (via pgprot_t).
*
- * The caller must hold the struct_mutex, and is responsible for calling
- * i915_gem_object_unpin_map() when the mapping is no longer required.
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
*
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
@@ -3198,12 +3028,9 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
* with your access, call i915_gem_object_unpin_map() to release the pin
* upon the mapping. Once the pin count reaches zero, that mapping may be
* removed.
- *
- * The caller must hold the struct_mutex.
*/
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
}
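/*
 * Illustrative sketch, not part of this patch: pin_map/unpin_map now pair
 * without struct_mutex around the unpin, per the documentation change above.
 * Hypothetical helper; len is assumed to be within obj->base.size.
 */
static int example_copy_into_object(struct drm_i915_gem_object *obj,
				    const void *data, size_t len)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, data, len);		/* CPU access through the mapping */
	i915_gem_object_unpin_map(obj);		/* no struct_mutex required here */

	return 0;
}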
@@ -3236,7 +3063,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
-int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
@@ -3265,19 +3092,25 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
int __must_check i915_gem_suspend(struct drm_device *dev);
void i915_gem_resume(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int __must_check
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly);
+int i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps);
+int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int priority);
+#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
@@ -3337,57 +3170,17 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
}
-/* i915_gem_fence.c */
+/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
-/**
- * i915_vma_pin_fence - pin fencing state
- * @vma: vma to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * vma (and its object) is ready to be used as a scanout target. Fencing
- * status must be synchronize first by calling i915_vma_get_fence():
- *
- * The resulting fence pin reference must be released again with
- * i915_vma_unpin_fence().
- *
- * Returns:
- *
- * True if the vma has a fence, false otherwise.
- */
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
-{
- if (vma->fence) {
- vma->fence->pin_count++;
- return true;
- } else
- return false;
-}
-
-/**
- * i915_vma_unpin_fence - unpin fencing state
- * @vma: vma to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_vma_pin_fence. It will handle both objects with and without an
- * attached fence correctly, callers do not need to distinguish this.
- */
-static inline void
-i915_vma_unpin_fence(struct i915_vma *vma)
-{
- if (vma->fence) {
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
- }
-}
-
-void i915_gem_restore_fences(struct drm_device *dev);
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3397,6 +3190,9 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+ unsigned int flags);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3430,6 +3226,16 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_free);
}
+static inline struct intel_timeline *
+i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct i915_address_space *vm;
+
+ vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ return &vm->timeline.engine[engine->id];
+}
+
static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
@@ -3473,7 +3279,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3483,6 +3289,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
+/* i915_gem_internal.c */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
+ unsigned int size);
+
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target,
@@ -3521,6 +3332,8 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif
/* i915_gpu_error.c */
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
@@ -3541,7 +3354,20 @@ void i915_error_state_get(struct drm_device *dev,
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
+#else
+
+static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
+ const char *error_msg)
+{
+}
+
+static inline void i915_destroy_error_state(struct drm_device *dev)
+{
+}
+
+#endif
+
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
@@ -3591,6 +3417,9 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
+bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port);
+
/* intel_opregion.c */
#ifdef CONFIG_ACPI
@@ -3647,15 +3476,16 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv);
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_init(struct drm_device *dev);
+extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
-extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
+ bool state);
extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_device *dev);
-extern void i915_redisable_vga_power_on(struct drm_device *dev);
+extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
+extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
@@ -3674,7 +3504,7 @@ extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
@@ -3702,6 +3532,23 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
/* intel_dpio_phy.c */
+void bxt_port_to_phy_channel(enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_lat_optim_mask);
+uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
@@ -3791,11 +3638,30 @@ __raw_write(64, q)
#undef __raw_write
/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections inside IRQ handlers where forcewake is explicitly
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
* controlled.
+ *
* Think twice, and think again, before using these.
- * Note: Should only be used between intel_uncore_forcewake_irqlock() and
- * intel_uncore_forcewake_irqunlock().
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&dev_priv->uncore.lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&dev_priv->uncore.lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
*/
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
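/*
 * Illustrative sketch, not part of this patch, following the comment above:
 * hold uncore.lock and take forcewake explicitly around the _FW accessors.
 * Hypothetical function and register; assumes the usual i915_drv.h context.
 */
static u32 example_read_counter_fw(struct drm_i915_private *dev_priv,
				   i915_reg_t reg)
{
	u32 val;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

	val = I915_READ_FW(reg);	/* untraced, caller-managed forcewake */

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return val;
}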
@@ -3807,11 +3673,11 @@ __raw_write(64, q)
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
+static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return VLV_VGACNTRL;
- else if (INTEL_INFO(dev)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
return CPU_VGACNTRL;
else
return VGACNTRL;
@@ -3872,7 +3738,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
/* Ensure our read of the seqno is coherent so that we
@@ -3923,7 +3789,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
wake_up_process(tsk);
rcu_read_unlock();
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 947e82c2b175..902fa427c196 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,12 +29,12 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -42,13 +42,14 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
- return HAS_LLC(dev) || level != I915_CACHE_NONE;
+ return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -63,13 +64,13 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
}
static int
-insert_mappable_node(struct drm_i915_private *i915,
+insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
- size, 0, 0, 0,
- i915->ggtt.mappable_end,
+ return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
+ size, 0, -1,
+ 0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
}
@@ -82,7 +83,7 @@ remove_mappable_node(struct drm_mm_node *node)
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
- size_t size)
+ u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
@@ -91,7 +92,7 @@ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
- size_t size)
+ u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
@@ -104,6 +105,8 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
+ might_sleep();
+
if (!i915_reset_in_progress(error))
return 0;
@@ -114,7 +117,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
!i915_reset_in_progress(error),
- 10*HZ);
+ I915_RESET_TIMEOUT);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
@@ -167,7 +170,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int
+static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
@@ -177,7 +180,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
@@ -185,7 +188,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
- return PTR_ERR(page);
+ return ERR_CAST(page);
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
@@ -200,11 +203,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
@@ -214,29 +217,33 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_address(sg) = obj->phys_handle->busaddr;
sg_dma_len(sg) = obj->base.size;
- obj->pages = st;
- return 0;
+ return st;
}
static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- int ret;
+ GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
- BUG_ON(obj->madv == __I915_MADV_PURGED);
+ if (obj->mm.madv == I915_MADV_DONTNEED)
+ obj->mm.dirty = false;
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ drm_clflush_sg(pages);
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+}
- if (obj->madv == I915_MADV_DONTNEED)
- obj->dirty = 0;
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __i915_gem_object_release_shmem(obj, pages);
- if (obj->dirty) {
+ if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
int i;
@@ -255,22 +262,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
kunmap_atomic(dst);
set_page_dirty(page);
- if (obj->madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
vaddr += PAGE_SIZE;
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
}
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
drm_pci_free(obj->base.dev, obj->phys_handle);
+ i915_gem_object_unpin_pages(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
@@ -292,7 +300,12 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -311,90 +324,209 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return ret;
}
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for just read access or read-write access
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly)
+static long
+i915_gem_object_wait_fence(struct dma_fence *fence,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
{
- struct reservation_object *resv;
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct drm_i915_gem_request *rq;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
- if (!readonly) {
- active = obj->last_read;
- active_mask = i915_gem_object_get_active(obj);
- } else {
- active_mask = 1;
- active = &obj->last_write;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return timeout;
+
+ if (!dma_fence_is_i915(fence))
+ return dma_fence_wait_timeout(fence,
+ flags & I915_WAIT_INTERRUPTIBLE,
+ timeout);
+
+ rq = to_request(fence);
+ if (i915_gem_request_completed(rq))
+ goto out;
+
+ /* This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we wait. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery). Not all clients even want their results
+ * immediately and for them we should just let the GPU select its own
+ * frequency to maximise efficiency. To prevent a single client from
+ * forcing the clocks too high for the whole system, we only allow
+ * each client to waitboost once in a busy period.
+ */
+ if (rps) {
+ if (INTEL_GEN(rq->i915) >= 6)
+ gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+ else
+ rps = NULL;
+ }
+
+ timeout = i915_wait_request(rq, flags, timeout);
+
+out:
+ if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
+ i915_gem_request_retire_upto(rq);
+
+ if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
+ /* The GPU is now idle and this client has stalled.
+ * Since no other client has submitted a request in the
+ * meantime, assume that this client is the only one
+ * supplying work to the GPU but is unable to keep that
+ * work supplied because it is waiting. Since the GPU is
+ * then never kept fully busy, RPS autoclocking will
+ * keep the clocks relatively low, causing further delays.
+ * Compensate by giving the synchronous client credit for
+ * a waitboost next time.
+ */
+ spin_lock(&rq->i915->rps.client_lock);
+ list_del_init(&rps->link);
+ spin_unlock(&rq->i915->rps.client_lock);
}
- for_each_active(active_mask, idx) {
+ return timeout;
+}
+
+static long
+i915_gem_object_wait_reservation(struct reservation_object *resv,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
int ret;
- ret = i915_gem_active_wait(&active[idx],
- &obj->base.dev->struct_mutex);
+ ret = reservation_object_get_fences_rcu(resv,
+ &excl, &count, &shared);
if (ret)
return ret;
- }
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long err;
+ for (i = 0; i < count; i++) {
+ timeout = i915_gem_object_wait_fence(shared[i],
+ flags, timeout,
+ rps);
+ if (timeout <= 0)
+ break;
- err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
- MAX_SCHEDULE_TIMEOUT);
- if (err < 0)
- return err;
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(resv);
}
- return 0;
+ if (excl && timeout > 0)
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+
+ dma_fence_put(excl);
+
+ return timeout;
}
-/* A nonblocking variant of the above wait. Must be called prior to
- * acquiring the mutex for the object, as the object state may change
- * during this call. A reference must be held by the caller for the object.
- */
-static __must_check int
-__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
- struct intel_rps_client *rps,
- bool readonly)
+static void __fence_set_priority(struct dma_fence *fence, int prio)
{
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct drm_i915_gem_request *rq;
+ struct intel_engine_cs *engine;
- active_mask = __I915_BO_ACTIVE(obj);
- if (!active_mask)
- return 0;
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ rq = to_request(fence);
+ engine = rq->engine;
+ if (!engine->schedule)
+ return;
+
+ engine->schedule(rq, prio);
+}
- if (!readonly) {
- active = obj->last_read;
+static void fence_set_priority(struct dma_fence *fence, int prio)
+{
+ /* Recurse once into a fence-array */
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ int i;
+
+ for (i = 0; i < array->num_fences; i++)
+ __fence_set_priority(array->fences[i], prio);
} else {
- active_mask = 1;
- active = &obj->last_write;
+ __fence_set_priority(fence, prio);
}
+}
- for_each_active(active_mask, idx) {
+int
+i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int prio)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
int ret;
- ret = i915_gem_active_wait_unlocked(&active[idx],
- I915_WAIT_INTERRUPTIBLE,
- NULL, rps);
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
if (ret)
return ret;
+
+ for (i = 0; i < count; i++) {
+ fence_set_priority(shared[i], prio);
+ dma_fence_put(shared[i]);
+ }
+
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(obj->resv);
}
+ if (excl) {
+ fence_set_priority(excl, prio);
+ dma_fence_put(excl);
+ }
return 0;
}
+/**
+ * Waits for rendering to the object to be completed
+ * @obj: i915 gem object
+ * @flags: how to wait (under a lock, for all rendering or just for writes etc)
+ * @timeout: how long to wait
+ * @rps: client (user process) to charge for any waitboosting
+ */
+int
+i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
+{
+ might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ GEM_BUG_ON(debug_locks &&
+ !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
+ !!(flags & I915_WAIT_LOCKED));
+#endif
+ GEM_BUG_ON(timeout < 0);
+
+ timeout = i915_gem_object_wait_reservation(obj->resv,
+ flags, timeout,
+ rps);
+ return timeout < 0 ? timeout : 0;
+}
+
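/*
 * Illustrative sketch, not part of this patch: a caller that previously used
 * i915_gem_object_wait_rendering() now passes explicit flags and a timeout.
 * Hypothetical helper; assumes struct_mutex is held to match I915_WAIT_LOCKED.
 */
static int example_wait_for_all_writes(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_wait(obj,
				    I915_WAIT_INTERRUPTIBLE |
				    I915_WAIT_LOCKED |
				    I915_WAIT_ALL,
				    MAX_SCHEDULE_TIMEOUT,
				    NULL);	/* no RPS client to waitboost */
}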
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
struct drm_i915_file_private *fpriv = file->driver_priv;
@@ -416,7 +548,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
return 0;
}
- if (obj->madv != I915_MADV_WILLNEED)
+ if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
if (obj->base.filp == NULL)
@@ -426,9 +558,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_put_pages(obj);
- if (ret)
- return ret;
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ if (obj->mm.pages)
+ return -EBUSY;
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
@@ -438,23 +570,29 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
- return i915_gem_object_get_pages(obj);
+ return i915_gem_object_pin_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- int ret = 0;
+ int ret;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
return ret;
@@ -516,7 +654,7 @@ i915_gem_create(struct drm_file *file,
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (ret)
return ret;
@@ -548,6 +686,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_create *args = data;
+ i915_gem_flush_free_objects(to_i915(dev));
+
return i915_gem_create(file, dev,
args->size, &args->handle);
}
@@ -614,21 +754,24 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
{
int ret;
- *needs_clflush = 0;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ *needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu read domain, set ourself into the gtt
@@ -661,20 +804,25 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
{
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
*needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu write domain, set ourself into the
@@ -704,7 +852,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
obj->cache_dirty = true;
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->dirty = 1;
+ obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
@@ -713,32 +861,6 @@ err_unpin:
return ret;
}
-/* Per-page copy function for the shmem pread fastpath.
- * Flushes invalid cachelines before reading the target if
- * needs_clflush is set. */
-static int
-shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
- char __user *user_data,
- bool page_do_bit17_swizzling, bool needs_clflush)
-{
- char *vaddr;
- int ret;
-
- if (unlikely(page_do_bit17_swizzling))
- return -EINVAL;
-
- vaddr = kmap_atomic(page);
- if (needs_clflush)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- ret = __copy_to_user_inatomic(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- return ret ? -EFAULT : 0;
-}
-
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
bool swizzled)
@@ -764,7 +886,7 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
static int
-shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling, bool needs_clflush)
{
@@ -773,60 +895,130 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap(page);
if (needs_clflush)
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
+ ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
+ ret = __copy_to_user(user_data, vaddr + offset, length);
kunmap(page);
return ret ? - EFAULT : 0;
}
-static inline unsigned long
-slow_user_access(struct io_mapping *mapping,
- uint64_t page_base, int page_offset,
- char __user *user_data,
- unsigned long length, bool pwrite)
+static int
+shmem_pread(struct page *page, int offset, int length, char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ int ret;
+
+ ret = -ENODEV;
+ if (!page_do_bit17_swizzling) {
+ char *vaddr = kmap_atomic(page);
+
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + offset, length);
+ ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ kunmap_atomic(vaddr);
+ }
+ if (ret == 0)
+ return 0;
+
+ return shmem_pread_slow(page, offset, length, user_data,
+ page_do_bit17_swizzling, needs_clflush);
+}
+
+static int
+i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args)
+{
+ char __user *user_data;
+ u64 remain;
+ unsigned int obj_do_bit17_swizzling;
+ unsigned int needs_clflush;
+ unsigned int idx, offset;
+ int ret;
+
+ obj_do_bit17_swizzling = 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ obj_do_bit17_swizzling = BIT(17);
+
+ ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ remain = args->size;
+ user_data = u64_to_user_ptr(args->data_ptr);
+ offset = offset_in_page(args->offset);
+ for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+ struct page *page = i915_gem_object_get_page(obj, idx);
+ int length;
+
+ length = remain;
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ ret = shmem_pread(page, offset, length, user_data,
+ page_to_phys(page) & obj_do_bit17_swizzling,
+ needs_clflush);
+ if (ret)
+ break;
+
+ remain -= length;
+ user_data += length;
+ offset = 0;
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return ret;
+}
+
+static inline bool
+gtt_user_read(struct io_mapping *mapping,
+ loff_t base, int offset,
+ char __user *user_data, int length)
{
- void __iomem *ioaddr;
void *vaddr;
- uint64_t unwritten;
+ unsigned long unwritten;
- ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force *)ioaddr + page_offset;
- if (pwrite)
- unwritten = __copy_from_user(vaddr, user_data, length);
- else
- unwritten = __copy_to_user(user_data, vaddr, length);
-
- io_mapping_unmap(ioaddr);
+ vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ vaddr = (void __force *)
+ io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_to_user(user_data, vaddr + offset, length);
+ io_mapping_unmap(vaddr);
+ }
return unwritten;
}
static int
-i915_gem_gtt_pread(struct drm_device *dev,
- struct drm_i915_gem_object *obj, uint64_t size,
- uint64_t data_offset, uint64_t data_ptr)
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_mm_node node;
- char __user *user_data;
- uint64_t remain;
- uint64_t offset;
+ struct i915_vma *vma;
+ void __user *user_data;
+ u64 remain, offset;
int ret;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(i915);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
@@ -837,35 +1029,21 @@ i915_gem_gtt_pread(struct drm_device *dev,
}
}
if (IS_ERR(vma)) {
- ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- remove_mappable_node(&node);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
+ goto out_unlock;
+ GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
goto out_unpin;
- user_data = u64_to_user_ptr(data_ptr);
- remain = size;
- offset = data_offset;
+ mutex_unlock(&i915->drm.struct_mutex);
- mutex_unlock(&dev->struct_mutex);
- if (likely(!i915.prefault_disable)) {
- ret = fault_in_pages_writeable(user_data, remain);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- goto out_unpin;
- }
- }
+ user_data = u64_to_user_ptr(args->data_ptr);
+ remain = args->size;
+ offset = args->offset;
while (remain > 0) {
/* Operation in this page
@@ -882,19 +1060,14 @@ i915_gem_gtt_pread(struct drm_device *dev,
wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start,
- I915_CACHE_NONE, 0);
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
}
- /* This is a slow read/write as it tries to read from
- * and write to user memory which may result into page
- * faults, and so we cannot perform this under struct_mutex.
- */
- if (slow_user_access(&ggtt->mappable, page_base,
- page_offset, user_data,
- page_length, false)) {
+
+ if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+ user_data, page_length)) {
ret = -EFAULT;
break;
}
@@ -904,111 +1077,19 @@ i915_gem_gtt_pread(struct drm_device *dev,
offset += page_length;
}
- mutex_lock(&dev->struct_mutex);
- if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
- /* The user has modified the object whilst we tried
- * reading from it, and we now have no idea what domain
- * the pages should be in. As we have just been touching
- * them directly, flush everything back to the GTT
- * domain.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- }
-
+ mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
- node.start, node.size,
- true);
- i915_gem_object_unpin_pages(obj);
+ node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
-out:
- return ret;
-}
-
-static int
-i915_gem_shmem_pread(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
-{
- char __user *user_data;
- ssize_t remain;
- loff_t offset;
- int shmem_page_offset, page_length, ret = 0;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int prefaulted = 0;
- int needs_clflush = 0;
- struct sg_page_iter sg_iter;
-
- ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
- if (ret)
- return ret;
-
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
-
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
- if (remain <= 0)
- break;
-
- /* Operation in this page
- *
- * shmem_page_offset = offset within page in shmem file
- * page_length = bytes to copy for this page
- */
- shmem_page_offset = offset_in_page(offset);
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
-
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- ret = shmem_pread_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- needs_clflush);
- if (ret == 0)
- goto next_page;
-
- mutex_unlock(&dev->struct_mutex);
-
- if (likely(!i915.prefault_disable) && !prefaulted) {
- ret = fault_in_pages_writeable(user_data, remain);
- /* Userspace is tricking us, but we've already clobbered
- * its pages with the prefault and promised to write the
- * data up to the first fault. Hence ignore any errors
- * and just continue. */
- (void)ret;
- prefaulted = 1;
- }
-
- ret = shmem_pread_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- needs_clflush);
-
- mutex_lock(&dev->struct_mutex);
-
- if (ret)
- goto out;
-
-next_page:
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
-
-out:
- i915_gem_obj_finish_shmem_access(obj);
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1027,7 +1108,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
- int ret = 0;
+ int ret;
if (args->size == 0)
return 0;
@@ -1045,36 +1126,29 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL;
- goto err;
+ goto out;
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
- goto err;
+ goto out;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err;
-
- ret = i915_gem_shmem_pread(dev, obj, args, file);
+ goto out;
- /* pread for non shmem backed objects */
- if (ret == -EFAULT || ret == -ENODEV) {
- intel_runtime_pm_get(to_i915(dev));
- ret = i915_gem_gtt_pread(dev, obj, args->size,
- args->offset, args->data_ptr);
- intel_runtime_pm_put(to_i915(dev));
- }
+ ret = i915_gem_shmem_pread(obj, args);
+ if (ret == -EFAULT || ret == -ENODEV)
+ ret = i915_gem_gtt_pread(obj, args);
+ i915_gem_object_unpin_pages(obj);
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-
-err:
- i915_gem_object_put_unlocked(obj);
return ret;
}
@@ -1082,51 +1156,52 @@ err:
* page faults in the source data
*/
-static inline int
-fast_user_write(struct io_mapping *mapping,
- loff_t page_base, int page_offset,
- char __user *user_data,
- int length)
+static inline bool
+ggtt_write(struct io_mapping *mapping,
+ loff_t base, int offset,
+ char __user *user_data, int length)
{
- void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force*)vaddr_atomic + page_offset;
- unwritten = __copy_from_user_inatomic_nocache(vaddr,
+ vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ vaddr = (void __force *)
+ io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_from_user(vaddr + offset, user_data, length);
+ io_mapping_unmap(vaddr);
+ }
+
return unwritten;
}
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
- * @i915: i915 device private data
- * @obj: i915 gem object
+ * @obj: i915 GEM object
* @args: pwrite arguments structure
- * @file: drm file pointer
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
- struct drm_device *dev = obj->base.dev;
- struct i915_vma *vma;
struct drm_mm_node node;
- uint64_t remain, offset;
- char __user *user_data;
+ struct i915_vma *vma;
+ u64 remain, offset;
+ void __user *user_data;
int ret;
- bool hit_slow_path = false;
- if (i915_gem_object_is_tiled(obj))
- return -EFAULT;
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
@@ -1139,25 +1214,19 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
}
}
if (IS_ERR(vma)) {
- ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- remove_mappable_node(&node);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
+ goto out_unlock;
+ GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
+ mutex_unlock(&i915->drm.struct_mutex);
+
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->dirty = true;
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -1170,8 +1239,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* page_length = bytes to copy for this page
*/
u32 page_base = node.start;
- unsigned page_offset = offset_in_page(offset);
- unsigned page_length = PAGE_SIZE - page_offset;
+ unsigned int page_offset = offset_in_page(offset);
+ unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
@@ -1188,92 +1257,36 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
- if (fast_user_write(&ggtt->mappable, page_base,
- page_offset, user_data, page_length)) {
- hit_slow_path = true;
- mutex_unlock(&dev->struct_mutex);
- if (slow_user_access(&ggtt->mappable,
- page_base,
- page_offset, user_data,
- page_length, true)) {
- ret = -EFAULT;
- mutex_lock(&dev->struct_mutex);
- goto out_flush;
- }
-
- mutex_lock(&dev->struct_mutex);
+ if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+ user_data, page_length)) {
+ ret = -EFAULT;
+ break;
}
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-
-out_flush:
- if (hit_slow_path) {
- if (ret == 0 &&
- (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
- /* The user has modified the object whilst we tried
- * reading from it, and we now have no idea what domain
- * the pages should be in. As we have just been touching
- * them directly, flush everything back to the GTT
- * domain.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- }
- }
-
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+
+ mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
- node.start, node.size,
- true);
- i915_gem_object_unpin_pages(obj);
+ node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
-out:
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
-/* Per-page copy function for the shmem pwrite fastpath.
- * Flushes invalid cachelines before writing to the target if
- * needs_clflush_before is set and flushes out any written cachelines after
- * writing if needs_clflush is set. */
-static int
-shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
- char __user *user_data,
- bool page_do_bit17_swizzling,
- bool needs_clflush_before,
- bool needs_clflush_after)
-{
- char *vaddr;
- int ret;
-
- if (unlikely(page_do_bit17_swizzling))
- return -EINVAL;
-
- vaddr = kmap_atomic(page);
- if (needs_clflush_before)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
- user_data, page_length);
- if (needs_clflush_after)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- return ret ? -EFAULT : 0;
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
static int
-shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling,
bool needs_clflush_before,
@@ -1284,124 +1297,114 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap(page);
if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user_swizzled(vaddr, offset, user_data,
+ length);
else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user(vaddr + offset, user_data, length);
if (needs_clflush_after)
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
kunmap(page);
return ret ? -EFAULT : 0;
}
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set.
+ */
static int
-i915_gem_shmem_pwrite(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int shmem_page_offset, page_length, ret = 0;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int hit_slowpath = 0;
- unsigned int needs_clflush;
- struct sg_page_iter sg_iter;
+ int ret;
- ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
- if (ret)
- return ret;
+ ret = -ENODEV;
+ if (!page_do_bit17_swizzling) {
+ char *vaddr = kmap_atomic(page);
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + offset, len);
+ ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + offset, len);
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
- struct page *page = sg_page_iter_page(&sg_iter);
- int partial_cacheline_write;
+ kunmap_atomic(vaddr);
+ }
+ if (ret == 0)
+ return ret;
- if (remain <= 0)
- break;
+ return shmem_pwrite_slow(page, offset, len, user_data,
+ page_do_bit17_swizzling,
+ needs_clflush_before,
+ needs_clflush_after);
+}
- /* Operation in this page
- *
- * shmem_page_offset = offset within page in shmem file
- * page_length = bytes to copy for this page
- */
- shmem_page_offset = offset_in_page(offset);
-
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
-
- /* If we don't overwrite a cacheline completely we need to be
- * careful to have up-to-date data by first clflushing. Don't
- * overcomplicate things and flush the entire patch. */
- partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
- ((shmem_page_offset | page_length)
- & (boot_cpu_data.x86_clflush_size - 1));
-
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
- if (ret == 0)
- goto next_page;
-
- hit_slowpath = 1;
- mutex_unlock(&dev->struct_mutex);
- ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
+static int
+i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ void __user *user_data;
+ u64 remain;
+ unsigned int obj_do_bit17_swizzling;
+ unsigned int partial_cacheline_write;
+ unsigned int needs_clflush;
+ unsigned int offset, idx;
+ int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
- if (ret)
- goto out;
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
-next_page:
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
+ obj_do_bit17_swizzling = 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ obj_do_bit17_swizzling = BIT(17);
-out:
- i915_gem_obj_finish_shmem_access(obj);
+ /* If we don't overwrite a cacheline completely we need to be
+ * careful to have up-to-date data by first clflushing. Don't
+ * overcomplicate things and flush the entire page.
+ */
+ partial_cacheline_write = 0;
+ if (needs_clflush & CLFLUSH_BEFORE)
+ partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
- if (hit_slowpath) {
- /*
- * Fixup: Flush cpu caches in case we didn't flush the dirty
- * cachelines in-line while writing and the object moved
- * out of the cpu write domain while we've dropped the lock.
- */
- if (!(needs_clflush & CLFLUSH_AFTER) &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- if (i915_gem_clflush_object(obj, obj->pin_display))
- needs_clflush |= CLFLUSH_AFTER;
- }
- }
+ user_data = u64_to_user_ptr(args->data_ptr);
+ remain = args->size;
+ offset = offset_in_page(args->offset);
+ for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+ struct page *page = i915_gem_object_get_page(obj, idx);
+ int length;
+
+ length = remain;
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ ret = shmem_pwrite(page, offset, length, user_data,
+ page_to_phys(page) & obj_do_bit17_swizzling,
+ (offset | length) & partial_cacheline_write,
+ needs_clflush & CLFLUSH_AFTER);
+ if (ret)
+ break;
- if (needs_clflush & CLFLUSH_AFTER)
- i915_gem_chipset_flush(to_i915(dev));
+ remain -= length;
+ user_data += length;
+ offset = 0;
+ }
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
}
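A small illustration, assuming x86 where boot_cpu_data.x86_clflush_size is a power of two, of the partial_cacheline_write test used in the loop above: with the mask set to clflush_size - 1, the pre-write clflush is needed exactly when the write does not start and end on cacheline boundaries.

static bool partial_cacheline(unsigned int offset, unsigned int length,
			      unsigned int clflush_size)
{
	/* Non-zero iff offset or length is not a whole number of cachelines. */
	return (offset | length) & (clflush_size - 1);
}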
@@ -1417,7 +1420,6 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1430,13 +1432,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- if (likely(!i915.prefault_disable)) {
- ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
- args->size);
- if (ret)
- return -EFAULT;
- }
-
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -1450,15 +1445,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
goto err;
- intel_runtime_pm_get(dev_priv);
-
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err_rpm;
+ goto err;
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -1468,30 +1465,23 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
- cpu_write_needs_clflush(obj)) {
- ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
+ cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
- * textures). Fallback to the shmem path in that case. */
- }
+ * textures). Fall back to the shmem path in that case.
+ */
+ ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
else
- ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ ret = i915_gem_shmem_pwrite(obj, args);
}
- i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
- return ret;
-
-err_rpm:
- intel_runtime_pm_put(dev_priv);
+ i915_gem_object_unpin_pages(obj);
err:
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
@@ -1502,6 +1492,30 @@ write_origin(struct drm_i915_gem_object *obj, unsigned domain)
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915;
+ struct list_head *list;
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ continue;
+
+ if (i915_vma_is_active(vma))
+ continue;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ }
+
+ i915 = to_i915(obj->base.dev);
+ list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
+ list_move_tail(&obj->global_link, list);
+}
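A generic sketch, with illustrative names, of the LRU convention the helper above relies on: inactive lists are kept in least-recently-used order, so bumping an entry on access is a list_move_tail() and eviction later scans from the head.

struct lru_list {
	struct list_head entries;	/* head = coldest, tail = most recently used */
};

static void lru_touch(struct lru_list *lru, struct list_head *entry)
{
	list_move_tail(entry, &lru->entries);
}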
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -1517,7 +1531,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
- int ret;
+ int err;
/* Only handle setting domains to types used by the CPU. */
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
@@ -1537,29 +1551,48 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
- if (ret)
- goto err;
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write_domain ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
+ if (err)
+ goto out;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err;
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out;
+
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out_unpin;
if (read_domains & I915_GEM_DOMAIN_GTT)
- ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
else
- ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+
+ /* And bump the LRU for this access */
+ i915_gem_object_bump_inactive_ggtt(obj);
+
+ mutex_unlock(&dev->struct_mutex);
if (write_domain != 0)
intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-
-err:
- i915_gem_object_put_unlocked(obj);
- return ret;
+ return err;
}
/**
@@ -1589,7 +1622,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return err;
}
@@ -1635,7 +1668,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* pages from.
*/
if (!obj->base.filp) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
@@ -1647,7 +1680,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct vm_area_struct *vma;
if (down_write_killable(&mm->mmap_sem)) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINTR;
}
vma = find_vma(mm, addr);
@@ -1661,7 +1694,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
/* This may race, but that's ok, it only gets set */
WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1773,7 +1806,14 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
* repeat the flush holding the lock in the normal manner to catch cases
* where we are gazumped.
*/
- ret = __unsafe_wait_rendering(obj, NULL, !write);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
@@ -1784,7 +1824,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
goto err_rpm;
/* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
ret = -EFAULT;
goto err_unlock;
}
@@ -1806,15 +1846,14 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Use a partial view if it is bigger than available space */
chunk_size = MIN_CHUNK_PAGES;
if (i915_gem_object_is_tiled(obj))
- chunk_size = max(chunk_size, tile_row_pages(obj));
+ chunk_size = roundup(chunk_size, tile_row_pages(obj));
memset(&view, 0, sizeof(view));
view.type = I915_GGTT_VIEW_PARTIAL;
view.params.partial.offset = rounddown(page_offset, chunk_size);
view.params.partial.size =
min_t(unsigned int, chunk_size,
- (area->vm_end - area->vm_start) / PAGE_SIZE -
- view.params.partial.offset);
+ vma_pages(area) - view.params.partial.offset);
/* If the partial covers the entire object, just create a
* normal VMA.
@@ -1842,22 +1881,25 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
if (ret)
goto err_unpin;
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(dev_priv);
+ if (list_empty(&obj->userfault_link))
+ list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
- if (ret)
- goto err_unpin;
- obj->fault_mappable = true;
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(dev_priv);
+ i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
case -EIO:
@@ -1919,15 +1961,23 @@ err:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
+ *
+ * Note that RPM complicates somewhat by adding an additional
+ * requirement that operations to the GGTT be made holding the RPM
+ * wakeref.
*/
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
- if (!obj->fault_mappable)
- return;
+ if (list_empty(&obj->userfault_link))
+ goto out;
+ list_del_init(&obj->userfault_link);
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
@@ -1940,16 +1990,45 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
*/
wmb();
- obj->fault_mappable = false;
+out:
+ intel_runtime_pm_put(i915);
}
-void
-i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj, *on;
+ int i;
+
+ /*
+ * Only called during RPM suspend. All users of the userfault_list
+ * must be holding an RPM wakeref to ensure that this can not
+ * run concurrently with themselves (and use the struct_mutex for
+ * protection between themselves).
+ */
+
+ list_for_each_entry_safe(obj, on,
+ &dev_priv->mm.userfault_list, userfault_link) {
+ list_del_init(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ }
+
+ /* The fence will be lost when the device powers down. If any were
+ * in use by hardware (i.e. they are pinned), we should not be powering
+ * down! All other fences will be reacquired by the user upon waking.
+ */
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ if (WARN_ON(reg->pin_count))
+ continue;
+
+ if (!reg->vma)
+ continue;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- i915_gem_release_mmap(obj);
+ GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ reg->dirty = true;
+ }
}
/**
@@ -2063,7 +2142,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
@@ -2106,16 +2185,18 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
- obj->madv = __I915_MADV_PURGED;
+ obj->mm.madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
-static void
-i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
struct address_space *mapping;
- switch (obj->madv) {
+ lockdep_assert_held(&obj->mm.lock);
+ GEM_BUG_ON(obj->mm.pages);
+
+ switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
case __I915_MADV_PURGED:
@@ -2130,85 +2211,119 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
}
static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
- int ret;
-
- BUG_ON(obj->madv == __I915_MADV_PURGED);
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- i915_gem_clflush_object(obj, true);
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
+ __i915_gem_object_release_shmem(obj, pages);
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_save_bit_17_swizzle(obj);
+ i915_gem_object_save_bit_17_swizzle(obj, pages);
- if (obj->madv == I915_MADV_DONTNEED)
- obj->dirty = 0;
-
- for_each_sgt_page(page, sgt_iter, obj->pages) {
- if (obj->dirty)
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
set_page_dirty(page);
- if (obj->madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
-int
-i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
- const struct drm_i915_gem_object_ops *ops = obj->ops;
+ struct radix_tree_iter iter;
+ void **slot;
- if (obj->pages == NULL)
- return 0;
+ radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+}
- if (obj->pages_pin_count)
- return -EBUSY;
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
GEM_BUG_ON(obj->bind_count);
+ if (!READ_ONCE(obj->mm.pages))
+ return;
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
- list_del(&obj->global_list);
+ pages = fetch_and_zero(&obj->mm.pages);
+ GEM_BUG_ON(!pages);
- if (obj->mapping) {
+ if (obj->mm.mapping) {
void *ptr;
- ptr = ptr_mask_bits(obj->mapping);
+ ptr = ptr_mask_bits(obj->mm.mapping);
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
- obj->mapping = NULL;
+ obj->mm.mapping = NULL;
}
- ops->put_pages(obj);
- obj->pages = NULL;
+ __i915_gem_object_reset_page_iter(obj);
- i915_gem_object_invalidate(obj);
+ obj->ops->put_pages(obj, pages);
+unlock:
+ mutex_unlock(&obj->mm.lock);
+}
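A hedged sketch, with hypothetical names, of the lockdep-subclass pattern behind the mutex_lock_nested() call above: because the shrinker can re-enter and take the same lock class on a different object while populating pages, each nesting level gets its own subclass so lockdep does not report a false self-deadlock.

enum obj_lock_subclass {
	OBJ_LOCK_NORMAL = 0,
	OBJ_LOCK_SHRINKER,
};

static void release_backing_store(struct mutex *obj_lock,
				  enum obj_lock_subclass subclass)
{
	mutex_lock_nested(obj_lock, subclass);
	/* ... drop the object's pages while holding its own lock ... */
	mutex_unlock(obj_lock);
}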
+static unsigned int swiotlb_max_size(void)
+{
+#if IS_ENABLED(CONFIG_SWIOTLB)
+ return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
+#else
return 0;
+#endif
}
-static int
+static void i915_sg_trim(struct sg_table *orig_st)
+{
+ struct sg_table new_st;
+ struct scatterlist *sg, *new_sg;
+ unsigned int i;
+
+ if (orig_st->nents == orig_st->orig_nents)
+ return;
+
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+ return;
+
+ new_sg = new_st.sgl;
+ for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, 0);
+ /* called before being DMA mapped, no need to copy sg->dma_* */
+ new_sg = sg_next(new_sg);
+ }
+
+ sg_free_table(orig_st);
+
+ *orig_st = new_st;
+}
+
+static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2219,6 +2334,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
+ unsigned int max_segment;
int ret;
gfp_t gfp;
@@ -2226,17 +2342,21 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ max_segment = swiotlb_max_size();
+ if (!max_segment)
+ max_segment = rounddown(UINT_MAX, PAGE_SIZE);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
/* Get the list of pages out of our struct file. They'll be pinned
@@ -2264,22 +2384,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
- i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- goto err_pages;
+ goto err_sg;
}
}
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- st->nents++;
- sg_set_page(sg, page, PAGE_SIZE, 0);
- sg = sg_next(sg);
- continue;
- }
-#endif
- if (!i || page_to_pfn(page) != last_pfn + 1) {
+ if (!i ||
+ sg->length >= max_segment ||
+ page_to_pfn(page) != last_pfn + 1) {
if (i)
sg = sg_next(sg);
st->nents++;
@@ -2292,27 +2405,24 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
-#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
-#endif
+ if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
- obj->pages = st;
- ret = i915_gem_gtt_prepare_object(obj);
+ /* Trim unused sg entries to avoid wasting memory. */
+ i915_sg_trim(st);
+
+ ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret)
goto err_pages;
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj);
+ i915_gem_object_do_bit_17_swizzle(obj, st);
- if (i915_gem_object_is_tiled(obj) &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
- i915_gem_object_pin_pages(obj);
+ return st;
- return 0;
-
-err_pages:
+err_sg:
sg_mark_end(sg);
+err_pages:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
@@ -2329,43 +2439,73 @@ err_pages:
if (ret == -ENOSPC)
ret = -ENOMEM;
- return ret;
+ return ERR_PTR(ret);
}
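A short sketch, not part of the patch, of the two decisions the allocation loop above makes while building the sg table: treat a zero result from swiotlb_max_size() as effectively unlimited, and start a new scatterlist entry whenever pages stop being physically contiguous or the current entry would exceed that limit (the first page of the object also always starts a new entry).

static bool needs_new_sg_entry(const struct scatterlist *sg,
			       unsigned long pfn, unsigned long last_pfn,
			       unsigned int max_segment)
{
	if (!max_segment)
		max_segment = rounddown(UINT_MAX, PAGE_SIZE);	/* no SWIOTLB limit */

	return sg->length >= max_segment || pfn != last_pfn + 1;
}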
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_get_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_put_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int
-i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- const struct drm_i915_gem_object_ops *ops = obj->ops;
- int ret;
+ lockdep_assert_held(&obj->mm.lock);
- if (obj->pages)
- return 0;
+ obj->mm.get_page.sg_pos = pages->sgl;
+ obj->mm.get_page.sg_idx = 0;
- if (obj->madv != I915_MADV_WILLNEED) {
+ obj->mm.pages = pages;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *pages;
+
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
- BUG_ON(obj->pages_pin_count);
+ pages = obj->ops->get_pages(obj);
+ if (unlikely(IS_ERR(pages)))
+ return PTR_ERR(pages);
- ret = ops->get_pages(obj);
- if (ret)
- return ret;
+ __i915_gem_object_set_pages(obj, pages);
+ return 0;
+}
- list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return err;
- return 0;
+ if (unlikely(!obj->mm.pages)) {
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
@@ -2373,7 +2513,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->pages;
+ struct sg_table *sgt = obj->mm.pages;
struct sgt_iter sgt_iter;
struct page *page;
struct page *stack_pages[32];
@@ -2424,21 +2564,31 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
- ret = i915_gem_object_get_pages(obj);
+ ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
return ERR_PTR(ret);
- i915_gem_object_pin_pages(obj);
- pinned = obj->pages_pin_count > 1;
+ pinned = true;
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ if (unlikely(!obj->mm.pages)) {
+ ret = ____i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+ pinned = false;
+ }
+ GEM_BUG_ON(!obj->mm.pages);
- ptr = ptr_unpack_bits(obj->mapping, has_type);
+ ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
if (ptr && has_type != type) {
if (pinned) {
ret = -EBUSY;
- goto err;
+ goto err_unpin;
}
if (is_vmalloc_addr(ptr))
@@ -2446,59 +2596,28 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
else
kunmap(kmap_to_page(ptr));
- ptr = obj->mapping = NULL;
+ ptr = obj->mm.mapping = NULL;
}
if (!ptr) {
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
ret = -ENOMEM;
- goto err;
+ goto err_unpin;
}
- obj->mapping = ptr_pack_bits(ptr, type);
+ obj->mm.mapping = ptr_pack_bits(ptr, type);
}
+out_unlock:
+ mutex_unlock(&obj->mm.lock);
return ptr;
-err:
- i915_gem_object_unpin_pages(obj);
- return ERR_PTR(ret);
-}
-
-static void
-i915_gem_object_retire__write(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, struct drm_i915_gem_object, last_write);
-
- intel_fb_obj_flush(obj, true, ORIGIN_CS);
-}
-
-static void
-i915_gem_object_retire__read(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- int idx = request->engine->id;
- struct drm_i915_gem_object *obj =
- container_of(active, struct drm_i915_gem_object, last_read[idx]);
-
- GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
-
- i915_gem_object_clear_active(obj, idx);
- if (i915_gem_object_is_active(obj))
- return;
-
- /* Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (obj->bind_count)
- list_move_tail(&obj->global_list,
- &request->i915->mm.bound_list);
-
- i915_gem_object_put(obj);
+err_unpin:
+ atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+ ptr = ERR_PTR(ret);
+ goto out_unlock;
}
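A condensed sketch of the locked pin path i915_gem_object_pin_map() implements above, using the names this patch introduces (obj->mm.lock is assumed held by the caller): reuse an existing pin when the count is already non-zero, otherwise populate the pages and publish them before the first pin becomes visible.

static int pin_pages_locked(struct drm_i915_gem_object *obj)
{
	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;			/* piggyback on an existing pin */

	if (!obj->mm.pages) {
		int err = ____i915_gem_object_get_pages(obj);

		if (err)
			return err;
		smp_mb__before_atomic();	/* publish pages before the pin count */
	}
	atomic_inc(&obj->mm.pages_pin_count);
	return 0;
}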
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2545,13 +2664,10 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
- list_for_each_entry(request, &engine->request_list, link) {
- if (i915_gem_request_completed(request))
+ list_for_each_entry(request, &engine->timeline->requests, link) {
+ if (__i915_gem_request_completed(request))
continue;
- if (!i915_sw_fence_done(&request->submit))
- break;
-
return request;
}
@@ -2579,10 +2695,9 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
struct i915_gem_context *incomplete_ctx;
+ struct intel_timeline *timeline;
bool ring_hung;
- /* Ensure irq handler finishes, and not run again. */
- tasklet_kill(&engine->irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -2591,12 +2706,15 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+ if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
+ ring_hung = false;
+
i915_set_reset_status(request->ctx, ring_hung);
if (!ring_hung)
return;
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->fence.seqno);
+ engine->name, request->global_seqno);
/* Setup the CS to resume from the breadcrumb of the hung request */
engine->reset_hw(engine, request);
@@ -2613,21 +2731,28 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (i915_gem_context_is_default(incomplete_ctx))
return;
- list_for_each_entry_continue(request, &engine->request_list, link)
+ list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
+
+ timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+ list_for_each_entry(request, &timeline->requests, link)
+ reset_request(request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
i915_gem_retire_requests(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_reset_engine(engine);
- i915_gem_restore_fences(&dev_priv->drm);
+ i915_gem_restore_fences(dev_priv);
if (dev_priv->gt.awake) {
intel_sanitize_gt_powersave(dev_priv);
@@ -2649,7 +2774,8 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
- intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+ intel_engine_init_global_seqno(engine,
+ intel_engine_last_submit(engine));
/*
* Clear the execlists queue up before freeing the requests, as those
@@ -2658,26 +2784,30 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
*/
if (i915.enable_execlists) {
- spin_lock(&engine->execlist_lock);
- INIT_LIST_HEAD(&engine->execlist_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
i915_gem_request_put(engine->execlist_port[0].request);
i915_gem_request_put(engine->execlist_port[1].request);
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- spin_unlock(&engine->execlist_lock);
- }
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
- engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ }
}
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
i915_gem_context_lost(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_cleanup_engine(engine);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
@@ -2716,12 +2846,20 @@ i915_gem_idle_work_handler(struct work_struct *work)
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
return;
- if (READ_ONCE(dev_priv->gt.active_engines))
+ /*
+ * Wait for last execlists context complete, but bail out in case a
+ * new request is submitted.
+ */
+ wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
+ intel_execlists_idle(dev_priv), 10);
+
+ if (READ_ONCE(dev_priv->gt.active_requests))
return;
rearm_hangcheck =
@@ -2735,10 +2873,20 @@ i915_gem_idle_work_handler(struct work_struct *work)
goto out_rearm;
}
- if (dev_priv->gt.active_engines)
+ /*
+ * New request retired after this work handler started, extend active
+ * period until next instance of the work.
+ */
+ if (work_pending(work))
+ goto out_unlock;
+
+ if (dev_priv->gt.active_requests)
goto out_unlock;
- for_each_engine(engine, dev_priv)
+ if (wait_for(intel_execlists_idle(dev_priv), 10))
+ DRM_ERROR("Timeout waiting for engines to idle\n");
+
+ for_each_engine(engine, dev_priv, id)
i915_gem_batch_pool_fini(&engine->batch_pool);
GEM_BUG_ON(!dev_priv->gt.awake);
@@ -2768,9 +2916,26 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
if (vma->vm->file == fpriv)
i915_vma_close(vma);
+
+ if (i915_gem_object_is_active(obj) &&
+ !i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_set_active_reference(obj);
+ i915_gem_object_get(obj);
+ }
mutex_unlock(&obj->base.dev->struct_mutex);
}
+static unsigned long to_wait_timeout(s64 timeout_ns)
+{
+ if (timeout_ns < 0)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ if (timeout_ns == 0)
+ return 0;
+
+ return nsecs_to_jiffies_timeout(timeout_ns);
+}
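A hypothetical caller, mirroring the ioctl below, to show how the three cases above are consumed: a negative user timeout waits indefinitely (MAX_SCHEDULE_TIMEOUT), zero only reports the current state, and a positive value is converted to a bounded number of jiffies.

static int example_wait_on_object(struct drm_i915_gem_object *obj,
				  s64 timeout_ns)
{
	return i915_gem_object_wait(obj,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				    to_wait_timeout(timeout_ns),
				    NULL);
}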
+
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @dev: drm device pointer
@@ -2799,10 +2964,9 @@ int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_gem_wait *args = data;
- struct intel_rps_client *rps = to_rps_client(file);
struct drm_i915_gem_object *obj;
- unsigned long active;
- int idx, ret = 0;
+ ktime_t start;
+ long ret;
if (args->flags != 0)
return -EINVAL;
@@ -2811,133 +2975,29 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (!obj)
return -ENOENT;
- active = __I915_BO_ACTIVE(obj);
- for_each_active(active, idx) {
- s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
- ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
- I915_WAIT_INTERRUPTIBLE,
- timeout, rps);
- if (ret)
- break;
- }
-
- i915_gem_object_put_unlocked(obj);
- return ret;
-}
-
-static void __i915_vma_iounmap(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_pinned(vma));
-
- if (vma->iomap == NULL)
- return;
-
- io_mapping_unmap(vma->iomap);
- vma->iomap = NULL;
-}
-
-int i915_vma_unbind(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- unsigned long active;
- int ret;
-
- /* First wait upon any activity as retiring the request may
- * have side-effects such as unpinning or even unbinding this vma.
- */
- active = i915_vma_get_active(vma);
- if (active) {
- int idx;
-
- /* When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- */
- __i915_vma_pin(vma);
+ start = ktime_get();
- for_each_active(active, idx) {
- ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->dev->struct_mutex);
- if (ret)
- break;
- }
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+ to_wait_timeout(args->timeout_ns),
+ to_rps_client(file));
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
-
- GEM_BUG_ON(i915_vma_is_active(vma));
+ if (args->timeout_ns > 0) {
+ args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (args->timeout_ns < 0)
+ args->timeout_ns = 0;
}
- if (i915_vma_is_pinned(vma))
- return -EBUSY;
-
- if (!drm_mm_node_allocated(&vma->node))
- goto destroy;
-
- GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!obj->pages);
-
- if (i915_vma_is_map_and_fenceable(vma)) {
- /* release the fence reg _after_ flushing */
- ret = i915_vma_put_fence(vma);
- if (ret)
- return ret;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
-
- __i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
- }
-
- if (likely(!vma->vm->closed)) {
- trace_i915_vma_unbind(vma);
- vma->vm->unbind_vma(vma);
- }
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
-
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
- if (vma->pages != obj->pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- /* Since the unbound list is global, only move to that list if
- * no more VMAs exist. */
- if (--obj->bind_count == 0)
- list_move_tail(&obj->global_list,
- &to_i915(obj->base.dev)->mm.unbound_list);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
-
-destroy:
- if (unlikely(i915_vma_is_closed(vma)))
- i915_vma_destroy(vma);
-
- return 0;
+ i915_gem_object_put(obj);
+ return ret;
}
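A minimal sketch of the timeout bookkeeping in the ioctl above: the elapsed wall-clock time is subtracted from the user's timeout and clamped at zero, so a repeated or restarted wait does not start from the full timeout again. The helper name is illustrative.

static s64 remaining_timeout_ns(ktime_t start, s64 timeout_ns)
{
	s64 left = timeout_ns - ktime_to_ns(ktime_sub(ktime_get(), start));

	return left > 0 ? left : 0;
}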
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags)
+static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
- struct intel_engine_cs *engine;
- int ret;
+ int ret, i;
- for_each_engine(engine, dev_priv) {
- if (engine->last_context == NULL)
- continue;
-
- ret = intel_engine_idle(engine, flags);
+ for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+ ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
if (ret)
return ret;
}
@@ -2945,187 +3005,45 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
return 0;
}
-static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
- unsigned long cache_level)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
- struct drm_mm_node *gtt_space = &vma->node;
- struct drm_mm_node *other;
-
- /*
- * On some machines we have to be careful when putting differing types
- * of snoopable memory together to avoid the prefetcher crossing memory
- * domains and dying. During vm initialisation, we decide whether or not
- * these constraints apply and set the drm_mm.color_adjust
- * appropriately.
- */
- if (vma->vm->mm.color_adjust == NULL)
- return true;
-
- if (!drm_mm_node_allocated(gtt_space))
- return true;
-
- if (list_empty(&gtt_space->node_list))
- return true;
-
- other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
- if (other->allocated && !other->hole_follows && other->color != cache_level)
- return false;
-
- other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
- if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
- return false;
-
- return true;
-}
-
-/**
- * i915_vma_insert - finds a slot for the vma in its address space
- * @vma: the vma
- * @size: requested size in bytes (can be larger than the VMA)
- * @alignment: required alignment
- * @flags: mask of PIN_* flags to use
- *
- * First we try to allocate some free space that meets the requirements for
- * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
- * preferrably the oldest idle entry to make room for the new VMA.
- *
- * Returns:
- * 0 on success, negative error code otherwise.
- */
-static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
- struct drm_i915_gem_object *obj = vma->obj;
- u64 start, end;
int ret;
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
-
- size = max(size, vma->size);
- if (flags & PIN_MAPPABLE)
- size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
-
- alignment = max(max(alignment, vma->display_alignment),
- i915_gem_get_ggtt_alignment(dev_priv, size,
- i915_gem_object_get_tiling(obj),
- flags & PIN_MAPPABLE));
-
- start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-
- end = vma->vm->total;
- if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
- if (flags & PIN_ZONE_4G)
- end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
-
- /* If binding the object/GGTT view requires more space than the entire
- * aperture has, reject it early before evicting everything in a vain
- * attempt to find space.
- */
- if (size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
- size, obj->base.size,
- flags & PIN_MAPPABLE ? "mappable" : "total",
- end);
- return -E2BIG;
- }
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- return ret;
-
- i915_gem_object_pin_pages(obj);
+ if (flags & I915_WAIT_LOCKED) {
+ struct i915_gem_timeline *tl;
- if (flags & PIN_OFFSET_FIXED) {
- u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) || offset > end - size) {
- ret = -EINVAL;
- goto err_unpin;
- }
+ lockdep_assert_held(&i915->drm.struct_mutex);
- vma->node.start = offset;
- vma->node.size = size;
- vma->node.color = obj->cache_level;
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret) {
- ret = i915_gem_evict_for_vma(vma);
- if (ret == 0)
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ ret = wait_for_timeline(tl, flags);
if (ret)
- goto err_unpin;
+ return ret;
}
} else {
- u32 search_flag, alloc_flag;
-
- if (flags & PIN_HIGH) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
- } else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
- }
-
- /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
- * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results
- * with zero alignment, so where possible use the optimal
- * path.
- */
- if (alignment <= 4096)
- alignment = 0;
-
-search_free:
- ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
- &vma->node,
- size, alignment,
- obj->cache_level,
- start, end,
- search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(vma->vm, size, alignment,
- obj->cache_level,
- start, end,
- flags);
- if (ret == 0)
- goto search_free;
-
- goto err_unpin;
- }
+ ret = wait_for_timeline(&i915->gt.global_timeline, flags);
+ if (ret)
+ return ret;
}
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
-
- list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- obj->bind_count++;
return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
}
-bool
-i915_gem_clflush_object(struct drm_i915_gem_object *obj,
- bool force)
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj->pages == NULL)
- return false;
+ if (!obj->mm.pages)
+ return;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen || obj->phys_handle)
- return false;
+ return;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
@@ -3137,14 +3055,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
*/
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
obj->cache_dirty = true;
- return false;
+ return;
}
trace_i915_gem_object_clflush(obj);
- drm_clflush_sg(obj->pages);
+ drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
-
- return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3173,7 +3089,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
- POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
+ POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
@@ -3190,9 +3106,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
-
+ i915_gem_clflush_object(obj, obj->pin_display);
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
obj->base.write_domain = 0;
@@ -3201,24 +3115,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
I915_GEM_DOMAIN_CPU);
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- continue;
-
- if (i915_vma_is_active(vma))
- continue;
-
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- }
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
* @obj: object to act on
@@ -3233,7 +3129,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_wait_rendering(obj, !write);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3248,7 +3151,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
@@ -3267,21 +3170,19 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
- obj->dirty = 1;
+ obj->mm.dirty = true;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
+ i915_gem_object_unpin_pages(obj);
return 0;
}
@@ -3304,10 +3205,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct i915_vma *vma;
- int ret = 0;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->cache_level == cache_level)
- goto out;
+ return 0;
/* Inspect the list of currently bound VMA and unbind any that would
* be invalid given the new cache-level. This is principally to
@@ -3350,11 +3253,17 @@ restart:
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
+ if (!HAS_LLC(to_i915(obj->base.dev)) &&
+ cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmaping to force
@@ -3396,20 +3305,14 @@ restart:
}
}
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
+ cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ obj->cache_dirty = true;
+
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
-out:
- /* Flush the dirty CPU caches to the backing storage so that the
- * object is now coherent at its new cache level (with respect
- * to the access domain).
- */
- if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
- if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
- }
-
return 0;
}
@@ -3418,10 +3321,14 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
+ int err = 0;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto out;
+ }
switch (obj->cache_level) {
case I915_CACHE_LLC:
@@ -3437,15 +3344,15 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
args->caching = I915_CACHING_NONE;
break;
}
-
- i915_gem_object_put_unlocked(obj);
- return 0;
+out:
+ rcu_read_unlock();
+ return err;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
@@ -3462,23 +3369,21 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
return -ENODEV;
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
- level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+ level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto rpm_put;
+ return ret;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj) {
@@ -3487,13 +3392,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
}
ret = i915_gem_object_set_cache_level(obj, level);
-
i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
-rpm_put:
- intel_runtime_pm_put(dev_priv);
-
return ret;
}
@@ -3511,6 +3412,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
@@ -3526,7 +3429,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+ HAS_WT(to_i915(obj->base.dev)) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
goto err_unpin_display;
@@ -3543,16 +3447,32 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL)
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
PIN_MAPPABLE | PIN_NONBLOCK);
- if (IS_ERR(vma))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+ if (IS_ERR(vma)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int flags;
+
+ /* Valleyview is definitely limited to scanning out the first
+ * 512MiB. Let's presume this behaviour was inherited from the
+ * g4x display engine and that all earlier gen are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+ * happy to scan out from anywhere within its global aperture.
+ */
+ flags = 0;
+ if (HAS_GMCH_DISPLAY(i915))
+ flags = PIN_MAPPABLE;
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+ }
if (IS_ERR(vma))
goto err_unpin_display;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
-
- i915_gem_object_flush_cpu_write_domain(obj);
+ /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
+ if (obj->cache_dirty) {
+ i915_gem_clflush_object(obj, true);
+ intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+ }
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3577,6 +3497,8 @@ err_unpin_display:
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+
if (WARN_ON(vma->obj->pin_display == 0))
return;
@@ -3588,7 +3510,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
i915_vma_unpin(vma);
- WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
}
/**
@@ -3605,7 +3526,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_wait_rendering(obj, !write);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3627,7 +3555,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -3661,11 +3589,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
- int ret;
-
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
- if (ret)
- return ret;
+ long ret;
/* ABI: return -EIO if already wedged */
if (i915_terminally_wedged(&dev_priv->gpu_error))
@@ -3692,98 +3616,12 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+ ret = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(target);
- return ret;
-}
-
-static bool
-i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- if (!drm_mm_node_allocated(&vma->node))
- return false;
-
- if (vma->node.size < size)
- return true;
-
- if (alignment && vma->node.start & (alignment - 1))
- return true;
-
- if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
- return true;
-
- if (flags & PIN_OFFSET_BIAS &&
- vma->node.start < (flags & PIN_OFFSET_MASK))
- return true;
-
- if (flags & PIN_OFFSET_FIXED &&
- vma->node.start != (flags & PIN_OFFSET_MASK))
- return true;
-
- return false;
-}
-
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj));
- fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj),
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + fence_size <=
- dev_priv->ggtt.mappable_end);
-
- if (mappable && fenceable)
- vma->flags |= I915_VMA_CAN_FENCE;
- else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
-{
- unsigned int bound = vma->flags;
- int ret;
-
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
-
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err;
- }
-
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err;
- }
-
- ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
- if (ret)
- goto err;
-
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
-
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
-
-err:
- __i915_vma_unpin(vma);
- return ret;
+ return ret < 0 ? ret : 0;
}
struct i915_vma *
@@ -3793,10 +3631,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags)
{
- struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct i915_vma *vma;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
if (IS_ERR(vma))
return vma;
@@ -3806,6 +3647,41 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
(i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
return ERR_PTR(-ENOSPC);
+ if (flags & PIN_MAPPABLE) {
+ u32 fence_size;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
+ i915_gem_object_get_tiling(obj));
+ /* If the required space is larger than the available
+ * aperture, we will not be able to find a slot for the
+ * object and unbinding the object now will be in
+ * vain. Worse, doing so may cause us to ping-pong
+ * the object in and out of the Global GTT and
+ * waste a lot of cycles under the mutex.
+ */
+ if (fence_size > dev_priv->ggtt.mappable_end)
+ return ERR_PTR(-E2BIG);
+
+ /* If NONBLOCK is set the caller is optimistically
+ * trying to cache the full object within the mappable
+ * aperture, and *must* have a fallback in place for
+ * situations where we cannot bind the object. We
+ * can be a little more lax here and use the fallback
+ * more often to avoid costly migrations of ourselves
+ * and other objects within the aperture.
+ *
+ * Half-the-aperture is used as a simple heuristic.
+ * More interesting would be to search for a free
+ * block prior to making the commitment to unbind.
+ * That caters for the self-harm case, and with a
+ * little more heuristics (e.g. NOFAULT, NOEVICT)
+ * we could try to minimise harm to others.
+ */
+ if (flags & PIN_NONBLOCK &&
+ fence_size > dev_priv->ggtt.mappable_end / 2)
+ return ERR_PTR(-ENOSPC);
+ }
+
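(Editor's sketch, not part of the patch: the sizing policy described in the comments above, reduced to plain numbers. The helper name and the 256 MiB aperture are made up for illustration; in the driver the outcomes map to -E2BIG, -ENOSPC and a normal pin attempt.)

#include <stdio.h>
#include <stdbool.h>

enum { TRY_PIN, TOO_BIG, TRY_FALLBACK };

static int mappable_pin_policy(unsigned long long fence_size,
			       unsigned long long mappable_end,
			       bool nonblock)
{
	if (fence_size > mappable_end)
		return TOO_BIG;		/* can never fit: -E2BIG */
	if (nonblock && fence_size > mappable_end / 2)
		return TRY_FALLBACK;	/* -ENOSPC; caller retries without PIN_MAPPABLE */
	return TRY_PIN;
}

int main(void)
{
	unsigned long long aperture = 256ull << 20;	/* pretend 256 MiB is mappable */

	printf("%d %d %d\n",
	       mappable_pin_policy(64ull << 20, aperture, true),	/* TRY_PIN */
	       mappable_pin_policy(200ull << 20, aperture, true),	/* TRY_FALLBACK */
	       mappable_pin_policy(512ull << 20, aperture, false));	/* TOO_BIG */
	return 0;
}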
WARN(i915_vma_is_pinned(vma),
"bo is already pinned in ggtt with incorrect alignment:"
" offset=%08x, req.alignment=%llx,"
@@ -3852,83 +3728,42 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
}
static __always_inline unsigned int
-__busy_set_if_active(const struct i915_gem_active *active,
+__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
{
- struct drm_i915_gem_request *request;
+ struct drm_i915_gem_request *rq;
- request = rcu_dereference(active->request);
- if (!request || i915_gem_request_completed(request))
- return 0;
-
- /* This is racy. See __i915_gem_active_get_rcu() for an in detail
- * discussion of how to handle the race correctly, but for reporting
- * the busy state we err on the side of potentially reporting the
- * wrong engine as being busy (but we guarantee that the result
- * is at least self-consistent).
- *
- * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
- * whilst we are inspecting it, even under the RCU read lock as we are.
- * This means that there is a small window for the engine and/or the
- * seqno to have been overwritten. The seqno will always be in the
- * future compared to the intended, and so we know that if that
- * seqno is idle (on whatever engine) our request is idle and the
- * return 0 above is correct.
- *
- * The issue is that if the engine is switched, it is just as likely
- * to report that it is busy (but since the switch happened, we know
- * the request should be idle). So there is a small chance that a busy
- * result is actually the wrong engine.
+ /* We have to check the current hw status of the fence as the uABI
+ * guarantees forward progress. We could rely on the idle worker
+ * to eventually flush us, but to minimise latency just ask the
+ * hardware.
*
- * So why don't we care?
- *
- * For starters, the busy ioctl is a heuristic that is by definition
- * racy. Even with perfect serialisation in the driver, the hardware
- * state is constantly advancing - the state we report to the user
- * is stale.
- *
- * The critical information for the busy-ioctl is whether the object
- * is idle as userspace relies on that to detect whether its next
- * access will stall, or if it has missed submitting commands to
- * the hardware allowing the GPU to stall. We never generate a
- * false-positive for idleness, thus busy-ioctl is reliable at the
- * most fundamental level, and we maintain the guarantee that a
- * busy object left to itself will eventually become idle (and stay
- * idle!).
- *
- * We allow ourselves the leeway of potentially misreporting the busy
- * state because that is an optimisation heuristic that is constantly
- * in flux. Being quickly able to detect the busy/idle state is much
- * more important than accurate logging of exactly which engines were
- * busy.
- *
- * For accuracy in reporting the engine, we could use
- *
- * result = 0;
- * request = __i915_gem_active_get_rcu(active);
- * if (request) {
- * if (!i915_gem_request_completed(request))
- * result = flag(request->engine->exec_id);
- * i915_gem_request_put(request);
- * }
- *
- * but that still remains susceptible to both hardware and userspace
- * races. So we accept making the result of that race slightly worse,
- * given the rarity of the race and its low impact on the result.
+ * Note we only report on the status of native fences.
*/
- return flag(READ_ONCE(request->engine->exec_id));
+ if (!dma_fence_is_i915(fence))
+ return 0;
+
+ /* opencode to_request() in order to avoid const warnings */
+ rq = container_of(fence, struct drm_i915_gem_request, fence);
+ if (i915_gem_request_completed(rq))
+ return 0;
+
+ return flag(rq->engine->exec_id);
}
static __always_inline unsigned int
-busy_check_reader(const struct i915_gem_active *active)
+busy_check_reader(const struct dma_fence *fence)
{
- return __busy_set_if_active(active, __busy_read_flag);
+ return __busy_set_if_active(fence, __busy_read_flag);
}
static __always_inline unsigned int
-busy_check_writer(const struct i915_gem_active *active)
+busy_check_writer(const struct dma_fence *fence)
{
- return __busy_set_if_active(active, __busy_write_id);
+ if (!fence)
+ return 0;
+
+ return __busy_set_if_active(fence, __busy_write_id);
}
int
@@ -3937,64 +3772,58 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- unsigned long active;
+ struct reservation_object_list *list;
+ unsigned int seq;
+ int err;
- obj = i915_gem_object_lookup(file, args->handle);
+ err = -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
if (!obj)
- return -ENOENT;
+ goto out;
- args->busy = 0;
- active = __I915_BO_ACTIVE(obj);
- if (active) {
- int idx;
+ /* A discrepancy here is that we do not report the status of
+ * non-i915 fences, i.e. even though we may report the object as idle,
+ * a call to set-domain may still stall waiting for foreign rendering.
+ * This also means that wait-ioctl may report an object as busy,
+ * where busy-ioctl considers it idle.
+ *
+ * We trade the ability to warn of foreign fences to report on which
+ * i915 engines are active for the object.
+ *
+ * Alternatively, we can trade that extra information on read/write
+ * activity with
+ * args->busy =
+ * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * to report the overall busyness. This is what the wait-ioctl does.
+ *
+ */
+retry:
+ seq = raw_read_seqcount(&obj->resv->seq);
- /* Yes, the lookups are intentionally racy.
- *
- * First, we cannot simply rely on __I915_BO_ACTIVE. We have
- * to regard the value as stale and as our ABI guarantees
- * forward progress, we confirm the status of each active
- * request with the hardware.
- *
- * Even though we guard the pointer lookup by RCU, that only
- * guarantees that the pointer and its contents remain
- * dereferencable and does *not* mean that the request we
- * have is the same as the one being tracked by the object.
- *
- * Consider that we lookup the request just as it is being
- * retired and freed. We take a local copy of the pointer,
- * but before we add its engine into the busy set, the other
- * thread reallocates it and assigns it to a task on another
- * engine with a fresh and incomplete seqno. Guarding against
- * that requires careful serialisation and reference counting,
- * i.e. using __i915_gem_active_get_request_rcu(). We don't,
- * instead we expect that if the result is busy, which engines
- * are busy is not completely reliable - we only guarantee
- * that the object was busy.
- */
- rcu_read_lock();
+ /* Translate the exclusive fence to the READ *and* WRITE engine */
+ args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
- for_each_active(active, idx)
- args->busy |= busy_check_reader(&obj->last_read[idx]);
+ /* Translate shared fences to READ set of engines */
+ list = rcu_dereference(obj->resv->fence);
+ if (list) {
+ unsigned int shared_count = list->shared_count, i;
- /* For ABI sanity, we only care that the write engine is in
- * the set of read engines. This should be ensured by the
- * ordering of setting last_read/last_write in
- * i915_vma_move_to_active(), and then in reverse in retire.
- * However, for good measure, we always report the last_write
- * request as a busy read as well as being a busy write.
- *
- * We don't care that the set of active read/write engines
- * may change during construction of the result, as it is
- * equally liable to change before userspace can inspect
- * the result.
- */
- args->busy |= busy_check_writer(&obj->last_write);
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *fence =
+ rcu_dereference(list->shared[i]);
- rcu_read_unlock();
+ args->busy |= busy_check_reader(fence);
+ }
}
- i915_gem_object_put_unlocked(obj);
- return 0;
+ if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+ goto retry;
+
+ err = 0;
+out:
+ rcu_read_unlock();
+ return err;
}
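(Editor's aside, not part of the patch: the rewritten busy-ioctl samples the reservation object under its seqcount and retries when a writer races with the sample; note it only retries when it actually saw a busy result, since an idle answer is already conservative. The userspace sketch below models that read/retry shape with a plain C11 sequence counter; the names and the toy seqlock are illustrative assumptions, not the kernel's seqcount API.)

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;	/* writers make this odd while updating */
static unsigned int busy_flags;		/* stands in for the fence-derived state */

static unsigned int read_busy(void)
{
	unsigned int s, result;

	do {
		/* wait out any in-progress writer (odd sequence) */
		do {
			s = atomic_load_explicit(&seq, memory_order_acquire);
		} while (s & 1);

		result = busy_flags;	/* racy sample, as in the ioctl */

		/* if the sequence moved, a writer raced us: sample again */
	} while (atomic_load_explicit(&seq, memory_order_acquire) != s);

	return result;
}

int main(void)
{
	busy_flags = 0x3;	/* pretend two read engines were active */
	printf("busy=%#x\n", read_busy());
	return 0;
}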
int
@@ -4011,7 +3840,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
- int ret;
+ int err;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4021,77 +3850,111 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
obj = i915_gem_object_lookup(file_priv, args->handle);
- if (!obj) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (!obj)
+ return -ENOENT;
- if (obj->pages &&
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ goto out;
+
+ if (obj->mm.pages &&
i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (obj->madv == I915_MADV_WILLNEED)
- i915_gem_object_unpin_pages(obj);
- if (args->madv == I915_MADV_WILLNEED)
- i915_gem_object_pin_pages(obj);
+ if (obj->mm.madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (args->madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
}
- if (obj->madv != __I915_MADV_PURGED)
- obj->madv = args->madv;
+ if (obj->mm.madv != __I915_MADV_PURGED)
+ obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
+ if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
i915_gem_object_truncate(obj);
- args->retained = obj->madv != __I915_MADV_PURGED;
+ args->retained = obj->mm.madv != __I915_MADV_PURGED;
+ mutex_unlock(&obj->mm.lock);
+out:
i915_gem_object_put(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return err;
+}
+
+static void
+frontbuffer_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(active, typeof(*obj), frontbuffer_write);
+
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
- int i;
+ mutex_init(&obj->mm.lock);
- INIT_LIST_HEAD(&obj->global_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_request_active(&obj->last_read[i],
- i915_gem_object_retire__read);
- init_request_active(&obj->last_write,
- i915_gem_object_retire__write);
+ INIT_LIST_HEAD(&obj->global_link);
+ INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
obj->ops = ops;
+ reservation_object_init(&obj->__builtin_resv);
+ obj->resv = &obj->__builtin_resv;
+
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- obj->madv = I915_MADV_WILLNEED;
+ init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
+
+ obj->mm.madv = I915_MADV_WILLNEED;
+ INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->mm.get_page.lock);
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
};
-struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- size_t size)
+/* Note we don't consider signbits :| */
+#define overflows_type(x, T) \
+ (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+
+struct drm_i915_gem_object *
+i915_gem_object_create(struct drm_device *dev, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
int ret;
+ /* There is a prevalence of the assumption that we fit the object's
+ * page count inside a 32bit _signed_ variable. Let's document this and
+ * catch if we ever need to fix it. In the meantime, if you do spot
+ * such a local variable, please consider fixing!
+ */
+ if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
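(Editor's aside, not part of the patch: overflows_type() above rejects a u64 size that cannot be represented in obj->base.size, alongside the explicit INT_MAX page-count check, and as its comment notes it ignores sign bits. A minimal userspace rendering of the same macro, assuming BITS_PER_BYTE is 8 and using uint32_t as a hypothetical target field:)

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))

int main(void)
{
	uint64_t ok = 1ull << 20;	/* 1 MiB: representable in 32 bits */
	uint64_t too_big = 5ull << 32;	/* needs more than 32 bits */

	printf("%d %d\n",
	       (int)overflows_type(ok, uint32_t),	/* 0 */
	       (int)overflows_type(too_big, uint32_t));	/* 1 */
	return 0;
}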
@@ -4101,7 +3964,7 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
- if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
/* 965gm cannot relocate objects above 4GiB. */
mask &= ~__GFP_HIGHMEM;
mask |= __GFP_DMA32;
@@ -4115,7 +3978,7 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (HAS_LLC(dev)) {
+ if (HAS_LLC(dev_priv)) {
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@@ -4138,7 +4001,6 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
fail:
i915_gem_object_free(obj);
-
return ERR_PTR(ret);
}
@@ -4150,7 +4012,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
* back the contents from the GPU.
*/
- if (obj->madv != I915_MADV_WILLNEED)
+ if (obj->mm.madv != I915_MADV_WILLNEED)
return false;
if (obj->base.filp == NULL)
@@ -4166,16 +4028,72 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
return atomic_long_read(&obj->base.filp->f_count) == 1;
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+ struct llist_node *freed)
{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_vma *vma, *next;
+ struct drm_i915_gem_object *obj, *on;
- intel_runtime_pm_get(dev_priv);
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
+ llist_for_each_entry(obj, freed, freed) {
+ struct i915_vma *vma, *vn;
+
+ trace_i915_gem_object_destroy(obj);
- trace_i915_gem_object_destroy(obj);
+ GEM_BUG_ON(i915_gem_object_is_active(obj));
+ list_for_each_entry_safe(vma, vn,
+ &obj->vma_list, obj_link) {
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ vma->flags &= ~I915_VMA_PIN_MASK;
+ i915_vma_close(vma);
+ }
+ GEM_BUG_ON(!list_empty(&obj->vma_list));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
+
+ list_del(&obj->global_link);
+ }
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ llist_for_each_entry_safe(obj, on, freed, freed) {
+ GEM_BUG_ON(obj->bind_count);
+ GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
+ if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+ atomic_set(&obj->mm.pages_pin_count, 0);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ GEM_BUG_ON(obj->mm.pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+
+ reservation_object_fini(&obj->__builtin_resv);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(i915, obj->base.size);
+
+ kfree(obj->bit_17);
+ i915_gem_object_free(obj);
+ }
+}
+
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
+{
+ struct llist_node *freed;
+
+ freed = llist_del_all(&i915->mm.free_list);
+ if (unlikely(freed))
+ __i915_gem_free_objects(i915, freed);
+}
+
+static void __i915_gem_free_work(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, struct drm_i915_private, mm.free_work);
+ struct llist_node *freed;
/* All file-owned VMA should have been released by this point through
* i915_gem_close_object(), or earlier by i915_gem_context_close().
@@ -4184,47 +4102,62 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
* the GTT either for the user or for scanout). Those VMA still need to
* unbound now.
*/
- list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_close(vma);
- }
- GEM_BUG_ON(obj->bind_count);
- /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
- * before progressing. */
- if (obj->stolen)
- i915_gem_object_unpin_pages(obj);
+ while ((freed = llist_del_all(&i915->mm.free_list)))
+ __i915_gem_free_objects(i915, freed);
+}
- WARN_ON(atomic_read(&obj->frontbuffer_bits));
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
- if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
- i915_gem_object_is_tiled(obj))
- i915_gem_object_unpin_pages(obj);
+ /* We can't simply use call_rcu() from i915_gem_free_object()
+ * as we need to block whilst unbinding, and the call_rcu
+ * task may be called from softirq context. So we take a
+ * detour through a worker.
+ */
+ if (llist_add(&obj->freed, &i915->mm.free_list))
+ schedule_work(&i915->mm.free_work);
+}
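(Editor's aside, not part of the patch: freeing has to block while unbinding, which the call_rcu callback above may not do from softirq context, so the callback only pushes the object onto a lock-free list and kicks a worker. The userspace model below shows that push-then-drain shape; a single C11 atomic head stands in for llist_add()/llist_del_all() and schedule_work(), and all names are invented for illustration.)

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) free_list;

/* "RCU callback" side: push, and report whether the worker must be kicked */
static int defer_free(struct node *n)
{
	struct node *head = atomic_load(&free_list);

	do {
		n->next = head;
	} while (!atomic_compare_exchange_weak(&free_list, &head, n));

	return head == NULL;	/* list was empty: first entry schedules the worker */
}

/* "worker" side: take the whole list in one shot, then free at leisure */
static void drain_free_list(void)
{
	struct node *n = atomic_exchange(&free_list, NULL);

	while (n) {
		struct node *next = n->next;
		printf("freeing object %d\n", n->id);
		free(n);
		n = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		if (defer_free(n))
			printf("first entry: would schedule_work() here\n");
	}
	drain_free_list();
	return 0;
}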
- if (WARN_ON(obj->pages_pin_count))
- obj->pages_pin_count = 0;
- if (discard_backing_storage(obj))
- obj->madv = I915_MADV_DONTNEED;
- i915_gem_object_put_pages(obj);
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- BUG_ON(obj->pages);
+ if (obj->mm.quirked)
+ __i915_gem_object_unpin_pages(obj);
- if (obj->base.import_attach)
- drm_prime_gem_destroy(&obj->base, NULL);
+ if (discard_backing_storage(obj))
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ /* Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
+ */
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
- if (obj->ops->release)
- obj->ops->release(obj);
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(dev_priv, obj->base.size);
+ GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+ if (i915_gem_object_is_active(obj))
+ i915_gem_object_set_active_reference(obj);
+ else
+ i915_gem_object_put(obj);
+}
- kfree(obj->bit_17);
- i915_gem_object_free(obj);
+static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- intel_runtime_pm_put(dev_priv);
+ for_each_engine(engine, dev_priv, id)
+ GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
}
int i915_gem_suspend(struct drm_device *dev)
@@ -4255,18 +4188,46 @@ int i915_gem_suspend(struct drm_device *dev)
goto err;
i915_gem_retire_requests(dev_priv);
+ GEM_BUG_ON(dev_priv->gt.active_requests);
+ assert_kernel_context_is_current(dev_priv);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
flush_delayed_work(&dev_priv->gt.idle_work);
+ flush_work(&dev_priv->mm.free_work);
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
+ WARN_ON(!intel_execlists_idle(dev_priv));
+
+ /*
+ * Neither the BIOS, ourselves or any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload and suspend. Afterwards we then
+ * clean up the GEM state tracking, flushing off the requests and
+ * leaving the system in a known idle state.
+ *
+ * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+ * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+ if (HAS_HW_CONTEXTS(dev_priv)) {
+ int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ WARN_ON(reset && reset != -ENODEV);
+ }
return 0;
@@ -4279,8 +4240,10 @@ void i915_gem_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ WARN_ON(dev_priv->gt.awake);
+
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
+ i915_gem_restore_gtt_mappings(dev_priv);
/* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
@@ -4291,55 +4254,51 @@ void i915_gem_resume(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void i915_gem_init_swizzling(struct drm_device *dev)
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen < 5 ||
+ if (INTEL_GEN(dev_priv) < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev_priv))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
}
-static void init_unused_ring(struct drm_device *dev, u32 base)
+static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
I915_WRITE(RING_TAIL(base), 0);
I915_WRITE(RING_START(base), 0);
}
-static void init_unused_rings(struct drm_device *dev)
-{
- if (IS_I830(dev)) {
- init_unused_ring(dev, PRB1_BASE);
- init_unused_ring(dev, SRB0_BASE);
- init_unused_ring(dev, SRB1_BASE);
- init_unused_ring(dev, SRB2_BASE);
- init_unused_ring(dev, SRB3_BASE);
- } else if (IS_GEN2(dev)) {
- init_unused_ring(dev, SRB0_BASE);
- init_unused_ring(dev, SRB1_BASE);
- } else if (IS_GEN3(dev)) {
- init_unused_ring(dev, PRB1_BASE);
- init_unused_ring(dev, PRB2_BASE);
+static void init_unused_rings(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv)) {
+ init_unused_ring(dev_priv, PRB1_BASE);
+ init_unused_ring(dev_priv, SRB0_BASE);
+ init_unused_ring(dev_priv, SRB1_BASE);
+ init_unused_ring(dev_priv, SRB2_BASE);
+ init_unused_ring(dev_priv, SRB3_BASE);
+ } else if (IS_GEN2(dev_priv)) {
+ init_unused_ring(dev_priv, SRB0_BASE);
+ init_unused_ring(dev_priv, SRB1_BASE);
+ } else if (IS_GEN3(dev_priv)) {
+ init_unused_ring(dev_priv, PRB1_BASE);
+ init_unused_ring(dev_priv, PRB2_BASE);
}
}
@@ -4348,31 +4307,34 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
+ dev_priv->gt.last_init_time = ktime_get();
+
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
+ if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
- if (IS_HASWELL(dev))
- I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+ if (IS_HASWELL(dev_priv))
+ I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- if (HAS_PCH_NOP(dev)) {
- if (IS_IVYBRIDGE(dev)) {
+ if (HAS_PCH_NOP(dev_priv)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
}
}
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
/*
* At least 830 can leave some of the unused rings
@@ -4380,18 +4342,18 @@ i915_gem_init_hw(struct drm_device *dev)
* will prevent c3 entry. Make sure all unused rings
* are totally idle.
*/
- init_unused_rings(dev);
+ init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
DRM_ERROR("PPGTT enable HW failed %d\n", ret);
goto out;
}
/* Need to do basic initialisation of all rings first: */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
ret = engine->init_hw(engine);
if (ret)
goto out;
@@ -4490,21 +4452,15 @@ i915_gem_cleanup_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
dev_priv->gt.cleanup_engine(engine);
}
-static void
-init_engine_lists(struct intel_engine_cs *engine)
-{
- INIT_LIST_HEAD(&engine->request_list);
-}
-
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
@@ -4528,41 +4484,52 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
fence->id = i;
list_add_tail(&fence->link, &dev_priv->mm.fence_list);
}
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
- i915_gem_detect_bit_6_swizzle(dev);
+ i915_gem_detect_bit_6_swizzle(dev_priv);
}
-void
+int
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ int err = -ENOMEM;
+
+ dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->objects)
+ goto err_out;
+
+ dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->vmas)
+ goto err_objects;
+
+ dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_DESTROY_BY_RCU);
+ if (!dev_priv->requests)
+ goto err_vmas;
- dev_priv->objects =
- kmem_cache_create("i915_gem_object",
- sizeof(struct drm_i915_gem_object), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- dev_priv->vmas =
- kmem_cache_create("i915_gem_vma",
- sizeof(struct i915_vma), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- dev_priv->requests =
- kmem_cache_create("i915_gem_request",
- sizeof(struct drm_i915_gem_request), 0,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_DESTROY_BY_RCU,
- NULL);
+ dev_priv->dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!dev_priv->dependencies)
+ goto err_requests;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ INIT_LIST_HEAD(&dev_priv->gt.timelines);
+ err = i915_gem_timeline_init__global(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (err)
+ goto err_dependencies;
INIT_LIST_HEAD(&dev_priv->context_list);
+ INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+ init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_engine_lists(&dev_priv->engine[i]);
+ INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -4579,12 +4546,33 @@ i915_gem_load_init(struct drm_device *dev)
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
+
+ return 0;
+
+err_dependencies:
+ kmem_cache_destroy(dev_priv->dependencies);
+err_requests:
+ kmem_cache_destroy(dev_priv->requests);
+err_vmas:
+ kmem_cache_destroy(dev_priv->vmas);
+err_objects:
+ kmem_cache_destroy(dev_priv->objects);
+err_out:
+ return err;
}
void i915_gem_load_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
+ WARN_ON(!list_empty(&dev_priv->gt.timelines));
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ kmem_cache_destroy(dev_priv->dependencies);
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
@@ -4633,7 +4621,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, global_list) {
+ list_for_each_entry(obj, *p, global_link) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -4669,7 +4657,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv;
int ret;
- DRM_DEBUG_DRIVER("\n");
+ DRM_DEBUG("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
@@ -4725,21 +4713,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
-{
- struct page *page;
-
- /* Only default objects have per-page dirty tracking */
- if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
- return NULL;
-
- page = i915_gem_object_get_page(obj, n);
- set_page_dirty(page);
- return page;
-}
-
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
@@ -4758,14 +4731,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
if (ret)
goto fail;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto fail;
- i915_gem_object_pin_pages(obj);
- sg = obj->pages;
+ sg = obj->mm.pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
- obj->dirty = 1; /* Backing store is now out of date */
+ obj->mm.dirty = true; /* Backing store is now out of date */
i915_gem_object_unpin_pages(obj);
if (WARN_ON(bytes != size)) {
@@ -4780,3 +4752,156 @@ fail:
i915_gem_object_put(obj);
return ERR_PTR(ret);
}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ struct scatterlist *sg;
+ unsigned int idx, count;
+
+ might_sleep();
+ GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ /* As we iterate forward through the sg, we record each entry in a
+ * radixtree for quick repeated (backwards) lookups. If we have seen
+ * this index previously, we will have an entry for it.
+ *
+ * Initial lookup is O(N), but this is amortized to O(1) for
+ * sequential page access (where each new request is consecutive
+ * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+ * i.e. O(1) with a large constant!
+ */
+ if (n < READ_ONCE(iter->sg_idx))
+ goto lookup;
+
+ mutex_lock(&iter->lock);
+
+ /* We prefer to reuse the last sg so that repeated lookups of this
+ * (or the subsequent) sg are fast - comparing against the last
+ * sg is faster than going through the radixtree.
+ */
+
+ sg = iter->sg_pos;
+ idx = iter->sg_idx;
+ count = __sg_page_count(sg);
+
+ while (idx + count <= n) {
+ unsigned long exception, i;
+ int ret;
+
+ /* If we cannot allocate and insert this entry, or the
+ * individual pages from this range, cancel updating the
+ * sg_idx so that on this lookup we are forced to linearly
+ * scan onwards, but on future lookups we will try the
+ * insertion again (in which case we need to be careful of
+ * the error return reporting that we have already inserted
+ * this index).
+ */
+ ret = radix_tree_insert(&iter->radix, idx, sg);
+ if (ret && ret != -EEXIST)
+ goto scan;
+
+ exception =
+ RADIX_TREE_EXCEPTIONAL_ENTRY |
+ idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+ for (i = 1; i < count; i++) {
+ ret = radix_tree_insert(&iter->radix, idx + i,
+ (void *)exception);
+ if (ret && ret != -EEXIST)
+ goto scan;
+ }
+
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+scan:
+ iter->sg_pos = sg;
+ iter->sg_idx = idx;
+
+ mutex_unlock(&iter->lock);
+
+ if (unlikely(n < idx)) /* insertion completed by another thread */
+ goto lookup;
+
+ /* In case we failed to insert the entry into the radixtree, we need
+ * to look beyond the current sg.
+ */
+ while (idx + count <= n) {
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+ *offset = n - idx;
+ return sg;
+
+lookup:
+ rcu_read_lock();
+
+ sg = radix_tree_lookup(&iter->radix, n);
+ GEM_BUG_ON(!sg);
+
+ /* If this index is in the middle of a multi-page sg entry,
+ * the radixtree will contain an exceptional entry that points
+ * to the start of that range. We will return the pointer to
+ * the base page and the offset of this page within the
+ * sg entry's range.
+ */
+ *offset = 0;
+ if (unlikely(radix_tree_exception(sg))) {
+ unsigned long base =
+ (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+ sg = radix_tree_lookup(&iter->radix, base);
+ GEM_BUG_ON(!sg);
+
+ *offset = n - base;
+ }
+
+ rcu_read_unlock();
+
+ return sg;
+}
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
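(Editor's aside, not part of the patch: for a multi-page sg chunk, i915_gem_object_get_sg() above inserts an "exceptional" radix-tree entry for each interior page that encodes the chunk's starting index, so a later lookup can strip the tag, find the chunk's real sg entry and compute the page's offset within it. A toy encode/decode in userspace; the tag constants are placeholders, not the kernel's RADIX_TREE_EXCEPTIONAL_* values, and the sketch assumes the tag bits sit below the shift so they vanish on decode:)

#include <stdio.h>

#define EXC_ENTRY 1UL	/* placeholder for RADIX_TREE_EXCEPTIONAL_ENTRY */
#define EXC_SHIFT 2	/* placeholder for RADIX_TREE_EXCEPTIONAL_SHIFT */

int main(void)
{
	unsigned long chunk_start = 16;	/* first page index covered by one sg chunk */
	unsigned long n = 20;		/* page we are asked for, inside that chunk */

	/* store: interior pages get a tagged value encoding the chunk start */
	unsigned long cookie = EXC_ENTRY | (chunk_start << EXC_SHIFT);

	/* lookup: strip the tag to recover the start, then offset = n - start */
	unsigned long base = cookie >> EXC_SHIFT;

	printf("base=%lu offset=%lu\n", base, n - base);	/* base=16 offset=4 */
	return 0;
}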
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 8292e797d9b5..51ec793f2e20 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,7 +28,9 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
-#define GEM_BUG_ON(expr)
+#define GEM_BUG_ON(expr) do { } while (0)
#endif
+#define I915_NUM_ENGINES 5
+
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index ed989596d9a3..b3bc119ec1bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
- i915_gem_object_put(obj);
+ __i915_gem_object_release_unless_active(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
@@ -97,9 +97,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
- struct drm_i915_gem_object *tmp, *next;
+ struct drm_i915_gem_object *tmp;
struct list_head *list;
- int n;
+ int n, ret;
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
@@ -112,40 +112,35 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
- list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+ list_for_each_entry(tmp, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
- if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
- &tmp->base.dev->struct_mutex))
+ if (i915_gem_object_is_active(tmp))
break;
- /* While we're looping, do some clean up */
- if (tmp->madv == __I915_MADV_PURGED) {
- list_del(&tmp->batch_pool_link);
- i915_gem_object_put(tmp);
- continue;
- }
+ GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
+ true));
if (tmp->base.size >= size) {
+ /* Clear the set of shared fences early */
+ ww_mutex_lock(&tmp->resv->lock, NULL);
+ reservation_object_add_excl_fence(tmp->resv, NULL);
+ ww_mutex_unlock(&tmp->resv->lock);
+
obj = tmp;
break;
}
}
if (obj == NULL) {
- int ret;
-
- obj = i915_gem_object_create(&pool->engine->i915->drm, size);
+ obj = i915_gem_object_create_internal(pool->engine->i915, size);
if (IS_ERR(obj))
return obj;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- return ERR_PTR(ret);
-
- obj->madv = I915_MADV_DONTNEED;
}
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
+
list_move_tail(&obj->batch_pool_link, list);
- i915_gem_object_pin_pages(obj);
return obj;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index df10f4e95736..1f94b8d6d83d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ce->ring)
intel_ring_free(ce->ring);
- i915_vma_put(ce->state);
+ __i915_gem_object_release_unless_active(ce->state->obj);
}
+ kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
@@ -192,7 +193,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(to_i915(dev))) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@@ -303,19 +304,28 @@ __create_hw_context(struct drm_device *dev,
}
/* Default context will never have a file_priv */
- if (file_priv != NULL) {
+ ret = DEFAULT_CONTEXT_HANDLE;
+ if (file_priv) {
ret = idr_alloc(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
- } else
- ret = DEFAULT_CONTEXT_HANDLE;
+ }
+ ctx->user_handle = ret;
ctx->file_priv = file_priv;
- if (file_priv)
+ if (file_priv) {
ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
+ current->comm,
+ pid_nr(ctx->pid),
+ ctx->user_handle);
+ if (!ctx->name) {
+ ret = -ENOMEM;
+ goto err_pid;
+ }
+ }
- ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@@ -329,6 +339,9 @@ __create_hw_context(struct drm_device *dev,
return ctx;
+err_pid:
+ put_pid(ctx->pid);
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
context_close(ctx);
return ERR_PTR(ret);
@@ -352,9 +365,9 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
if (USES_FULL_PPGTT(dev)) {
- struct i915_hw_ppgtt *ppgtt =
- i915_ppgtt_create(to_i915(dev), file_priv);
+ struct i915_hw_ppgtt *ppgtt;
+ ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -463,6 +476,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
+ ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -474,10 +488,11 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
@@ -492,13 +507,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915_gem_context_is_default(ctx))
continue;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ctx->engine[engine->id].initialised = false;
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *kce =
&dev_priv->kernel_context->engine[engine->id];
@@ -563,6 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
struct drm_i915_private *dev_priv = req->i915;
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
+ enum intel_engine_id id;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -605,7 +621,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@@ -634,7 +650,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@@ -749,12 +765,36 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
return false;
}
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+ unsigned int flags)
+{
+ struct i915_vma *vma = ctx->engine[RCS].state;
+ int ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out.
+ * We only want to do this on the first bind so that we do not stall
+ * on an active context (which by nature is already on the GPU).
+ */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return vma;
+}
+
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_vma *vma = to->engine[RCS].state;
+ struct i915_vma *vma;
struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
@@ -762,17 +802,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- /* Clear this page out of any CPU caches for coherent swap-in/out. */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (ret)
- return ret;
- }
-
/* Trying to pin first makes error handling easier. */
- ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
- if (ret)
- return ret;
+ vma = i915_gem_context_pin_legacy(to, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
/*
* Pin can switch back to the default context if we end up calling into
@@ -929,21 +962,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ struct i915_gem_timeline *timeline;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
- if (engine->last_context == NULL)
- continue;
-
- if (engine->last_context == dev_priv->kernel_context)
- continue;
-
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);
+ /* Queue this switch after all other activity */
+ list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ struct drm_i915_gem_request *prev;
+ struct intel_timeline *tl;
+
+ tl = &timeline->engine[engine->id];
+ prev = i915_gem_active_raw(&tl->last_request,
+ &dev_priv->drm.struct_mutex);
+ if (prev)
+ i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ &prev->submit,
+ GFP_KERNEL);
+ }
+
ret = i915_switch_context(req);
i915_add_request_no_flush(req);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 97c9d68b45df..5e38299b5df6 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -44,51 +44,42 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
- ret = i915_mutex_lock_interruptible(obj->base.dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- goto err_unlock;
-
- i915_gem_object_pin_pages(obj);
-
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
- goto err_unpin;
+ goto err_unpin_pages;
}
- ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
- src = obj->pages->sgl;
+ src = obj->mm.pages->sgl;
dst = st->sgl;
- for (i = 0; i < obj->pages->nents; i++) {
+ for (i = 0; i < obj->mm.pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- ret =-ENOMEM;
+ ret = -ENOMEM;
goto err_free_sg;
}
- mutex_unlock(&obj->base.dev->struct_mutex);
return st;
err_free_sg:
sg_free_table(st);
err_free:
kfree(st);
-err_unpin:
+err_unpin_pages:
i915_gem_object_unpin_pages(obj);
-err_unlock:
- mutex_unlock(&obj->base.dev->struct_mutex);
err:
return ERR_PTR(ret);
}
@@ -103,36 +94,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
sg_free_table(sg);
kfree(sg);
- mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
- mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- void *addr;
- int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
-
- addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- mutex_unlock(&dev->struct_mutex);
-
- return addr;
+ return i915_gem_object_pin_map(obj, I915_MAP_WB);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin_map(obj);
- mutex_unlock(&dev->struct_mutex);
}
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@@ -179,32 +155,45 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out;
- ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ err = i915_gem_object_set_to_cpu_domain(obj, write);
mutex_unlock(&dev->struct_mutex);
- return ret;
+
+out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
mutex_unlock(&dev->struct_mutex);
- return ret;
+out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
@@ -222,60 +211,17 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
-static void export_fences(struct drm_i915_gem_object *obj,
- struct dma_buf *dma_buf)
-{
- struct reservation_object *resv = dma_buf->resv;
- struct drm_i915_gem_request *req;
- unsigned long active;
- int idx;
-
- active = __I915_BO_ACTIVE(obj);
- if (!active)
- return;
-
- /* Serialise with execbuf to prevent concurrent fence-loops */
- mutex_lock(&obj->base.dev->struct_mutex);
-
- /* Mark the object for future fences before racily adding old fences */
- obj->base.dma_buf = dma_buf;
-
- ww_mutex_lock(&resv->lock, NULL);
-
- for_each_active(active, idx) {
- req = i915_gem_active_get(&obj->last_read[idx],
- &obj->base.dev->struct_mutex);
- if (!req)
- continue;
-
- if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &req->fence);
-
- i915_gem_request_put(req);
- }
-
- req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
- if (req) {
- reservation_object_add_excl_fence(resv, &req->fence);
- i915_gem_request_put(req);
- }
-
- ww_mutex_unlock(&resv->lock);
- mutex_unlock(&obj->base.dev->struct_mutex);
-}
-
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *dma_buf;
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
+ exp_info.resv = obj->resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -283,30 +229,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
- if (IS_ERR(dma_buf))
- return dma_buf;
-
- export_fences(obj, dma_buf);
- return dma_buf;
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
-static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
- struct sg_table *sg;
-
- sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg))
- return PTR_ERR(sg);
-
- obj->pages = sg;
- return 0;
+ return dma_buf_map_attachment(obj->base.import_attach,
+ DMA_BIDIRECTIONAL);
}
-static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- dma_buf_unmap_attachment(obj->base.import_attach,
- obj->pages, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(obj->base.import_attach, pages,
+ DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
@@ -350,6 +287,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
+ obj->resv = dma_buf->resv;
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all
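For reference, the begin/end_cpu_access hooks above all follow the same shape: pin the backing pages first, take struct_mutex only for the domain flush, and funnel every exit through a single unpin. A minimal sketch of that ordering, assuming a hypothetical object type and helpers (foo_pin_pages(), foo_unpin_pages() and foo_flush_domain() are invented for illustration; only mutex_lock_interruptible()/mutex_unlock() are real kernel APIs):

#include <linux/mutex.h>

struct foo { struct mutex lock; };       /* hypothetical object, not an i915 type */
int foo_pin_pages(struct foo *f);        /* invented helpers */
void foo_unpin_pages(struct foo *f);
int foo_flush_domain(struct foo *f);

static int foo_begin_cpu_access(struct foo *f)
{
	int err;

	err = foo_pin_pages(f);          /* take the long-lived reference first */
	if (err)
		return err;

	err = mutex_lock_interruptible(&f->lock);
	if (err)
		goto out;                /* interrupted: still drop the pin */

	err = foo_flush_domain(f);       /* only this step needs the lock */
	mutex_unlock(&f->lock);
out:
	foo_unpin_pages(f);              /* single unwind path for all exits */
	return err;
}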
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5b6f81c1dbca..bd08814b015c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,13 +33,17 @@
#include "intel_drv.h"
#include "i915_trace.h"
-static bool
-gpu_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_timeline *tl;
- for_each_engine(engine, dev_priv) {
- if (intel_engine_is_active(engine))
+ tl = &ggtt->base.timeline.engine[engine->id];
+ if (i915_gem_active_isset(&tl->last_request))
return false;
}
@@ -55,7 +59,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
- if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+ if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
return false;
list_add(&vma->exec_list, unwind);
@@ -102,6 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *vma, *next;
int ret;
+ lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -152,7 +157,7 @@ search_again:
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
return -ENOSPC;
- if (gpu_is_idle(dev_priv)) {
+ if (ggtt_is_idle(dev_priv)) {
/* If we still have pending pageflip completions, drop
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
@@ -212,6 +217,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
{
struct drm_mm_node *node, *next;
+ lockdep_assert_held(&target->vm->dev->struct_mutex);
+
list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list,
node_list) {
@@ -265,7 +272,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
- WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
+ lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict_vm(vm);
if (do_idle) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7adb4c77cc7f..097d9d8c2315 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,7 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -288,7 +287,7 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
if (DBG_USE_CPU_RELOC)
return DBG_USE_CPU_RELOC > 0;
- return (HAS_LLC(obj->base.dev) ||
+ return (HAS_LLC(to_i915(obj->base.dev)) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -332,7 +331,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->page = -1;
cache->vaddr = 0;
cache->i915 = i915;
- cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+ /* Must be a variable in the struct to allow GCC to unroll. */
+ cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->node.allocated = false;
}
@@ -370,8 +370,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
ggtt->base.clear_range(&ggtt->base,
cache->node.start,
- cache->node.size,
- true);
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -419,17 +418,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
unsigned long offset;
void *vaddr;
- if (cache->node.allocated) {
- wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- cache->node.start, I915_CACHE_NONE, 0);
- cache->page = page;
- return unmask_page(cache->vaddr);
- }
-
if (cache->vaddr) {
- io_mapping_unmap_atomic(unmask_page(cache->vaddr));
+ io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int ret;
@@ -467,6 +457,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
+ wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -474,7 +465,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset += page << PAGE_SHIFT;
}
- vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+ vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
@@ -552,27 +543,13 @@ repeat:
return 0;
}
-static bool object_is_idle(struct drm_i915_gem_object *obj)
-{
- unsigned long active = i915_gem_object_get_active(obj);
- int idx;
-
- for_each_active(active, idx) {
- if (!i915_gem_active_is_idle(&obj->last_read[idx],
- &obj->base.dev->struct_mutex))
- return false;
- }
-
- return true;
-}
-
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct reloc_cache *cache)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
@@ -591,7 +568,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
- if (unlikely(IS_GEN6(dev) &&
+ if (unlikely(IS_GEN6(dev_priv) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
PIN_GLOBAL);
@@ -649,10 +626,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -EINVAL;
}
- /* We can't wait for rendering with pagefaults disabled */
- if (pagefault_disabled() && !object_is_idle(obj))
- return -EFAULT;
-
ret = relocate_entry(obj, reloc, cache, target_offset);
if (ret)
return ret;
@@ -679,12 +652,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
remain = entry->relocation_count;
while (remain) {
struct drm_i915_gem_relocation_entry *r = stack_reloc;
- int count = remain;
- if (count > ARRAY_SIZE(stack_reloc))
- count = ARRAY_SIZE(stack_reloc);
+ unsigned long unwritten;
+ unsigned int count;
+
+ count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
remain -= count;
- if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
+ /* This is the fast path and we cannot handle a pagefault
+ * whilst holding the struct mutex lest the user pass in the
+ * relocations contained within a mmapped bo. In such a case the
+ * page fault handler would call i915_gem_fault() and we would
+ * try to acquire the struct mutex again. Obviously
+ * this is bad and so lockdep complains vehemently.
+ */
+ pagefault_disable();
+ unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
+ pagefault_enable();
+ if (unlikely(unwritten)) {
ret = -EFAULT;
goto out;
}
@@ -696,11 +680,26 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
if (ret)
goto out;
- if (r->presumed_offset != offset &&
- __put_user(r->presumed_offset,
- &user_relocs->presumed_offset)) {
- ret = -EFAULT;
- goto out;
+ if (r->presumed_offset != offset) {
+ pagefault_disable();
+ unwritten = __put_user(r->presumed_offset,
+ &user_relocs->presumed_offset);
+ pagefault_enable();
+ if (unlikely(unwritten)) {
+ /* Note that reporting an error now
+ * leaves everything in an inconsistent
+ * state as we have *already* changed
+ * the relocation value inside the
+ * object. As we have not updated
+ * reloc.presumed_offset and will not
+ * change execobject.offset, a later
+ * call may skip rewriting the value
+ * inside the object, leaving it
+ * dangling and causing a GPU hang.
+ */
+ ret = -EFAULT;
+ goto out;
+ }
}
user_relocs++;
@@ -740,20 +739,11 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
struct i915_vma *vma;
int ret = 0;
- /* This is the fast path and we cannot handle a pagefault whilst
- * holding the struct mutex lest the user pass in the relocations
- * contained within a mmaped bo. For in such a case we, the page
- * fault handler would call i915_gem_fault() and we would try to
- * acquire the struct mutex again. Obviously this is bad and so
- * lockdep complains vehemently.
- */
- pagefault_disable();
list_for_each_entry(vma, &eb->vmas, exec_list) {
ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
- pagefault_enable();
return ret;
}
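The fast-path comment added in the relocation hunk above boils down to one idiom: do the user copy with page faults disabled so a fault cannot recurse into the driver's own fault handler while struct_mutex is held, and treat any uncopied bytes as -EFAULT. A minimal sketch of that idiom in isolation, assuming a hypothetical helper name (copy_user_reloc_atomic() is invented; pagefault_disable(), pagefault_enable() and __copy_from_user_inatomic() are the real kernel primitives):

#include <linux/uaccess.h>

static int copy_user_reloc_atomic(void *dst, const void __user *src,
				  unsigned long len)
{
	unsigned long unwritten;

	/* No page fault may be serviced here: a fault could recurse into
	 * the driver's fault handler while its locks are already held. */
	pagefault_disable();
	unwritten = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	/* Non-zero means some bytes could not be copied without faulting;
	 * the caller bails out (or retries outside the locked section). */
	return unwritten ? -EFAULT : 0;
}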
@@ -843,7 +833,7 @@ need_reloc_mappable(struct i915_vma *vma)
return false;
/* See also use_cpu_reloc() */
- if (HAS_LLC(vma->obj->base.dev))
+ if (HAS_LLC(to_i915(vma->obj->base.dev)))
return false;
if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
@@ -1111,44 +1101,20 @@ err:
return ret;
}
-static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
-{
- unsigned int mask;
-
- mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
- mask <<= I915_BO_ACTIVE_SHIFT;
-
- return mask;
-}
-
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned int other_rings = eb_other_engines(req);
struct i915_vma *vma;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- struct reservation_object *resv;
-
- if (obj->flags & other_rings) {
- ret = i915_gem_request_await_object
- (req, obj, obj->base.pending_write_domain);
- if (ret)
- return ret;
- }
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- ret = i915_sw_fence_await_reservation
- (&req->submit, resv, &i915_fence_ops,
- obj->base.pending_write_domain, 10*HZ,
- GFP_KERNEL | __GFP_NOWARN);
- if (ret < 0)
- return ret;
- }
+ ret = i915_gem_request_await_object
+ (req, obj, obj->base.pending_write_domain);
+ if (ret)
+ return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj, false);
@@ -1281,6 +1247,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ctx;
}
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req,
unsigned int flags)
@@ -1290,8 +1262,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- obj->dirty = 1; /* be paranoid */
-
/* Add a reference if we're newly entering the active list.
* The order in which we add operations to the retirement queue is
* vital here: mark_active adds to the start of the callback list,
@@ -1299,37 +1269,31 @@ void i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!i915_gem_object_is_active(obj))
- i915_gem_object_get(obj);
- i915_gem_object_set_active(obj, idx);
- i915_gem_active_set(&obj->last_read[idx], req);
+ if (!i915_vma_is_active(vma))
+ obj->active_count++;
+ i915_vma_set_active(vma, idx);
+ i915_gem_active_set(&vma->last_read[idx], req);
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) {
- i915_gem_active_set(&obj->last_write, req);
-
- intel_fb_obj_invalidate(obj, ORIGIN_CS);
+ if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+ i915_gem_active_set(&obj->frontbuffer_write, req);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
}
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req);
-
- i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], req);
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void eb_export_fence(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
unsigned int flags)
{
- struct reservation_object *resv;
-
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (!resv)
- return;
+ struct reservation_object *resv = obj->resv;
/* Ignore errors from failing to allocate the new fence, we can't
* handle an error right now. Worst case should be missed
@@ -1599,12 +1563,12 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
- engine = &dev_priv->engine[_VCS(bsd_idx)];
+ engine = dev_priv->engine[_VCS(bsd_idx)];
} else {
- engine = &dev_priv->engine[user_ring_map[user_ring_id]];
+ engine = dev_priv->engine[user_ring_map[user_ring_id]];
}
- if (!intel_engine_initialized(engine)) {
+ if (!engine) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return NULL;
}
@@ -1659,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
- if (!HAS_RESOURCE_STREAMER(dev)) {
+ if (!HAS_RESOURCE_STREAMER(dev_priv)) {
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
@@ -1913,7 +1877,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(to_i915(dev)) < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 8df1fa7234e8..0efa3571afc3 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -290,6 +290,8 @@ i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
if (!fence)
return 0;
@@ -341,6 +343,11 @@ i915_vma_get_fence(struct i915_vma *vma)
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ /* Note that we revoke fences on runtime suspend. Therefore the user
+ * must keep the device awake whilst using the fence.
+ */
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
@@ -361,14 +368,14 @@ i915_vma_get_fence(struct i915_vma *vma)
/**
* i915_gem_restore_fences - restore fence state
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Restore the hw fence state to match the software tracking again, to be called
- * after a gpu reset and on resume.
+ * after a gpu reset and on resume. Note that on runtime suspend we only cancel
+ * the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -379,10 +386,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
- if (vma && !i915_gem_object_is_tiled(vma->obj))
+ if (vma && !i915_gem_object_is_tiled(vma->obj)) {
+ GEM_BUG_ON(!reg->dirty);
+ GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+
+ list_move(&reg->link, &dev_priv->mm.fence_list);
+ vma->fence = NULL;
vma = NULL;
+ }
- fence_update(reg, vma);
+ fence_write(reg, vma);
+ reg->vma = vma;
}
}
@@ -436,19 +450,18 @@ void i915_gem_restore_fences(struct drm_device *dev)
/**
* i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
@@ -458,7 +471,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
@@ -487,19 +500,20 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN5(dev)) {
+ } else if (IS_GEN5(dev_priv)) {
/* On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ } else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
+ !IS_G33(dev_priv))) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@@ -537,7 +551,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev) &&
+ if (IS_GEN4(dev_priv) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -628,6 +642,7 @@ i915_gem_swizzle_page(struct page *page)
/**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with
@@ -638,7 +653,8 @@ i915_gem_swizzle_page(struct page *page)
* by swapping them out and back in again).
*/
void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@@ -648,10 +664,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return;
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
- if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj->bit_17) != 0)) {
+ if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
@@ -662,17 +677,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
/**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned.
*/
void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
+ const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter;
struct page *page;
- int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
@@ -687,7 +704,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
new file mode 100644
index 000000000000..22c4a2d01adf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_FENCE_REG_H__
+#define __I915_FENCE_REG_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct i915_vma;
+
+struct drm_i915_fence_reg {
+ struct list_head link;
+ struct drm_i915_private *i915;
+ struct i915_vma *vma;
+ int pin_count;
+ int id;
+ /**
+ * Whether the tiling parameters for the currently
+ * associated fence register have changed. Note that
+ * for the purposes of tracking tiling changes we also
+ * treat the unfenced register, the register slot that
+ * the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ bool dirty;
+};
+
+#endif
+
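The new header gets away with bare forward declarations of struct drm_i915_private and struct i915_vma because struct drm_i915_fence_reg only stores pointers to them; nothing in the header needs their size or members. A stand-alone illustration of that rule, with names invented for the example (not i915 code):

struct engine;                           /* forward declaration: incomplete type */

struct fence_slot {
	struct engine *owner;            /* fine: pointer to an incomplete type */
	int id;
	/* struct engine embedded;          would not compile: size unknown here */
};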
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0bb4232f66bc..b4bde1452f2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,7 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
@@ -95,13 +96,6 @@
*
*/
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
-}
-
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -175,7 +169,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{
u32 pte_flags = 0;
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
@@ -191,15 +185,13 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm,
vma->node.start,
- vma->size,
- true);
+ vma->size);
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+ enum i915_cache_level level)
{
- gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
pte |= addr;
switch (level) {
@@ -234,9 +226,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -256,9 +248,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -280,9 +272,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 flags)
+ u32 flags)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@@ -296,9 +288,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -309,9 +301,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -328,10 +320,10 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-static int __setup_page_dma(struct drm_device *dev,
+static int __setup_page_dma(struct drm_i915_private *dev_priv,
struct i915_page_dma *p, gfp_t flags)
{
- struct device *kdev = &dev->pdev->dev;
+ struct device *kdev = &dev_priv->drm.pdev->dev;
p->page = alloc_page(flags);
if (!p->page)
@@ -348,14 +340,16 @@ static int __setup_page_dma(struct drm_device *dev,
return 0;
}
-static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static int setup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- return __setup_page_dma(dev, p, I915_GFP_DMA);
+ return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}
-static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static void cleanup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
if (WARN_ON(!p->page))
return;
@@ -373,27 +367,29 @@ static void *kmap_page_dma(struct i915_page_dma *p)
/* We use the flushing unmap only with ppgtt structures:
* page directories, page tables and scratch pages.
*/
-static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
*/
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
}
#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+#define kunmap_px(ppgtt, vaddr) \
+ kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
-#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
-#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
-#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
-#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
+#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
+#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
+#define fill32_px(dev_priv, px, v) \
+ fill_page_dma_32((dev_priv), px_base(px), (v))
-static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
- const uint64_t val)
+static void fill_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint64_t val)
{
int i;
uint64_t * const vaddr = kmap_page_dma(p);
@@ -401,38 +397,37 @@ static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
for (i = 0; i < 512; i++)
vaddr[i] = val;
- kunmap_page_dma(dev, vaddr);
+ kunmap_page_dma(dev_priv, vaddr);
}
-static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
- const uint32_t val32)
+static void fill_page_dma_32(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint32_t val32)
{
uint64_t v = val32;
v = v << 32 | val32;
- fill_page_dma(dev, p, v);
+ fill_page_dma(dev_priv, p, v);
}
static int
-setup_scratch_page(struct drm_device *dev,
+setup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch,
gfp_t gfp)
{
- return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
+ return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}
-static void cleanup_scratch_page(struct drm_device *dev,
+static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch)
{
- cleanup_page_dma(dev, scratch);
+ cleanup_page_dma(dev_priv, scratch);
}
-static struct i915_page_table *alloc_pt(struct drm_device *dev)
+static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
struct i915_page_table *pt;
- const size_t count = INTEL_INFO(dev)->gen >= 8 ?
- GEN8_PTES : GEN6_PTES;
+ const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
int ret = -ENOMEM;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
@@ -445,7 +440,7 @@ static struct i915_page_table *alloc_pt(struct drm_device *dev)
if (!pt->used_ptes)
goto fail_bitmap;
- ret = setup_px(dev, pt);
+ ret = setup_px(dev_priv, pt);
if (ret)
goto fail_page_m;
@@ -459,9 +454,10 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
+static void free_pt(struct drm_i915_private *dev_priv,
+ struct i915_page_table *pt)
{
- cleanup_px(dev, pt);
+ cleanup_px(dev_priv, pt);
kfree(pt->used_ptes);
kfree(pt);
}
@@ -472,9 +468,9 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
gen8_pte_t scratch_pte;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC);
- fill_px(vm->dev, pt, scratch_pte);
+ fill_px(to_i915(vm->dev), pt, scratch_pte);
}
static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -485,12 +481,12 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
WARN_ON(vm->scratch_page.daddr == 0);
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
- fill32_px(vm->dev, pt, scratch_pte);
+ fill32_px(to_i915(vm->dev), pt, scratch_pte);
}
-static struct i915_page_directory *alloc_pd(struct drm_device *dev)
+static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
{
struct i915_page_directory *pd;
int ret = -ENOMEM;
@@ -504,7 +500,7 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev)
if (!pd->used_pdes)
goto fail_bitmap;
- ret = setup_px(dev, pd);
+ ret = setup_px(dev_priv, pd);
if (ret)
goto fail_page_m;
@@ -518,10 +514,11 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+static void free_pd(struct drm_i915_private *dev_priv,
+ struct i915_page_directory *pd)
{
if (px_page(pd)) {
- cleanup_px(dev, pd);
+ cleanup_px(dev_priv, pd);
kfree(pd->used_pdes);
kfree(pd);
}
@@ -534,13 +531,13 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
- fill_px(vm->dev, pd, scratch_pde);
+ fill_px(to_i915(vm->dev), pd, scratch_pde);
}
-static int __pdp_init(struct drm_device *dev,
+static int __pdp_init(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
- size_t pdpes = I915_PDPES_PER_PDP(dev);
+ size_t pdpes = I915_PDPES_PER_PDP(dev_priv);
pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
sizeof(unsigned long),
@@ -569,22 +566,22 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
}
static struct
-i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
+i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
return ERR_PTR(-ENOMEM);
- ret = __pdp_init(dev, pdp);
+ ret = __pdp_init(dev_priv, pdp);
if (ret)
goto fail_bitmap;
- ret = setup_px(dev, pdp);
+ ret = setup_px(dev_priv, pdp);
if (ret)
goto fail_page_m;
@@ -598,12 +595,12 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pdp(struct drm_device *dev,
+static void free_pdp(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
__pdp_fini(pdp);
- if (USES_FULL_48BIT_PPGTT(dev)) {
- cleanup_px(dev, pdp);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ cleanup_px(dev_priv, pdp);
kfree(pdp);
}
}
@@ -615,7 +612,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(vm->dev, pdp, scratch_pdpe);
+ fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
@@ -626,7 +623,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
- fill_px(vm->dev, pml4, scratch_pml4e);
+ fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
}
static void
@@ -637,7 +634,7 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pdpe_t *page_directorypo;
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
return;
page_directorypo = kmap_px(pdp);
@@ -653,7 +650,7 @@ gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
- WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
kunmap_px(ppgtt, pagemap);
}
@@ -706,85 +703,172 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
-static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- uint64_t start,
- uint64_t length,
- gen8_pte_t scratch_pte)
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+ ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
+}
+
+/* Removes entries from a single page table, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries.
+ */
+static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ uint64_t start,
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ unsigned int num_entries = gen8_pte_count(start, length);
+ unsigned int pte = gen8_pte_index(start);
+ unsigned int pte_end = pte + num_entries;
gen8_pte_t *pt_vaddr;
- unsigned pdpe = gen8_pdpe_index(start);
- unsigned pde = gen8_pde_index(start);
- unsigned pte = gen8_pte_index(start);
- unsigned num_entries = length >> PAGE_SHIFT;
- unsigned last_pte, i;
+ gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_LLC);
- if (WARN_ON(!pdp))
- return;
+ if (WARN_ON(!px_page(pt)))
+ return false;
- while (num_entries) {
- struct i915_page_directory *pd;
- struct i915_page_table *pt;
+ GEM_BUG_ON(pte_end > GEN8_PTES);
- if (WARN_ON(!pdp->page_directory[pdpe]))
- break;
+ bitmap_clear(pt->used_ptes, pte, num_entries);
+
+ if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
+ free_pt(to_i915(vm->dev), pt);
+ return true;
+ }
- pd = pdp->page_directory[pdpe];
+ pt_vaddr = kmap_px(pt);
+ while (pte < pte_end)
+ pt_vaddr[pte++] = scratch_pte;
+
+ kunmap_px(ppgtt, pt_vaddr);
+
+ return false;
+}
+
+/* Removes entries from a single page dir, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries
+ */
+static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ uint64_t start,
+ uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_table *pt;
+ uint64_t pde;
+ gen8_pde_t *pde_vaddr;
+ gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
+ I915_CACHE_LLC);
+
+ gen8_for_each_pde(pt, pd, start, length, pde) {
if (WARN_ON(!pd->page_table[pde]))
break;
- pt = pd->page_table[pde];
+ if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
+ __clear_bit(pde, pd->used_pdes);
+ pde_vaddr = kmap_px(pd);
+ pde_vaddr[pde] = scratch_pde;
+ kunmap_px(ppgtt, pde_vaddr);
+ }
+ }
- if (WARN_ON(!px_page(pt)))
- break;
+ if (bitmap_empty(pd->used_pdes, I915_PDES)) {
+ free_pd(to_i915(vm->dev), pd);
+ return true;
+ }
- last_pte = pte + num_entries;
- if (last_pte > GEN8_PTES)
- last_pte = GEN8_PTES;
+ return false;
+}
- pt_vaddr = kmap_px(pt);
+/* Removes entries from a single page dir pointer, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries
+ */
+static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
+ struct i915_page_directory_pointer *pdp,
+ uint64_t start,
+ uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_page_directory *pd;
+ uint64_t pdpe;
+ gen8_ppgtt_pdpe_t *pdpe_vaddr;
+ gen8_ppgtt_pdpe_t scratch_pdpe =
+ gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- for (i = pte; i < last_pte; i++) {
- pt_vaddr[i] = scratch_pte;
- num_entries--;
+ gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+ if (WARN_ON(!pdp->page_directory[pdpe]))
+ break;
+
+ if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
+ __clear_bit(pdpe, pdp->used_pdpes);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ pdpe_vaddr = kmap_px(pdp);
+ pdpe_vaddr[pdpe] = scratch_pdpe;
+ kunmap_px(ppgtt, pdpe_vaddr);
+ }
}
+ }
- kunmap_px(ppgtt, pt_vaddr);
+ mark_tlbs_dirty(ppgtt);
- pte = 0;
- if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
- break;
- pde = 0;
- }
+ if (USES_FULL_48BIT_PPGTT(dev_priv) &&
+ bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) {
+ free_pdp(dev_priv, pdp);
+ return true;
}
+
+ return false;
}
-static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+/* Removes entries from a single pml4.
+ * This is the top-level structure in 4-level page tables used on gen8+.
+ * Empty entries are always scratch pml4e.
+ */
+static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
+ struct i915_pml4 *pml4,
+ uint64_t start,
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch);
+ struct i915_page_directory_pointer *pdp;
+ uint64_t pml4e;
+ gen8_ppgtt_pml4e_t *pml4e_vaddr;
+ gen8_ppgtt_pml4e_t scratch_pml4e =
+ gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
- gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
- scratch_pte);
- } else {
- uint64_t pml4e;
- struct i915_page_directory_pointer *pdp;
+ GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
- gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
- gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
- scratch_pte);
+ gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
+ if (WARN_ON(!pml4->pdps[pml4e]))
+ break;
+
+ if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
+ __clear_bit(pml4e, pml4->used_pml4es);
+ pml4e_vaddr = kmap_px(pml4);
+ pml4e_vaddr[pml4e] = scratch_pml4e;
+ kunmap_px(ppgtt, pml4e_vaddr);
}
}
}
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
+ gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
+ else
+ gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
+}
+
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
@@ -809,12 +893,12 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
- cache_level, true);
+ cache_level);
if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+ if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev)))
break;
pde = 0;
}
@@ -837,7 +921,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
@@ -852,7 +936,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct drm_device *dev,
+static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
struct i915_page_directory *pd)
{
int i;
@@ -864,34 +948,34 @@ static void gen8_free_page_tables(struct drm_device *dev,
if (WARN_ON(!pd->page_table[i]))
continue;
- free_pt(dev, pd->page_table[i]);
+ free_pt(dev_priv, pd->page_table[i]);
pd->page_table[i] = NULL;
}
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
goto free_scratch_page;
}
- vm->scratch_pd = alloc_pd(dev);
+ vm->scratch_pd = alloc_pd(dev_priv);
if (IS_ERR(vm->scratch_pd)) {
ret = PTR_ERR(vm->scratch_pd);
goto free_pt;
}
- if (USES_FULL_48BIT_PPGTT(dev)) {
- vm->scratch_pdp = alloc_pdp(dev);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ vm->scratch_pdp = alloc_pdp(dev_priv);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -900,17 +984,17 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (USES_FULL_48BIT_PPGTT(dev))
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
free_pd:
- free_pd(dev, vm->scratch_pd);
+ free_pd(dev_priv, vm->scratch_pd);
free_pt:
- free_pt(dev, vm->scratch_pt);
+ free_pt(dev_priv, vm->scratch_pt);
free_scratch_page:
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return ret;
}
@@ -948,54 +1032,56 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- if (USES_FULL_48BIT_PPGTT(dev))
- free_pdp(dev, vm->scratch_pdp);
- free_pd(dev, vm->scratch_pd);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
+ free_pdp(dev_priv, vm->scratch_pdp);
+ free_pd(dev_priv, vm->scratch_pd);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
-static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
+static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
int i;
- for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+ for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
if (WARN_ON(!pdp->page_directory[i]))
continue;
- gen8_free_page_tables(dev, pdp->page_directory[i]);
- free_pd(dev, pdp->page_directory[i]);
+ gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
+ free_pd(dev_priv, pdp->page_directory[i]);
}
- free_pdp(dev, pdp);
+ free_pdp(dev_priv, pdp);
}
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
if (WARN_ON(!ppgtt->pml4.pdps[i]))
continue;
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
}
- cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
+ cleanup_px(dev_priv, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(to_i915(vm->dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
+ if (!USES_FULL_48BIT_PPGTT(dev_priv))
+ gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
else
gen8_ppgtt_cleanup_4lvl(ppgtt);
@@ -1026,7 +1112,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pts)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -1038,7 +1124,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
continue;
}
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt))
goto unwind_out;
@@ -1052,7 +1138,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pde, new_pts, I915_PDES)
- free_pt(dev, pd->page_table[pde]);
+ free_pt(dev_priv, pd->page_table[pde]);
return -ENOMEM;
}
@@ -1087,10 +1173,10 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pds)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
WARN_ON(!bitmap_empty(new_pds, pdpes));
@@ -1098,7 +1184,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
if (test_bit(pdpe, pdp->used_pdpes))
continue;
- pd = alloc_pd(dev);
+ pd = alloc_pd(dev_priv);
if (IS_ERR(pd))
goto unwind_out;
@@ -1112,7 +1198,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pdpe, new_pds, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
return -ENOMEM;
}
@@ -1140,7 +1226,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pdps)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;
@@ -1148,7 +1234,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
- pdp = alloc_pdp(dev);
+ pdp = alloc_pdp(dev_priv);
if (IS_ERR(pdp))
goto unwind_out;
@@ -1166,7 +1252,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- free_pdp(dev, pml4->pdps[pml4e]);
+ free_pdp(dev_priv, pml4->pdps[pml4e]);
return -ENOMEM;
}
@@ -1208,16 +1294,6 @@ err_out:
return -ENOMEM;
}
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
@@ -1225,12 +1301,12 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
int ret;
/* Wrap is never okay since we can only represent 48b, and we don't
@@ -1318,11 +1394,12 @@ err_out:
for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES)
- free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
+ free_pt(dev_priv,
+ pdp->page_directory[pdpe]->page_table[temp]);
}
for_each_set_bit(pdpe, new_page_dirs, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
mark_tlbs_dirty(ppgtt);
@@ -1373,7 +1450,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
+ gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
return ret;
}
@@ -1383,7 +1460,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(vm->dev))
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
@@ -1452,9 +1529,9 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t pml4e;
@@ -1474,7 +1551,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
unsigned long *new_page_dirs, *new_page_tables;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
int ret;
/* We allocate temp bitmap for page tables for no gain
@@ -1507,6 +1584,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int ret;
ret = gen8_init_scratch(&ppgtt->base);
@@ -1522,8 +1600,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->debug_dump = gen8_dump_ppgtt;
- if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ ret = setup_px(dev_priv, &ppgtt->pml4);
if (ret)
goto free_scratch;
@@ -1532,7 +1610,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = 1ULL << 48;
ppgtt->switch_mm = gen8_48b_mm_switch;
} else {
- ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
+ ret = __pdp_init(dev_priv, &ppgtt->pdp);
if (ret)
goto free_scratch;
@@ -1542,14 +1620,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
+ if (intel_vgpu_active(dev_priv)) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
@@ -1569,7 +1647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
u32 expected;
@@ -1724,29 +1802,30 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static void gen8_ppgtt_enable(struct drm_device *dev)
+static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
+ for_each_engine(engine, dev_priv, id) {
+ u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
-static void gen7_ppgtt_enable(struct drm_device *dev)
+static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
+ enum intel_engine_id id;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
@@ -1754,16 +1833,15 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
-static void gen6_ppgtt_enable(struct drm_device *dev)
+static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
@@ -1782,8 +1860,7 @@ static void gen6_ppgtt_enable(struct drm_device *dev)
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr, scratch_pte;
@@ -1794,7 +1871,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -1832,7 +1909,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
- vm->pte_encode(addr, cache_level, true, flags);
+ vm->pte_encode(addr, cache_level, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
@@ -1850,8 +1927,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
- struct drm_device *dev = vm->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
@@ -1881,7 +1957,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* We've already allocated a page table */
WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto unwind_out;
@@ -1929,7 +2005,7 @@ unwind_out:
struct i915_page_table *pt = ppgtt->pd.page_table[pde];
ppgtt->pd.page_table[pde] = vm->scratch_pt;
- free_pt(vm->dev, pt);
+ free_pt(dev_priv, pt);
}
mark_tlbs_dirty(ppgtt);
@@ -1938,16 +2014,16 @@ unwind_out:
static int gen6_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return PTR_ERR(vm->scratch_pt);
}
@@ -1958,17 +2034,17 @@ static int gen6_init_scratch(struct i915_address_space *vm)
static void gen6_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd = &ppgtt->pd;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -1976,7 +2052,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_for_all_pdes(pt, pd, pde)
if (pt != vm->scratch_pt)
- free_pt(dev, pt);
+ free_pt(dev_priv, pt);
gen6_free_scratch(vm);
}
@@ -1984,8 +2060,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool retried = false;
int ret;
@@ -2050,17 +2125,16 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
+ if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev))
+ else if (IS_HASWELL(dev_priv))
ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ppgtt->switch_mm = gen7_mm_switch;
else
BUG();
@@ -2111,8 +2185,10 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
}
static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv)
+ struct drm_i915_private *dev_priv,
+ const char *name)
{
+ i915_gem_timeline_init(dev_priv, &vm->timeline, name);
drm_mm_init(&vm->mm, vm->start, vm->total);
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
@@ -2120,44 +2196,50 @@ static void i915_address_space_init(struct i915_address_space *vm,
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
-static void gtt_write_workarounds(struct drm_device *dev)
+static void i915_address_space_fini(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ i915_gem_timeline_fini(&vm->timeline);
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+}
+static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+{
/* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_SKYLAKE(dev))
+ else if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+ struct drm_i915_file_private *file_priv,
+ const char *name)
{
int ret;
ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret == 0) {
kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv);
+ i915_address_space_init(&ppgtt->base, dev_priv, name);
ppgtt->base.file = file_priv;
}
return ret;
}
-int i915_ppgtt_init_hw(struct drm_device *dev)
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
- gtt_write_workarounds(dev);
+ gtt_write_workarounds(dev_priv);
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
@@ -2165,24 +2247,25 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
if (i915.enable_execlists)
return 0;
- if (!USES_PPGTT(dev))
+ if (!USES_PPGTT(dev_priv))
return 0;
- if (IS_GEN6(dev))
- gen6_ppgtt_enable(dev);
- else if (IS_GEN7(dev))
- gen7_ppgtt_enable(dev);
- else if (INTEL_INFO(dev)->gen >= 8)
- gen8_ppgtt_enable(dev);
+ if (IS_GEN6(dev_priv))
+ gen6_ppgtt_enable(dev_priv);
+ else if (IS_GEN7(dev_priv))
+ gen7_ppgtt_enable(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_ppgtt_enable(dev_priv);
else
- MISSING_CASE(INTEL_INFO(dev)->gen);
+ MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv)
+ struct drm_i915_file_private *fpriv,
+ const char *name)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
@@ -2191,7 +2274,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
+ ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
@@ -2202,7 +2285,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
return ppgtt;
}
-void i915_ppgtt_release(struct kref *kref)
+void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
@@ -2214,8 +2297,7 @@ void i915_ppgtt_release(struct kref *kref)
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
WARN_ON(!list_empty(&ppgtt->base.unbound_list));
- list_del(&ppgtt->base.global_link);
- drm_mm_takedown(&ppgtt->base.mm);
+ i915_address_space_fini(&ppgtt->base);
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
@@ -2239,11 +2321,12 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (INTEL_INFO(dev_priv)->gen < 6)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
@@ -2260,7 +2343,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
fault_reg & ~RING_FAULT_VALID);
}
}
- POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
+
+ /* Engine specific init may not have been done till this point. */
+ if (dev_priv->engine[RCS])
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
@@ -2273,33 +2359,32 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
*/
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return;
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
i915_ggtt_flush(dev_priv);
}
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- if (!dma_map_sg(&obj->base.dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return -ENOSPC;
+ if (dma_map_sg(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return 0;
- return 0;
+ return -ENOSPC;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@@ -2317,16 +2402,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2340,15 +2420,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gen8_pte_t __iomem *gtt_entries;
gen8_pte_t gtt_entry;
dma_addr_t addr;
- int rpm_atomic_seq;
int i = 0;
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = gen8_pte_encode(addr, level, true);
+ gtt_entry = gen8_pte_encode(addr, level);
gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
@@ -2368,8 +2445,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
struct insert_entries {
@@ -2408,16 +2483,11 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- iowrite32(vm->pte_encode(addr, level, true, flags), pte);
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
/*
@@ -2437,15 +2507,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
gen6_pte_t __iomem *gtt_entries;
gen6_pte_t gtt_entry;
dma_addr_t addr;
- int rpm_atomic_seq;
int i = 0;
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = vm->pte_encode(addr, level, true, flags);
+ gtt_entry = vm->pte_encode(addr, level, flags);
iowrite32(gtt_entry, &gtt_entries[i++]);
}
@@ -2464,23 +2531,16 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void nop_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t start, uint64_t length)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t start, uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -2488,9 +2548,6 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@@ -2498,21 +2555,16 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC,
- use_scratch);
+ I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -2520,9 +2572,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@@ -2530,13 +2579,11 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch, 0);
+ I915_CACHE_LLC, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_page(struct i915_address_space *vm,
@@ -2545,16 +2592,10 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2562,40 +2603,25 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
-
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool unused)
+ uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
- intel_gtt_clear_range(first_entry, num_entries);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
@@ -2608,8 +2634,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
/*
* Without aliasing PPGTT there's no difference between
@@ -2625,6 +2653,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
u32 pte_flags;
int ret;
@@ -2639,14 +2668,15 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm,
vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
}
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt =
- to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->pages, vma->node.start,
cache_level, pte_flags);
@@ -2657,21 +2687,24 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
- if (vma->flags & I915_VMA_GLOBAL_BIND)
+ if (vma->flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm,
- vma->node.start, size,
- true);
+ vma->node.start, size);
+ intel_runtime_pm_put(i915);
+ }
if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
- vma->node.start, size,
- true);
+ vma->node.start, size);
}
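
With the per-call rpm_atomic asserts removed from the low-level insert/clear helpers, the GGTT bind and unbind paths above now take an explicit runtime-PM reference around the PTE updates instead. Below is a minimal user-space sketch of that acquire/work/release shape; the wakeref counter and helper names are invented for illustration and are not the driver's actual API.

#include <stdio.h>

/* Toy wakeref counter standing in for intel_runtime_pm_get()/put(). */
static int wakeref;

static void rpm_get(void) { wakeref++; }
static void rpm_put(void) { wakeref--; }

/* Hardware access (the PTE writes) only happens while the wakeref is held. */
static void write_ptes(unsigned long start, unsigned long npages)
{
	printf("writing %lu PTEs at 0x%lx (wakeref held: %d)\n",
	       npages, start, wakeref);
}

static void bind_range(unsigned long start, unsigned long npages)
{
	rpm_get();			/* like intel_runtime_pm_get(i915) */
	write_ptes(start, npages);
	rpm_put();			/* like intel_runtime_pm_put(i915) */
}

int main(void)
{
	bind_range(0x100000, 16);
	return 0;
}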
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct device *kdev = &dev_priv->drm.pdev->dev;
@@ -2685,8 +2718,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
}
}
- dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
@@ -2717,6 +2749,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
*/
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long hole_start, hole_end;
+ struct i915_hw_ppgtt *ppgtt;
struct drm_mm_node *entry;
int ret;
@@ -2724,45 +2757,48 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
+ &ggtt->error_capture,
+ 4096, 0, -1,
+ 0, ggtt->mappable_end,
+ 0, 0);
+ if (ret)
+ return ret;
+
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start, true);
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
- true);
+ ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
- struct i915_hw_ppgtt *ppgtt;
-
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ if (!ppgtt) {
+ ret = -ENOMEM;
+ goto err;
+ }
ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ret;
- }
+ if (ret)
+ goto err_ppgtt;
- if (ppgtt->base.allocate_va_range)
+ if (ppgtt->base.allocate_va_range) {
ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
ppgtt->base.total);
- if (ret) {
- ppgtt->base.cleanup(&ppgtt->base);
- kfree(ppgtt);
- return ret;
+ if (ret)
+ goto err_ppgtt_cleanup;
}
ppgtt->base.clear_range(&ppgtt->base,
ppgtt->base.start,
- ppgtt->base.total,
- true);
+ ppgtt->base.total);
dev_priv->mm.aliasing_ppgtt = ppgtt;
WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
@@ -2770,6 +2806,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
}
return 0;
+
+err_ppgtt_cleanup:
+ ppgtt->base.cleanup(&ppgtt->base);
+err_ppgtt:
+ kfree(ppgtt);
+err:
+ drm_mm_remove_node(&ggtt->error_capture);
+ return ret;
}
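
The reworked i915_gem_init_ggtt() above unwinds its partially completed setup through a chain of goto labels in reverse order of acquisition (err_ppgtt_cleanup, err_ppgtt, err). Here is a compile-and-run sketch of the same unwinding pattern using invented resources rather than the driver's objects.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical three-step setup that unwinds in reverse order on failure. */
static int setup_example(void)
{
	void *node, *ppgtt;
	int ret;

	node = malloc(64);		/* step 1: reserve a node */
	if (!node)
		return -12;		/* -ENOMEM */

	ppgtt = malloc(128);		/* step 2: allocate the ppgtt */
	if (!ppgtt) {
		ret = -12;
		goto err_node;
	}

	ret = -22;			/* step 3: pretend init failed (-EINVAL) */
	if (ret)
		goto err_ppgtt;

	return 0;

err_ppgtt:
	free(ppgtt);			/* undo step 2 */
err_node:
	free(node);			/* undo step 1 */
	return ret;
}

int main(void)
{
	printf("setup_example() = %d\n", setup_example());
	return 0;
}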
/**
@@ -2788,11 +2832,15 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
i915_gem_cleanup_stolen(&dev_priv->drm);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
- drm_mm_takedown(&ggtt->base.mm);
- list_del(&ggtt->base.global_link);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_address_space_fini(&ggtt->base);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
ggtt->base.cleanup(&ggtt->base);
@@ -2881,6 +2929,7 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
struct pci_dev *pdev = ggtt->base.dev->pdev;
phys_addr_t phys_addr;
int ret;
@@ -2895,7 +2944,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(ggtt->base.dev))
+ if (IS_BROXTON(dev_priv))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -2904,9 +2953,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(ggtt->base.dev,
- &ggtt->base.scratch_page,
- GFP_DMA32);
+ ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -2995,7 +3042,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(vm->dev, &vm->scratch_page);
+ cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3190,11 +3237,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Subtract the guard page before address space initialization to
* shrink the range used by drm_mm.
*/
+ mutex_lock(&dev_priv->drm.struct_mutex);
ggtt->base.total -= PAGE_SIZE;
- i915_address_space_init(&ggtt->base, dev_priv);
+ i915_address_space_init(&ggtt->base, dev_priv, "[global]");
ggtt->base.total += PAGE_SIZE;
if (!HAS_LLC(dev_priv))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
dev_priv->ggtt.mappable_base,
@@ -3209,7 +3258,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
- ret = i915_gem_init_stolen(&dev_priv->drm);
+ ret = i915_gem_init_stolen(dev_priv);
if (ret)
goto out_gtt_cleanup;
@@ -3228,23 +3277,21 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj, *on;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_list) {
+ &dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@@ -3266,8 +3313,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
ggtt->base.closed = false;
- if (INTEL_INFO(dev)->gen >= 8) {
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (INTEL_GEN(dev_priv) >= 8) {
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -3275,7 +3322,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
struct i915_address_space *vm;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
@@ -3296,137 +3343,28 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
-static void
-i915_vma_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *rq)
-{
- const unsigned int idx = rq->engine->id;
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_read[idx]);
-
- GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
- i915_vma_clear_active(vma, idx);
- if (i915_vma_is_active(vma))
- return;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
- WARN_ON(i915_vma_unbind(vma));
-}
-
-void i915_vma_destroy(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(i915_vma_is_active(vma));
- GEM_BUG_ON(!i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->fence);
-
- list_del(&vma->vm_link);
- if (!i915_vma_is_ggtt(vma))
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
-void i915_vma_close(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_closed(vma));
- vma->flags |= I915_VMA_CLOSED;
-
- list_del_init(&vma->obj_link);
- if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
- WARN_ON(i915_vma_unbind(vma));
-}
-
-static struct i915_vma *
-__i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
- int i;
-
- GEM_BUG_ON(vm->closed);
-
- vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
- if (vma == NULL)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&vma->exec_list);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- init_request_active(&vma->last_read[i], i915_vma_retire);
- init_request_active(&vma->last_fence, NULL);
- list_add(&vma->vm_link, &vm->unbound_list);
- vma->vm = vm;
- vma->obj = obj;
- vma->size = obj->base.size;
-
- if (view) {
- vma->ggtt_view = *view;
- if (view->type == I915_GGTT_VIEW_PARTIAL) {
- vma->size = view->params.partial.size;
- vma->size <<= PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
- vma->size =
- intel_rotation_info_size(&view->params.rotated);
- vma->size <<= PAGE_SHIFT;
- }
- }
-
- if (i915_is_ggtt(vm)) {
- vma->flags |= I915_VMA_GGTT;
- } else {
- i915_ppgtt_get(i915_vm_to_ppgtt(vm));
- }
-
- list_add_tail(&vma->obj_link, &obj->vma_list);
- return vma;
-}
-
-static inline bool vma_matches(struct i915_vma *vma,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- if (vma->vm != vm)
- return false;
-
- if (!i915_vma_is_ggtt(vma))
- return true;
-
- if (!view)
- return vma->ggtt_view.type == 0;
-
- if (vma->ggtt_view.type != view->type)
- return false;
-
- return memcmp(&vma->ggtt_view.params,
- &view->params,
- sizeof(view->params)) == 0;
-}
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
-
- return __i915_vma_create(obj, vm, view);
-}
-
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
- struct i915_vma *vma;
+ struct rb_node *rb;
- list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
- if (vma_matches(vma, vm, view))
+ rb = obj->vma_tree.rb_node;
+ while (rb) {
+ struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
+ long cmp;
+
+ cmp = i915_vma_compare(vma, vm, view);
+ if (cmp == 0)
return vma;
+ if (cmp < 0)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+
return NULL;
}
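
i915_gem_obj_to_vma() now binary-searches the object's vma_tree using i915_vma_compare() instead of scanning the linear vma_list. The descent has the usual binary-search-tree shape: a negative comparison (node sorts before the key) goes right, a positive one goes left. A stand-alone sketch of that loop, with an invented node type and comparator standing in for the rb_node/i915_vma_compare pair:

#include <stdio.h>

struct node {
	long key;
	struct node *left, *right;
};

/* Stand-in for i915_vma_compare(): negative, zero or positive ordering. */
static long compare(const struct node *n, long key)
{
	return n->key - key;
}

/* Same descent shape as the new i915_gem_obj_to_vma(): follow the
 * comparison until an exact match or a NULL child is reached. */
static struct node *lookup(struct node *root, long key)
{
	struct node *rb = root;

	while (rb) {
		long cmp = compare(rb, key);

		if (cmp == 0)
			return rb;
		if (cmp < 0)
			rb = rb->right;
		else
			rb = rb->left;
	}

	return NULL;
}

int main(void)
{
	struct node right = { .key = 7 };
	struct node root = { .key = 3, .right = &right };

	printf("lookup(7): %s\n", lookup(&root, 7) ? "found" : "missing");
	return 0;
}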
@@ -3437,11 +3375,14 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
vma = i915_gem_obj_to_vma(obj, vm, view);
- if (!vma)
- vma = __i915_vma_create(obj, vm, view);
+ if (!vma) {
+ vma = i915_vma_create(obj, vm, view);
+ GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
+ }
GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
@@ -3507,7 +3448,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+ for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
page_addr_list[i++] = dma_addr;
GEM_BUG_ON(i != n_pages);
@@ -3543,35 +3484,47 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
- struct scatterlist *sg;
- struct sg_page_iter obj_sg_iter;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->params.partial.size;
+ unsigned int offset;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
- ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
+ iter = i915_gem_object_get_sg(obj,
+ view->params.partial.offset,
+ &offset);
+ GEM_BUG_ON(!iter);
+
sg = st->sgl;
st->nents = 0;
- for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
- view->params.partial.offset)
- {
- if (st->nents >= view->params.partial.size)
- break;
+ do {
+ unsigned int len;
- sg_set_page(sg, NULL, PAGE_SIZE, 0);
- sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
- sg_dma_len(sg) = PAGE_SIZE;
+ len = min(iter->length - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
- sg = sg_next(sg);
st->nents++;
- }
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ return st;
+ }
- return st;
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
err_sg_alloc:
kfree(st);
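
The rewritten intel_partial_pages() no longer copies page-by-page; it walks the source scatterlist chunk by chunk, clamping each copied length to whatever remains of the requested range and emitting at most one output entry per source chunk. A user-space sketch of the same slicing logic over an array of chunk lengths (all names here are invented for illustration):

#include <stdio.h>

/* Copy `count` pages starting `offset` pages into a list of chunks,
 * emitting at most one output entry per source chunk, like the new
 * intel_partial_pages() loop does over the object's sg_table. */
static unsigned int slice(const unsigned int *chunk_pages, unsigned int nchunks,
			  unsigned int offset, unsigned int count)
{
	unsigned int i = 0, out = 0;

	/* Skip whole chunks before the requested offset. */
	while (i < nchunks && offset >= chunk_pages[i])
		offset -= chunk_pages[i++];

	while (count && i < nchunks) {
		unsigned int len = chunk_pages[i] - offset;

		if (len > count)
			len = count;

		printf("entry %u: chunk %u, intra-chunk offset %u, %u pages\n",
		       out++, i, offset, len);

		count -= len;
		i++;
		offset = 0;	/* only the first chunk has an intra-chunk offset */
	}

	return out;
}

int main(void)
{
	static const unsigned int chunks[] = { 4, 2, 8 };

	slice(chunks, 3, 3, 6);	/* pages 3..8 of a 14-page object */
	return 0;
}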
@@ -3584,11 +3537,18 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
int ret = 0;
+ /* The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+ * must be the vma->pages. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
if (vma->pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
@@ -3612,94 +3572,3 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return ret;
}
-/**
- * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
- * @vma: VMA to map
- * @cache_level: mapping cache level
- * @flags: flags like global or local mapping
- *
- * DMA addresses are taken from the scatter-gather table of this object (or of
- * this VMA in case of non-default GGTT views) and PTE entries set up.
- * Note that DMA addresses are also the only part of the SG table we care about.
- */
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
-{
- u32 bind_flags;
- u32 vma_flags;
- int ret;
-
- if (WARN_ON(flags == 0))
- return -EINVAL;
-
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
-
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- if (flags & PIN_UPDATE)
- bind_flags |= vma_flags;
- else
- bind_flags &= ~vma_flags;
- if (bind_flags == 0)
- return 0;
-
- if (vma_flags == 0 && vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma);
- ret = vma->vm->allocate_va_range(vma->vm,
- vma->node.start,
- vma->node.size);
- if (ret)
- return ret;
- }
-
- ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
-
- vma->flags |= bind_flags;
- return 0;
-}
-
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
-{
- void __iomem *ptr;
-
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
-
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
- return IO_ERR_PTR(-ENODEV);
-
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
-
- ptr = vma->iomap;
- if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
- vma->node.start,
- vma->node.size);
- if (ptr == NULL)
- return IO_ERR_PTR(-ENOMEM);
-
- vma->iomap = ptr;
- }
-
- __i915_vma_pin(vma);
- return ptr;
-}
-
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
-{
- struct i915_vma *vma;
-
- vma = fetch_and_zero(p_vma);
- if (!vma)
- return;
-
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index ec78be2f8c77..4f35be4c26c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -35,7 +35,9 @@
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
+#include <linux/mm.h>
+#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#define I915_FENCE_REG_NONE -1
@@ -118,8 +120,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
- GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
+ GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -138,6 +140,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+struct sg_table;
+
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED,
@@ -168,133 +172,7 @@ extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
- struct drm_i915_fence_reg *fence;
- struct sg_table *pages;
- void __iomem *iomap;
- u64 size;
- u64 display_alignment;
-
- unsigned int flags;
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits.
- */
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW BIT(5)
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(6)
-#define I915_VMA_LOCAL_BIND BIT(7)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-
-#define I915_VMA_GGTT BIT(8)
-#define I915_VMA_CAN_FENCE BIT(9)
-#define I915_VMA_CLOSED BIT(10)
-
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_fence;
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
- * assumed in GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
-
- struct list_head obj_link; /* Link in the object's VMA list */
-
- /** This vma's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
-};
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
-
-static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_GGTT;
-}
-
-static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CAN_FENCE;
-}
-
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CLOSED;
-}
-
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
- return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
- return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
-{
- return vma->active & BIT(engine);
-}
-
-static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
- GEM_BUG_ON(upper_32_bits(vma->node.start));
- GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
- return lower_32_bits(vma->node.start);
-}
+struct i915_vma;
struct i915_page_dma {
struct page *page;
@@ -341,6 +219,7 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
+ struct i915_gem_timeline timeline;
struct drm_device *dev;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
@@ -395,7 +274,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 flags); /* Create a valid PTE */
+ u32 flags); /* Create a valid PTE */
/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
@@ -403,8 +282,7 @@ struct i915_address_space {
uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch);
+ uint64_t length);
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
uint64_t offset,
@@ -450,6 +328,8 @@ struct i915_ggtt {
bool do_idle_maps;
int mtrr;
+
+ struct drm_mm_node error_capture;
};
struct i915_hw_ppgtt {
@@ -602,16 +482,24 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, base);
+}
+
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
-int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv);
+ struct drm_i915_file_private *fpriv,
+ const char *name);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -624,11 +512,13 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
@@ -646,88 +536,4 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
#define PIN_OFFSET_FIXED BIT(11)
#define PIN_OFFSET_MASK (~4095)
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
- return 0;
-
- return __i915_vma_do_pin(vma, size, alignment, flags);
-}
-
-static inline int i915_vma_pin_count(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_PIN_MASK;
-}
-
-static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
-{
- return i915_vma_pin_count(vma);
-}
-
-static inline void __i915_vma_pin(struct i915_vma *vma)
-{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
-}
-
-static inline void __i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
- vma->flags--;
-}
-
-static inline void i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- __i915_vma_unpin(vma);
-}
-
-/**
- * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
- * @vma: VMA to iomap
- *
- * The passed in VMA has to be pinned in the global GTT mappable region.
- * An extra pinning of the VMA is acquired for the return iomapping,
- * the caller must call i915_vma_unpin_iomap to relinquish the pinning
- * after the iomapping is no longer required.
- *
- * Callers must hold the struct_mutex.
- *
- * Returns a valid iomapped pointer or ERR_PTR.
- */
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
-#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
-
-/**
- * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
- * @vma: VMA to unpin
- *
- * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
- *
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
- */
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- GEM_BUG_ON(vma->iomap == NULL);
- i915_vma_unpin(vma);
-}
-
-static inline struct page *i915_vma_first_page(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
- return sg_page(vma->pages->sgl);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
new file mode 100644
index 000000000000..4b3ff3e5b911
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+
+/* convert swiotlb segment size into sensible units (pages)! */
+#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
+
+static void internal_free_pages(struct sg_table *st)
+{
+ struct scatterlist *sg;
+
+ for (sg = st->sgl; sg; sg = __sg_next(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+
+ sg_free_table(st);
+ kfree(st);
+}
+
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int max_order;
+ gfp_t gfp;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+
+ max_order = MAX_ORDER;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
+ max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+#endif
+
+ gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ gfp &= ~__GFP_HIGHMEM;
+ gfp |= __GFP_DMA32;
+ }
+
+ do {
+ int order = min(fls(npages) - 1, max_order);
+ struct page *page;
+
+ do {
+ page = alloc_pages(gfp | (order ? QUIET : 0), order);
+ if (page)
+ break;
+ if (!order--)
+ goto err;
+
+ /* Limit subsequent allocations as well */
+ max_order = order;
+ } while (1);
+
+ sg_set_page(sg, page, PAGE_SIZE << order, 0);
+ st->nents++;
+
+ npages -= 1 << order;
+ if (!npages) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = __sg_next(sg);
+ } while (1);
+
+ if (i915_gem_gtt_prepare_pages(obj, st))
+ goto err;
+
+ /* Mark the pages as dontneed whilst they are still pinned. As soon
+ * as they are unpinned they are allowed to be reaped by the shrinker,
+ * and the caller is expected to repopulate - the contents of this
+ * object are only valid whilst active and pinned.
+ */
+ obj->mm.madv = I915_MADV_DONTNEED;
+ return st;
+
+err:
+ sg_mark_end(sg);
+ internal_free_pages(st);
+ return ERR_PTR(-ENOMEM);
+}
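+
+/*
+ * The allocation loop above tries the largest useful page order first and
+ * permanently lowers max_order whenever an order fails, so later chunks never
+ * retry orders that have already failed. A user-space sketch of that fallback
+ * loop (malloc stands in for alloc_pages; sizes and names are illustrative):
+ *
+ *	static int fill(unsigned int npages, unsigned int max_order)
+ *	{
+ *		while (npages) {
+ *			unsigned int order = max_order;
+ *			void *chunk;
+ *
+ *			while ((1u << order) > npages)
+ *				order--;	// never over-allocate
+ *
+ *			for (;;) {
+ *				chunk = malloc((size_t)4096 << order);
+ *				if (chunk)
+ *					break;
+ *				if (!order--)
+ *					return -1;	// single page failed
+ *				max_order = order;	// limit later chunks too
+ *			}
+ *
+ *			free(chunk);		// sketch only
+ *			npages -= 1u << order;
+ *		}
+ *		return 0;
+ *	}
+ */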
+
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ internal_free_pages(pages);
+
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = i915_gem_object_get_pages_internal,
+ .put_pages = i915_gem_object_put_pages_internal,
+};
+
+/**
+ * Creates a new object that wraps some internal memory for private use.
+ * This object is not backed by swappable storage, and as such its contents
+ * are volatile and only valid whilst pinned. If the object is reaped by the
+ * shrinker, its pages and data will be discarded. Equally, it is not a full
+ * GEM object and so not valid for access from userspace. This makes it useful
+ * for hardware interfaces like ringbuffers (which are pinned from the time
+ * the request is written to the time the hardware stops accessing it), but
+ * not for contexts (which need to be preserved when not active for later
+ * reuse). Note that it is not cleared upon allocation.
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+ unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(&i915->drm);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
new file mode 100644
index 000000000000..6a368de9d81e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+#include <linux/reservation.h>
+
+#include <drm/drm_vma_manager.h>
+#include <drm/drm_gem.h>
+#include <drm/drmP.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_gem_object_ops {
+ unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
+
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
+
+ int (*dmabuf_export)(struct drm_i915_gem_object *);
+ void (*release)(struct drm_i915_gem_object *);
+};
+
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ const struct drm_i915_gem_object_ops *ops;
+
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+ struct rb_root vma_tree;
+
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ struct list_head global_link;
+ union {
+ struct rcu_head rcu;
+ struct llist_node freed;
+ };
+
+ /**
+ * Whether the object is currently in the GGTT mmap.
+ */
+ struct list_head userfault_link;
+
+ /** Used in execbuf to temporarily hold a ref */
+ struct list_head obj_exec_link;
+
+ struct list_head batch_pool_link;
+
+ unsigned long flags;
+
+ /**
+ * Have we taken a reference for the object for incomplete GPU
+ * activity?
+ */
+#define I915_BO_ACTIVE_REF 0
+
+ /*
+ * Is the object to be mapped as read-only to the GPU
+ * Only honoured if hardware has relevant pte bit
+ */
+ unsigned long gt_ro:1;
+ unsigned int cache_level:3;
+ unsigned int cache_dirty:1;
+
+ atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once */
+ struct i915_gem_active frontbuffer_write;
+
+ /** Current tiling stride for the object, if it's tiled. */
+ unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
+
+ /** Count of VMA actually bound by this object */
+ unsigned int bind_count;
+ unsigned int active_count;
+ unsigned int pin_display;
+
+ struct {
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pages_pin_count;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * This is set if the object has been written to since the
+ * pages were last acquired.
+ */
+ bool dirty:1;
+
+ /**
+ * This is set if the object has been pinned due to unknown
+ * swizzling.
+ */
+ bool quirked:1;
+ } mm;
+
+ /** Breadcrumb of last rendering to the buffer.
+ * There can only be one writer, but we allow for multiple readers.
+ * If there is a writer that necessarily implies that all other
+ * read requests are complete - but we may only be lazily clearing
+ * the read requests. A read request is naturally the most recent
+ * request on a ring, so we may have two different write and read
+ * requests on one ring where the write request is older than the
+ * read request. This allows for the CPU to read from an active
+ * buffer by only waiting for the write to complete.
+ */
+ struct reservation_object *resv;
+
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned long framebuffer_references;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
+
+ struct reservation_object __builtin_resv;
+};
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+ /* Assert that to_intel_bo(NULL) == NULL */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+ return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @file: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @filp, NULL
+ * otherwise. This object is only valid whilst under the RCU read lock, and
+ * note carefully the object may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+ return idr_find(&file->object_idr, handle);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_i915_gem_object *obj;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, handle);
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
+}
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_reference(&obj->base);
+ return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+ __drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
+
+static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+ return obj->active_count;
+}
+
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & STRIDE_MASK;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ rcu_read_unlock();
+
+ if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+ engine = to_request(fence)->engine;
+ dma_fence_put(fence);
+
+ return engine;
+}
+
+#endif
+
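Illustrative sketch (not part of the patch): the header above adds an RCU-safe handle lookup plus explicit get/put helpers for GEM objects. The sketch below shows how they are intended to combine; the function name example_use_object is hypothetical, and it assumes the caller holds struct_mutex so that i915_gem_object_put() may take the locked unreference path in this revision.

static int example_use_object(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	/* Takes a reference unless the object is already being freed. */
	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (i915_gem_object_is_tiled(obj))
		/* Tiling mode and stride are packed into tiling_and_stride. */
		DRM_DEBUG("stride=%u\n", i915_gem_object_get_stride(obj));

	/* Caller is assumed to hold struct_mutex for the unreference here. */
	i915_gem_object_put(obj);
	return 0;
}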
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 95b7e9afd5f8..5af19b0bf713 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,17 +28,19 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-struct render_state {
+struct intel_render_state {
const struct intel_renderstate_rodata *rodata;
struct i915_vma *vma;
- u32 aux_batch_size;
- u32 aux_batch_offset;
+ u32 batch_offset;
+ u32 batch_size;
+ u32 aux_offset;
+ u32 aux_size;
};
static const struct intel_renderstate_rodata *
-render_state_get_rodata(const struct drm_i915_gem_request *req)
+render_state_get_rodata(const struct intel_engine_cs *engine)
{
- switch (INTEL_GEN(req->i915)) {
+ switch (INTEL_GEN(engine->i915)) {
case 6:
return &gen6_null_state;
case 7:
@@ -63,29 +65,26 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
*/
#define OUT_BATCH(batch, i, val) \
do { \
- if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \
- ret = -ENOSPC; \
- goto err_out; \
- } \
+ if ((i) >= PAGE_SIZE / sizeof(u32)) \
+ goto err; \
(batch)[(i)++] = (val); \
} while(0)
-static int render_state_setup(struct render_state *so)
+static int render_state_setup(struct intel_render_state *so,
+ struct drm_i915_private *i915)
{
- struct drm_device *dev = so->vma->vm->dev;
const struct intel_renderstate_rodata *rodata = so->rodata;
- const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
+ struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0;
- struct page *page;
+ unsigned int needs_clflush;
u32 *d;
int ret;
- ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (ret)
return ret;
- page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
- d = kmap(page);
+ d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -93,12 +92,10 @@ static int render_state_setup(struct render_state *so)
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->vma->node.start;
s = lower_32_bits(r);
- if (has_64bit_reloc) {
+ if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
- rodata->batch[i + 1] != 0) {
- ret = -EINVAL;
- goto err_out;
- }
+ rodata->batch[i + 1] != 0)
+ goto err;
d[i++] = s;
s = upper_32_bits(r);
@@ -110,12 +107,20 @@ static int render_state_setup(struct render_state *so)
d[i++] = s;
}
+ if (rodata->reloc[reloc_index] != -1) {
+ DRM_ERROR("only %d relocs resolved\n", reloc_index);
+ goto err;
+ }
+
+ so->batch_offset = so->vma->node.start;
+ so->batch_size = rodata->batch_items * sizeof(u32);
+
while (i % CACHELINE_DWORDS)
OUT_BATCH(d, i, MI_NOOP);
- so->aux_batch_offset = i * sizeof(u32);
+ so->aux_offset = i * sizeof(u32);
- if (HAS_POOLED_EU(dev)) {
+ if (HAS_POOLED_EU(i915)) {
/*
* We always program 3x6 pool config but depending upon which
* subslice is disabled HW drops down to appropriate config
@@ -143,88 +148,133 @@ static int render_state_setup(struct render_state *so)
}
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
- so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
-
+ so->aux_size = i * sizeof(u32) - so->aux_offset;
+ so->aux_offset += so->batch_offset;
/*
* Since we are sending length, we need to strictly conform to
* all requirements. For Gen2 this must be a multiple of 8.
*/
- so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
-
- kunmap(page);
-
- ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
- if (ret)
- return ret;
-
- if (rodata->reloc[reloc_index] != -1) {
- DRM_ERROR("only %d relocs resolved\n", reloc_index);
- return -EINVAL;
- }
+ so->aux_size = ALIGN(so->aux_size, 8);
- return 0;
+ if (needs_clflush)
+ drm_clflush_virt_range(d, i * sizeof(u32));
+ kunmap_atomic(d);
-err_out:
- kunmap(page);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+out:
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
+
+err:
+ kunmap_atomic(d);
+ ret = -EINVAL;
+ goto out;
}
#undef OUT_BATCH
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
+int i915_gem_render_state_init(struct intel_engine_cs *engine)
{
- struct render_state so;
+ struct intel_render_state *so;
+ const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
int ret;
- if (WARN_ON(req->engine->id != RCS))
- return -ENOENT;
+ if (engine->id != RCS)
+ return 0;
- so.rodata = render_state_get_rodata(req);
- if (!so.rodata)
+ rodata = render_state_get_rodata(engine);
+ if (!rodata)
return 0;
- if (so.rodata->batch_items * 4 > 4096)
+ if (rodata->batch_items * 4 > 4096)
return -EINVAL;
- obj = i915_gem_object_create(&req->i915->drm, 4096);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ so = kmalloc(sizeof(*so), GFP_KERNEL);
+ if (!so)
+ return -ENOMEM;
- so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
- if (IS_ERR(so.vma)) {
- ret = PTR_ERR(so.vma);
- goto err_obj;
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto err_free;
}
- ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
- if (ret)
+ so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(so->vma)) {
+ ret = PTR_ERR(so->vma);
goto err_obj;
+ }
+
+ so->rodata = rodata;
+ engine->render_state = so;
+ return 0;
+
+err_obj:
+ i915_gem_object_put(obj);
+err_free:
+ kfree(so);
+ return ret;
+}
+
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+{
+ struct intel_render_state *so;
+ int ret;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
- ret = render_state_setup(&so);
+ so = req->engine->render_state;
+ if (!so)
+ return 0;
+
+ /* Recreate the page after shrinking */
+ if (!so->vma->obj->mm.pages)
+ so->batch_offset = -1;
+
+ ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
- goto err_unpin;
+ return ret;
+
+ if (so->vma->node.start != so->batch_offset) {
+ ret = render_state_setup(so, req->i915);
+ if (ret)
+ goto err_unpin;
+ }
- ret = req->engine->emit_bb_start(req, so.vma->node.start,
- so.rodata->batch_items * 4,
+ ret = req->engine->emit_bb_start(req,
+ so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
- if (so.aux_batch_size > 8) {
+ if (so->aux_size > 8) {
ret = req->engine->emit_bb_start(req,
- (so.vma->node.start +
- so.aux_batch_offset),
- so.aux_batch_size,
+ so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, req, 0);
+ i915_vma_move_to_active(so->vma, req, 0);
err_unpin:
- i915_vma_unpin(so.vma);
-err_obj:
- i915_gem_object_put(obj);
+ i915_vma_unpin(so->vma);
return ret;
}
+
+void i915_gem_render_state_fini(struct intel_engine_cs *engine)
+{
+ struct intel_render_state *so;
+ struct drm_i915_gem_object *obj;
+
+ so = fetch_and_zero(&engine->render_state);
+ if (!so)
+ return;
+
+ obj = so->vma->obj;
+
+ i915_vma_close(so->vma);
+ __i915_gem_object_release_unless_active(obj);
+
+ kfree(so);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 18cce3f06e9c..87481845799d 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,6 +26,8 @@
struct drm_i915_gem_request;
-int i915_gem_render_state_init(struct drm_i915_gem_request *req);
+int i915_gem_render_state_init(struct intel_engine_cs *engine);
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
+void i915_gem_render_state_fini(struct intel_engine_cs *engine);
#endif /* _I915_GEM_RENDER_STATE_H_ */
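Illustrative sketch (not part of the patch): after this change the render-state lifetime is split into a one-time per-engine init, a per-request emit, and a fini at engine teardown. The wrapper function names below are hypothetical; only the i915_gem_render_state_* calls come from the header above.

static int example_engine_setup(struct intel_engine_cs *engine)
{
	/* Allocate the golden render state object once per engine (RCS only). */
	return i915_gem_render_state_init(engine);
}

static int example_first_request(struct drm_i915_gem_request *req)
{
	/* Pin, rewrite if the VMA moved, and emit the batch into this request. */
	return i915_gem_render_state_emit(req);
}

static void example_engine_teardown(struct intel_engine_cs *engine)
{
	/* Release the per-engine object unless it is still active. */
	i915_gem_render_state_fini(engine);
}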
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8832f8ec1583..27e8f257fb39 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -23,31 +23,26 @@
*/
#include <linux/prefetch.h>
+#include <linux/dma-fence-array.h>
#include "i915_drv.h"
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
return "i915";
}
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
- /* Timelines are bound by eviction to a VM. However, since
- * we only have a global seqno at the moment, we only have
- * a single timeline. Note that each timeline will have
- * multiple execution contexts (fence contexts) as we allow
- * engines within a single timeline to execute in parallel.
- */
- return "global";
+ return to_request(fence)->timeline->common->name;
}
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
{
return i915_gem_request_completed(to_request(fence));
}
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
if (i915_fence_signaled(fence))
return false;
@@ -56,63 +51,27 @@ static bool i915_fence_enable_signaling(struct fence *fence)
return true;
}
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
- signed long timeout_jiffies)
-{
- s64 timeout_ns, *timeout;
- int ret;
-
- if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
- timeout_ns = jiffies_to_nsecs(timeout_jiffies);
- timeout = &timeout_ns;
- } else {
- timeout = NULL;
- }
-
- ret = i915_wait_request(to_request(fence),
- interruptible, timeout,
- NO_WAITBOOST);
- if (ret == -ETIME)
- return 0;
-
- if (ret < 0)
- return ret;
-
- if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
- timeout_jiffies = nsecs_to_jiffies(timeout_ns);
-
- return timeout_jiffies;
-}
-
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
+ signed long timeout)
{
- snprintf(str, size, "%u", fence->seqno);
+ return i915_wait_request(to_request(fence), interruptible, timeout);
}
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%u",
- intel_engine_get_seqno(to_request(fence)->engine));
-}
-
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
{
struct drm_i915_gem_request *req = to_request(fence);
kmem_cache_free(req->i915->requests, req);
}
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
.get_driver_name = i915_fence_get_driver_name,
.get_timeline_name = i915_fence_get_timeline_name,
.enable_signaling = i915_fence_enable_signaling,
.signaled = i915_fence_signaled,
.wait = i915_fence_wait,
.release = i915_fence_release,
- .fence_value_str = i915_fence_value_str,
- .timeline_value_str = i915_fence_timeline_value_str,
};
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -154,6 +113,82 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+ return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+ struct i915_dependency *dep)
+{
+ kmem_cache_free(i915->dependencies, dep);
+}
+
+static void
+__i915_priotree_add_dependency(struct i915_priotree *pt,
+ struct i915_priotree *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &pt->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+}
+
+static int
+i915_priotree_add_dependency(struct drm_i915_private *i915,
+ struct i915_priotree *pt,
+ struct i915_priotree *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc(i915);
+ if (!dep)
+ return -ENOMEM;
+
+ __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
+ return 0;
+}
+
+static void
+i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
+{
+ struct i915_dependency *dep, *next;
+
+ GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));
+
+ /* Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+}
+
+static void
+i915_priotree_init(struct i915_priotree *pt)
+{
+ INIT_LIST_HEAD(&pt->signalers_list);
+ INIT_LIST_HEAD(&pt->waiters_list);
+ RB_CLEAR_NODE(&pt->node);
+ pt->priority = INT_MIN;
+}
+
void i915_gem_retire_noop(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
@@ -164,8 +199,17 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
struct i915_gem_active *active, *next;
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_sw_fence_done(&request->submit));
+ GEM_BUG_ON(!i915_sw_fence_done(&request->execute));
+ GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!request->i915->gt.active_requests);
+
trace_i915_gem_request_retire(request);
- list_del(&request->link);
+
+ spin_lock_irq(&request->engine->timeline->lock);
+ list_del_init(&request->link);
+ spin_unlock_irq(&request->engine->timeline->lock);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -177,6 +221,12 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
*/
list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
+ if (!--request->i915->gt.active_requests) {
+ GEM_BUG_ON(!request->i915->gt.awake);
+ mod_delayed_work(request->i915->wq,
+ &request->i915->gt.idle_work,
+ msecs_to_jiffies(100));
+ }
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -214,6 +264,10 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
}
i915_gem_context_put(request->ctx);
+
+ dma_fence_signal(&request->fence);
+
+ i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request);
}
@@ -223,10 +277,11 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(list_empty(&req->link));
+ if (list_empty(&req->link))
+ return;
do {
- tmp = list_first_entry(&engine->request_list,
+ tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
i915_gem_request_retire(tmp);
@@ -253,39 +308,50 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
return 0;
}
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
+ struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_engine(engine, dev_priv) {
- ret = intel_engine_idle(engine,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
- if (ret)
- return ret;
- }
- i915_gem_retire_requests(dev_priv);
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
- while (intel_kick_waiters(dev_priv) ||
- intel_kick_signalers(dev_priv))
- yield();
+ if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+ while (intel_breadcrumbs_busy(i915))
+ cond_resched(); /* spin until threads are complete */
}
+ atomic_set(&timeline->next_seqno, seqno);
/* Finally reset hw state */
- for_each_engine(engine, dev_priv)
- intel_engine_init_seqno(engine, seqno);
+ for_each_engine(engine, i915, id)
+ intel_engine_init_global_seqno(engine, seqno);
+
+ list_for_each_entry(timeline, &i915->gt.timelines, link) {
+ for_each_engine(engine, i915, id) {
+ struct intel_timeline *tl = &timeline->engine[id];
+
+ memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+ }
+ }
return 0;
}
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
@@ -293,52 +359,108 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev_priv, seqno - 1);
- if (ret)
+ return i915_gem_init_global_seqno(dev_priv, seqno - 1);
+}
+
+static int reserve_global_seqno(struct drm_i915_private *i915)
+{
+ u32 active_requests = ++i915->gt.active_requests;
+ u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+ int ret;
+
+ /* Reservation is fine until we need to wrap around */
+ if (likely(next_seqno + active_requests > next_seqno))
+ return 0;
+
+ ret = i915_gem_init_global_seqno(i915, 0);
+ if (ret) {
+ i915->gt.active_requests--;
return ret;
+ }
- dev_priv->next_seqno = seqno;
return 0;
}
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
- /* reserve 0 for non-seqno */
- if (unlikely(dev_priv->next_seqno == 0)) {
- int ret;
+ /* next_seqno only incremented under a mutex */
+ return ++tl->next_seqno.counter;
+}
- ret = i915_gem_init_seqno(dev_priv, 0);
- if (ret)
- return ret;
+static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
+{
+ return atomic_inc_return(&tl->next_seqno);
+}
- dev_priv->next_seqno = 1;
- }
+void __i915_gem_request_submit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_timeline *timeline;
+ u32 seqno;
- *seqno = dev_priv->next_seqno++;
- return 0;
+ /* Transfer from per-context onto the global per-engine timeline */
+ timeline = engine->timeline;
+ GEM_BUG_ON(timeline == request->timeline);
+ assert_spin_locked(&timeline->lock);
+
+ seqno = timeline_get_seqno(timeline->common);
+ GEM_BUG_ON(!seqno);
+ GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
+
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
+ request->previous_seqno = timeline->last_submitted_seqno;
+ timeline->last_submitted_seqno = seqno;
+
+ /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ request->global_seqno = seqno;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ intel_engine_enable_signaling(request);
+ spin_unlock(&request->lock);
+
+ GEM_BUG_ON(!request->global_seqno);
+ engine->emit_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
+
+ spin_lock(&request->timeline->lock);
+ list_move_tail(&request->link, &timeline->requests);
+ spin_unlock(&request->timeline->lock);
+
+ i915_sw_fence_commit(&request->execute);
+}
+
+void i915_gem_request_submit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ __i915_gem_request_submit(request);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct drm_i915_gem_request *request =
- container_of(fence, typeof(*request), submit);
-
- /* Will be called from irq-context when using foreign DMA fences */
+ if (state == FENCE_COMPLETE) {
+ struct drm_i915_gem_request *request =
+ container_of(fence, typeof(*request), submit);
- switch (state) {
- case FENCE_COMPLETE:
- request->engine->last_submitted_seqno = request->fence.seqno;
request->engine->submit_request(request);
- break;
-
- case FENCE_FREE:
- break;
}
return NOTIFY_DONE;
}
+static int __i915_sw_fence_call
+execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
/**
* i915_gem_request_alloc - allocate a request structure
*
@@ -357,9 +479,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *req;
- u32 seqno;
int ret;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
* and restart.
@@ -368,10 +491,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
return ERR_PTR(ret);
+ ret = reserve_global_seqno(dev_priv);
+ if (ret)
+ return ERR_PTR(ret);
+
/* Move the oldest request to the slab-cache (if not in use!) */
- req = list_first_entry_or_null(&engine->request_list,
+ req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
- if (req && i915_gem_request_completed(req))
+ if (req && __i915_gem_request_completed(req))
i915_gem_request_retire(req);
/* Beware: Dragons be flying overhead.
@@ -382,13 +509,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* of being read by __i915_gem_active_get_rcu(). As such,
* we have to be very careful when overwriting the contents. During
* the RCU lookup, we chase the request->engine pointer,
- * read the request->fence.seqno and increment the reference count.
+ * read the request->global_seqno and increment the reference count.
*
* The reference count is incremented atomically. If it is zero,
* the lookup knows the request is unallocated and complete. Otherwise,
* it is either still in use, or has been reallocated and reset
- * with fence_init(). This increment is safe for release as we check
- * that the request we have a reference to and matches the active
+ * with dma_fence_init(). This increment is safe for release as we
+ * check that the request we have a reference to and matches the active
* request.
*
* Before we increment the refcount, we chase the request->engine
@@ -403,21 +530,30 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* Do not use kmem_cache_zalloc() here!
*/
req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_unreserve;
+ }
- ret = i915_gem_get_seqno(dev_priv, &seqno);
- if (ret)
- goto err;
+ req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+ GEM_BUG_ON(req->timeline == engine->timeline);
spin_lock_init(&req->lock);
- fence_init(&req->fence,
- &i915_fence_ops,
- &req->lock,
- engine->fence_context,
- seqno);
+ dma_fence_init(&req->fence,
+ &i915_fence_ops,
+ &req->lock,
+ req->timeline->fence_context,
+ __timeline_get_seqno(req->timeline->common));
i915_sw_fence_init(&req->submit, submit_notify);
+ i915_sw_fence_init(&req->execute, execute_notify);
+ /* Ensure that the execute fence completes after the submit fence -
+ * as we complete the execute fence from within the submit fence
+ * callback, its completion would otherwise be visible first.
+ */
+ i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
+
+ i915_priotree_init(&req->priotree);
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
@@ -425,6 +561,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->ctx = i915_gem_context_get(ctx);
/* No zalloc, must clear what we need by hand */
+ req->global_seqno = 0;
req->previous_context = NULL;
req->file_priv = NULL;
req->batch = NULL;
@@ -437,6 +574,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* away, e.g. because a GPU scheduler has deferred it.
*/
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+ GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
if (i915.enable_execlists)
ret = intel_logical_ring_alloc_request_extras(req);
@@ -456,8 +594,9 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
err_ctx:
i915_gem_context_put(ctx);
-err:
kmem_cache_free(dev_priv->requests, req);
+err_unreserve:
+ dev_priv->gt.active_requests--;
return ERR_PTR(ret);
}
@@ -465,15 +604,36 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
- int idx, ret;
+ int ret;
GEM_BUG_ON(to == from);
- if (to->engine == from->engine)
+ if (to->engine->schedule) {
+ ret = i915_priotree_add_dependency(to->i915,
+ &to->priotree,
+ &from->priotree);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (to->timeline == from->timeline)
return 0;
- idx = intel_engine_sync_index(from->engine, to->engine);
- if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+ if (to->engine == from->engine) {
+ ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+ &from->submit,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (!from->global_seqno) {
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
return 0;
trace_i915_gem_ring_sync_to(to, from);
@@ -491,7 +651,54 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
}
- from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+ to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
+ return 0;
+}
+
+int
+i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+ struct dma_fence *fence)
+{
+ struct dma_fence_array *array;
+ int ret;
+ int i;
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return 0;
+
+ if (dma_fence_is_i915(fence))
+ return i915_gem_request_await_request(req, to_request(fence));
+
+ if (!dma_fence_is_array(fence)) {
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ fence, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ /* Note that if the fence-array was created in signal-on-any mode,
+ * we should *not* decompose it into its individual fences. However,
+ * we don't currently store which mode the fence-array is operating
+ * in. Fortunately, the only user of signal-on-any is private to
+ * amdgpu and we should not see any incoming fence-array from
+ * sync-file being in signal-on-any mode.
+ */
+
+ array = to_dma_fence_array(fence);
+ for (i = 0; i < array->num_fences; i++) {
+ struct dma_fence *child = array->fences[i];
+
+ if (dma_fence_is_i915(child))
+ ret = i915_gem_request_await_request(req,
+ to_request(child));
+ else
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ child, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -520,43 +727,52 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write)
{
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct dma_fence *excl;
+ int ret = 0;
if (write) {
- active_mask = i915_gem_object_get_active(obj);
- active = obj->last_read;
+ struct dma_fence **shared;
+ unsigned int count, i;
+
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ ret = i915_gem_request_await_dma_fence(to, shared[i]);
+ if (ret)
+ break;
+
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
} else {
- active_mask = 1;
- active = &obj->last_write;
+ excl = reservation_object_get_excl_rcu(obj->resv);
}
- for_each_active(active_mask, idx) {
- struct drm_i915_gem_request *request;
- int ret;
+ if (excl) {
+ if (ret == 0)
+ ret = i915_gem_request_await_dma_fence(to, excl);
- request = i915_gem_active_peek(&active[idx],
- &obj->base.dev->struct_mutex);
- if (!request)
- continue;
-
- ret = i915_gem_request_await_request(to, request);
- if (ret)
- return ret;
+ dma_fence_put(excl);
}
- return 0;
+ return ret;
}
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- dev_priv->gt.active_engines |= intel_engine_flag(engine);
if (dev_priv->gt.awake)
return;
+ GEM_BUG_ON(!dev_priv->gt.active_requests);
+
intel_runtime_pm_get_noresume(dev_priv);
dev_priv->gt.awake = true;
@@ -579,11 +795,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
+ struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
- u32 request_start;
- u32 reserved_tail;
- int ret;
+ int err;
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_gem_request_add(request);
/*
@@ -591,8 +807,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- request_start = ring->tail;
- reserved_tail = request->reserved_space;
request->reserved_space = 0;
/*
@@ -603,10 +817,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* what.
*/
if (flush_caches) {
- ret = engine->emit_flush(request, EMIT_FLUSH);
+ err = engine->emit_flush(request, EMIT_FLUSH);
/* Not allowed to fail! */
- WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+ WARN(err, "engine->emit_flush() failed: %d!\n", err);
}
/* Record the position of the start of the breadcrumb so that
@@ -614,20 +828,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
+ err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+ GEM_BUG_ON(err);
request->postfix = ring->tail;
-
- /* Not allowed to fail! */
- ret = engine->emit_request(request);
- WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
-
- /* Sanity check that the reserved size was large enough. */
- ret = ring->tail - request_start;
- if (ret < 0)
- ret += ring->size;
- WARN_ONCE(ret > reserved_tail,
- "Not enough space reserved (%d bytes) "
- "for adding the request (%d bytes)\n",
- reserved_tail, ret);
+ ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
@@ -635,21 +839,46 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* see a more recent value in the hws than we are tracking.
*/
- prev = i915_gem_active_raw(&engine->last_request,
+ prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
- if (prev)
+ if (prev) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
+ if (engine->schedule)
+ __i915_priotree_add_dependency(&request->priotree,
+ &prev->priotree,
+ &request->dep,
+ 0);
+ }
+
+ spin_lock_irq(&timeline->lock);
+ list_add_tail(&request->link, &timeline->requests);
+ spin_unlock_irq(&timeline->lock);
+
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
+ request->fence.seqno));
+
+ timeline->last_submitted_seqno = request->fence.seqno;
+ i915_gem_active_set(&timeline->last_request, request);
- request->emitted_jiffies = jiffies;
- request->previous_seqno = engine->last_pending_seqno;
- engine->last_pending_seqno = request->fence.seqno;
- i915_gem_active_set(&engine->last_request, request);
- list_add_tail(&request->link, &engine->request_list);
list_add_tail(&request->ring_link, &ring->request_list);
+ request->emitted_jiffies = jiffies;
i915_gem_mark_busy(engine);
+ /* Let the backend know a new request has arrived that may need
+ * to adjust the existing execution schedule due to a high priority
+ * request - i.e. we may want to preempt the current request in order
+ * to run a high priority dependency chain *before* we can execute this
+ * request.
+ *
+ * This is called before the request is ready to run so that we can
+ * decide whether to preempt the entire chain so that it is ready to
+ * run at the earliest possible convenience.
+ */
+ if (engine->schedule)
+ engine->schedule(request, request->ctx->priority);
+
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -714,7 +943,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
timeout_us += local_clock_us(&cpu);
do {
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
if (signal_pending_state(state, current))
@@ -729,76 +958,102 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
return false;
}
+static long
+__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
+ unsigned int flags,
+ long timeout)
+{
+ const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
+ DEFINE_WAIT(reset);
+ DEFINE_WAIT(wait);
+
+ if (flags & I915_WAIT_LOCKED)
+ add_wait_queue(q, &reset);
+
+ do {
+ prepare_to_wait(&request->execute.wait, &wait, state);
+
+ if (i915_sw_fence_done(&request->execute))
+ break;
+
+ if (flags & I915_WAIT_LOCKED &&
+ i915_reset_in_progress(&request->i915->gpu_error)) {
+ __set_current_state(TASK_RUNNING);
+ i915_reset(request->i915);
+ reset_wait_queue(q, &reset);
+ continue;
+ }
+
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
+ break;
+ }
+
+ timeout = io_schedule_timeout(timeout);
+ } while (timeout);
+ finish_wait(&request->execute.wait, &wait);
+
+ if (flags & I915_WAIT_LOCKED)
+ remove_wait_queue(q, &reset);
+
+ return timeout;
+}
+
/**
* i915_wait_request - wait until execution of request has finished
- * @req: duh!
+ * @req: the request to wait upon
* @flags: how to wait
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: client to charge for RPS boosting
+ * @timeout: how long to wait in jiffies
*
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
+ * i915_wait_request() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
*
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
+ * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
+ * in via the flags, and vice versa if the struct_mutex is not held, the caller
+ * must not specify that the wait is locked.
+ *
+ * Returns the remaining time (in jiffies) if the request completed, which may
+ * be zero or -ETIME if the request is unfinished after the timeout expires.
+ * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
+ * pending before the request completes.
*/
-int i915_wait_request(struct drm_i915_gem_request *req,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
struct intel_wait wait;
- unsigned long timeout_remain;
- int ret = 0;
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
- GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+ GEM_BUG_ON(debug_locks &&
+ !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
+ GEM_BUG_ON(timeout < 0);
if (i915_gem_request_completed(req))
- return 0;
+ return timeout;
- timeout_remain = MAX_SCHEDULE_TIMEOUT;
- if (timeout) {
- if (WARN_ON(*timeout < 0))
- return -EINVAL;
-
- if (*timeout == 0)
- return -ETIME;
-
- /* Record current time in case interrupted, or wedged */
- timeout_remain = nsecs_to_jiffies_timeout(*timeout);
- *timeout += ktime_get_raw_ns();
- }
+ if (!timeout)
+ return -ETIME;
trace_i915_gem_request_wait_begin(req);
- /* This client is about to stall waiting for the GPU. In many cases
- * this is undesirable and limits the throughput of the system, as
- * many clients cannot continue processing user input/output whilst
- * blocked. RPS autotuning may take tens of milliseconds to respond
- * to the GPU load and thus incurs additional latency for the client.
- * We can circumvent that by promoting the GPU frequency to maximum
- * before we wait. This makes the GPU throttle up much more quickly
- * (good for benchmarks and user experience, e.g. window animations),
- * but at a cost of spending more power processing the workload
- * (bad for battery). Not all clients even want their results
- * immediately and for them we should just let the GPU select its own
- * frequency to maximise efficiency. To prevent a single client from
- * forcing the clocks too high for the whole system, we only allow
- * each client to waitboost once in a busy period.
- */
- if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
- gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+ if (!i915_sw_fence_done(&req->execute)) {
+ timeout = __i915_request_wait_for_execute(req, flags, timeout);
+ if (timeout < 0)
+ goto complete;
+
+ GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
+ }
+ GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+ GEM_BUG_ON(!req->global_seqno);
/* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
@@ -808,7 +1063,7 @@ int i915_wait_request(struct drm_i915_gem_request *req,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- intel_wait_init(&wait, req->fence.seqno);
+ intel_wait_init(&wait, req->global_seqno);
if (intel_engine_add_wait(req->engine, &wait))
/* In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
@@ -818,16 +1073,17 @@ int i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
if (signal_pending_state(state, current)) {
- ret = -ERESTARTSYS;
+ timeout = -ERESTARTSYS;
break;
}
- timeout_remain = io_schedule_timeout(timeout_remain);
- if (timeout_remain == 0) {
- ret = -ETIME;
+ if (!timeout) {
+ timeout = -ETIME;
break;
}
+ timeout = io_schedule_timeout(timeout);
+
if (intel_wait_complete(&wait))
break;
@@ -874,74 +1130,32 @@ wakeup:
complete:
trace_i915_gem_request_wait_end(req);
- if (timeout) {
- *timeout -= ktime_get_raw_ns();
- if (*timeout < 0)
- *timeout = 0;
-
- /*
- * Apparently ktime isn't accurate enough and occasionally has a
- * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
- * things up to make the test happy. We allow up to 1 jiffy.
- *
- * This is a regrssion from the timespec->ktime conversion.
- */
- if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
- *timeout = 0;
- }
-
- if (IS_RPS_USER(rps) &&
- req->fence.seqno == req->engine->last_submitted_seqno) {
- /* The GPU is now idle and this client has stalled.
- * Since no other client has submitted a request in the
- * meantime, assume that this client is the only one
- * supplying work to the GPU but is unable to keep that
- * work supplied because it is waiting. Since the GPU is
- * then never kept fully busy, RPS autoclocking will
- * keep the clocks relatively low, causing further delays.
- * Compensate by giving the synchronous client credit for
- * a waitboost next time.
- */
- spin_lock(&req->i915->rps.client_lock);
- list_del_init(&rps->link);
- spin_unlock(&req->i915->rps.client_lock);
- }
-
- return ret;
+ return timeout;
}
-static bool engine_retire_requests(struct intel_engine_cs *engine)
+static void engine_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request, *next;
- list_for_each_entry_safe(request, next, &engine->request_list, link) {
- if (!i915_gem_request_completed(request))
- return false;
+ list_for_each_entry_safe(request, next,
+ &engine->timeline->requests, link) {
+ if (!__i915_gem_request_completed(request))
+ return;
i915_gem_request_retire(request);
}
-
- return true;
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (dev_priv->gt.active_engines == 0)
+ if (!dev_priv->gt.active_requests)
return;
- GEM_BUG_ON(!dev_priv->gt.awake);
-
- for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
- if (engine_retire_requests(engine))
- dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-
- if (dev_priv->gt.active_engines == 0)
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
- msecs_to_jiffies(100));
+ for_each_engine(engine, dev_priv, id)
+ engine_retire_requests(engine);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 974bd7bcc801..e2b077df2da0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -25,11 +25,14 @@
#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "i915_gem.h"
#include "i915_sw_fence.h"
+struct drm_file;
+struct drm_i915_gem_object;
+
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
@@ -41,6 +44,33 @@ struct intel_signal_node {
struct intel_wait wait;
};
+struct i915_dependency {
+ struct i915_priotree *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+/* Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ */
+struct i915_priotree {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct rb_node node;
+ int priority;
+#define I915_PRIORITY_MAX 1024
+#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
/**
* Request queue structure.
*
@@ -62,7 +92,7 @@ struct intel_signal_node {
* The requests are reference counted.
*/
struct drm_i915_gem_request {
- struct fence fence;
+ struct dma_fence fence;
spinlock_t lock;
/** On Which ring this request was generated */
@@ -81,10 +111,39 @@ struct drm_i915_gem_request {
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
struct intel_signal_node signaling;
+ /* Fences for the various phases in the request's lifetime.
+ *
+ * The submit fence is used to await upon all of the request's
+ * dependencies. When it is signaled, the request is ready to run.
+ * It is used by the driver to then queue the request for execution.
+ *
+ * The execute fence is used to signal when the request has been
+ * sent to hardware.
+ *
+ * It is illegal for the submit fence of one request to wait upon the
+ * execute fence of an earlier request. It should be sufficient to
+ * wait upon the submit fence of the earlier request.
+ */
struct i915_sw_fence submit;
+ struct i915_sw_fence execute;
wait_queue_t submitq;
+ wait_queue_t execq;
+
+ /* A list of everyone we wait upon, and everyone who waits upon us.
+ * Even though we will not be submitted to the hardware before the
+ * submit fence is signaled (it waits for all external events as well
+ * as our own requests), the scheduler still needs to know the
+ * dependency tree for the lifetime of the request (from execbuf
+ * to retirement), i.e. bidirectional dependency information for the
+ * request not tied to individual fences.
+ */
+ struct i915_priotree priotree;
+ struct i915_dependency dep;
+
+ u32 global_seqno;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -140,14 +199,11 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
-
- /** Link in the execlist submission queue, guarded by execlist_lock. */
- struct list_head execlist_link;
};
-extern const struct fence_ops i915_fence_ops;
+extern const struct dma_fence_ops i915_fence_ops;
-static inline bool fence_is_i915(struct fence *fence)
+static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
return fence->ops == &i915_fence_ops;
}
@@ -159,43 +215,31 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
-static inline u32
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
- return req ? req->fence.seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
- return req ? req->engine : NULL;
-}
-
static inline struct drm_i915_gem_request *
-to_request(struct fence *fence)
+to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
- GEM_BUG_ON(fence && !fence_is_i915(fence));
+ GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
return container_of(fence, struct drm_i915_gem_request, fence);
}
static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
- return to_request(fence_get(&req->fence));
+ return to_request(dma_fence_get(&req->fence));
}
static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
- return to_request(fence_get_rcu(&req->fence));
+ return to_request(dma_fence_get_rcu(&req->fence));
}
static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
- fence_put(&req->fence);
+ dma_fence_put(&req->fence);
}
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -214,6 +258,8 @@ int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write);
+int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+ struct dma_fence *fence);
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
@@ -221,18 +267,21 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request_no_flush(req) \
__i915_add_request(req, false)
+void __i915_gem_request_submit(struct drm_i915_gem_request *request);
+void i915_gem_request_submit(struct drm_i915_gem_request *request);
+
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
-int i915_wait_request(struct drm_i915_gem_request *req,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -245,17 +294,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+__i915_gem_request_started(const struct drm_i915_gem_request *req)
{
+ GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
}
static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+i915_gem_request_started(const struct drm_i915_gem_request *req)
{
+ if (!req->global_seqno)
+ return false;
+
+ return __i915_gem_request_started(req);
+}
+
+static inline bool
+__i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+ GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- req->fence.seqno);
+ req->global_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+ if (!req->global_seqno)
+ return false;
+
+ return __i915_gem_request_completed(req);
}
bool __i915_spin_request(const struct drm_i915_gem_request *request,
@@ -263,7 +332,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
- return (i915_gem_request_started(request) &&
+ return (__i915_gem_request_started(request) &&
__i915_spin_request(request, state, timeout_us));
}
@@ -497,7 +566,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* compiler.
*
* The atomic operation at the heart of
- * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+ * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
* atomic_inc_not_zero() which is only a full memory barrier
* when successful. That is, if i915_gem_request_get_rcu()
* returns the request (and so with the reference counted
@@ -552,53 +621,13 @@ i915_gem_active_isset(const struct i915_gem_active *active)
}
/**
- * i915_gem_active_is_idle - report whether the active tracker is idle
- * @active - the active tracker
- *
- * i915_gem_active_is_idle() returns true if the active tracker is currently
- * unassigned or if the request is complete (but not yet retired). Requires
- * the caller to hold struct_mutex (but that can be relaxed if desired).
- */
-static inline bool
-i915_gem_active_is_idle(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return !i915_gem_active_peek(active, mutex);
-}
-
-/**
* i915_gem_active_wait - waits until the request is completed
* @active - the active request on which to wait
- *
- * i915_gem_active_wait() waits until the request is completed before
- * returning. Note that it does not guarantee that the request is
- * retired first, see i915_gem_active_retire().
- *
- * i915_gem_active_wait() returns immediately if the active
- * request is already complete.
- */
-static inline int __must_check
-i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
-{
- struct drm_i915_gem_request *request;
-
- request = i915_gem_active_peek(active, mutex);
- if (!request)
- return 0;
-
- return i915_wait_request(request,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
-}
-
-/**
- * i915_gem_active_wait_unlocked - waits until the request is completed
- * @active - the active request on which to wait
* @flags - how to wait
* @timeout - how long to wait at most
* @rps - userspace client to charge for a waitboost
*
- * i915_gem_active_wait_unlocked() waits until the request is completed before
+ * i915_gem_active_wait() waits until the request is completed before
* returning, without requiring any locks to be held. Note that it does not
* retire any requests before returning.
*
@@ -614,21 +643,18 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
* Returns 0 if successful, or a negative error code.
*/
static inline int
-i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
struct drm_i915_gem_request *request;
- int ret = 0;
+ long ret = 0;
request = i915_gem_active_get_unlocked(active);
if (request) {
- ret = i915_wait_request(request, flags, timeout, rps);
+ ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(request);
}
- return ret;
+ return ret < 0 ? ret : 0;
}
/**
@@ -645,7 +671,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex)
{
struct drm_i915_gem_request *request;
- int ret;
+ long ret;
request = i915_gem_active_raw(active, mutex);
if (!request)
@@ -653,8 +679,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
ret = i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
- if (ret)
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
return ret;
list_del_init(&active->link);
@@ -665,24 +691,6 @@ i915_gem_active_retire(struct i915_gem_active *active,
return 0;
}
-/* Convenience functions for peeking at state inside active's request whilst
- * guarded by the struct_mutex.
- */
-
-static inline uint32_t
-i915_gem_active_get_seqno(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
-}
-
-static inline struct intel_engine_cs *
-i915_gem_active_get_engine(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
-}
-
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1c237d02f30b..a6fc1bdc48af 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,6 +48,20 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ *unlock = false;
+ } else {
+ *unlock = true;
+ }
+
+ return true;
+}
+
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -66,8 +80,11 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- /* Only shmemfs objects are backed by swap */
- if (!obj->base.filp)
+ if (!obj->mm.pages)
+ return false;
+
+ /* Consider only shrinkable objects. */
+ if (!i915_gem_object_is_shrinkable(obj))
return false;
/* Only report true if by unbinding the object and putting its pages
@@ -78,7 +95,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (obj->pages_pin_count > obj->bind_count)
+ if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false;
if (any_vma_pinned(obj))
@@ -88,7 +105,14 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
- return swap_available() || obj->madv == I915_MADV_DONTNEED;
+ return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
+}
+
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+{
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ return !READ_ONCE(obj->mm.pages);
}
/**
@@ -128,6 +152,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+ return 0;
trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv);
@@ -171,40 +199,51 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
- global_list))) {
- list_move_tail(&obj->global_list, &still_in_list);
+ global_link))) {
+ list_move_tail(&obj->global_link, &still_in_list);
+ if (!obj->mm.pages) {
+ list_del_init(&obj->global_link);
+ continue;
+ }
if (flags & I915_SHRINK_PURGEABLE &&
- obj->madv != I915_MADV_DONTNEED)
+ obj->mm.madv != I915_MADV_DONTNEED)
continue;
if (flags & I915_SHRINK_VMAPS &&
- !is_vmalloc_addr(obj->mapping))
+ !is_vmalloc_addr(obj->mm.mapping))
continue;
- if ((flags & I915_SHRINK_ACTIVE) == 0 &&
- i915_gem_object_is_active(obj))
+ if (!(flags & I915_SHRINK_ACTIVE) &&
+ (i915_gem_object_is_active(obj) ||
+ obj->framebuffer_references))
continue;
if (!can_release_pages(obj))
continue;
- i915_gem_object_get(obj);
-
- /* For the unbound phase, this should be a no-op! */
- i915_gem_object_unbind(obj);
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- i915_gem_object_put(obj);
+ if (unsafe_drop_pages(obj)) {
+ /* May arrive from get_pages on another bo */
+ mutex_lock_nested(&obj->mm.lock,
+ I915_MM_SHRINKER);
+ if (!obj->mm.pages) {
+ __i915_gem_object_invalidate(obj);
+ list_del_init(&obj->global_link);
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+ mutex_unlock(&obj->mm.lock);
+ }
}
- list_splice(&still_in_list, phase->list);
+ list_splice_tail(&still_in_list, phase->list);
}
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
+ if (unlock)
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
@@ -238,19 +277,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
return freed;
}
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
-
- *unlock = false;
- } else
- *unlock = true;
-
- return true;
-}
-
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -267,11 +293,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
i915_gem_retire_requests(dev_priv);
count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -372,13 +398,19 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ if (!obj->mm.pages)
+ continue;
+
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+ if (!obj->mm.pages)
+ continue;
+
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 59989e8ee5dc..ebaa941c83af 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -89,9 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
@@ -109,13 +108,13 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*
*/
base = 0;
- if (INTEL_INFO(dev)->gen >= 3) {
+ if (INTEL_GEN(dev_priv) >= 3) {
u32 bsm;
pci_read_config_dword(pdev, INTEL_BSM, &bsm);
base = bsm & INTEL_BSM_MASK;
- } else if (IS_I865G(dev)) {
+ } else if (IS_I865G(dev_priv)) {
u32 tseg_size = 0;
u16 toud = 0;
u8 tmp;
@@ -138,7 +137,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I865_TOUD, &toud);
base = (toud << 16) + tseg_size;
- } else if (IS_I85X(dev)) {
+ } else if (IS_I85X(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -154,7 +153,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_845G(dev)) {
+ } else if (IS_845G(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -178,7 +177,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I830(dev)) {
+ } else if (IS_I830(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -204,7 +203,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
+ !IS_G4X(dev_priv)) {
struct {
u32 start, end;
} stolen[2] = {
@@ -214,7 +214,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u64 ggtt_start, ggtt_end;
ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -252,7 +252,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
+ r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -263,14 +263,14 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
*/
- r = devm_request_mem_region(dev->dev, base + 1,
+ r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
ggtt->stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN3(dev)) {
+ if (r == NULL && !IS_GEN3(dev_priv)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)ggtt->stolen_size);
base = 0;
@@ -407,9 +407,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-int i915_gem_init_stolen(struct drm_device *dev)
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top;
@@ -417,7 +416,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
mutex_init(&dev_priv->mm.stolen_lock);
#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
+ if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
@@ -426,7 +425,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (ggtt->stolen_size == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
if (dev_priv->mm.stolen_base == 0)
return 0;
@@ -437,7 +436,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
case 3:
break;
case 4:
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
g4x_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@@ -456,7 +455,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
break;
default:
if (IS_BROADWELL(dev_priv) ||
- IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
else
@@ -514,12 +513,10 @@ i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct sg_table *st;
struct scatterlist *sg;
- DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > ggtt->stolen_size - size);
+ GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -528,11 +525,11 @@ i915_pages_create_for_stolen(struct drm_device *dev,
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
@@ -545,31 +542,36 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st;
}
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
- BUG();
- return -EINVAL;
+ return i915_pages_create_for_stolen(obj->base.dev,
+ obj->stolen->start,
+ obj->stolen->size);
}
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- /* Should only be called during free */
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ /* Should only be called from i915_gem_object_release_stolen() */
+ sg_free_table(pages);
+ kfree(pages);
}
-
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
- if (obj->stolen) {
- i915_gem_stolen_remove_node(dev_priv, obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
+ GEM_BUG_ON(!stolen);
+
+ __i915_gem_object_unpin_pages(obj);
+
+ i915_gem_stolen_remove_node(dev_priv, stolen);
+ kfree(stolen);
}
+
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
@@ -589,19 +591,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
- obj->pages = i915_pages_create_for_stolen(dev,
- stolen->start, stolen->size);
- if (obj->pages == NULL)
- goto cleanup;
-
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
-
- i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
-
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->cache_level = HAS_LLC(to_i915(dev)) ?
+ I915_CACHE_LLC : I915_CACHE_NONE;
+
+ if (i915_gem_object_pin_pages(obj))
+ goto cleanup;
return obj;
@@ -621,7 +617,6 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
- DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
@@ -697,10 +692,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err;
+ goto err_pages;
}
/* To simplify the initialisation sequence between KMS and GTT,
@@ -714,20 +713,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- goto err;
+ goto err_pages;
}
- vma->pages = obj->pages;
+ vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
obj->bind_count++;
- list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- i915_gem_object_pin_pages(obj);
-
return obj;
+err_pages:
+ i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a14b1e3d4c78..c85e7b06bdba 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -60,7 +60,8 @@
/* Check pitch constraints for all chips & tiling formats */
static bool
-i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+i915_tiling_ok(struct drm_i915_private *dev_priv,
+ int stride, int size, int tiling_mode)
{
int tile_width;
@@ -71,8 +72,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode > I915_TILING_LAST)
return false;
- if (IS_GEN2(dev) ||
- (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ if (IS_GEN2(dev_priv) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
tile_width = 128;
else
tile_width = 512;
@@ -80,17 +81,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
- if (IS_GEN3(dev)) {
+ if (IS_GEN3(dev_priv)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
@@ -103,7 +104,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return false;
/* 965+ just needs multiples of tile width */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
@@ -198,14 +199,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- if (!i915_tiling_ok(dev,
+ if (!i915_tiling_ok(dev_priv,
args->stride, obj->base.size, args->tiling_mode)) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
err = -EBUSY;
@@ -260,14 +259,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!err) {
struct i915_vma *vma;
- if (obj->pages &&
- obj->madv == I915_MADV_WILLNEED &&
+ mutex_lock(&obj->mm.lock);
+ if (obj->mm.pages &&
+ obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (args->tiling_mode == I915_TILING_NONE)
- i915_gem_object_unpin_pages(obj);
- if (!i915_gem_object_is_tiled(obj))
- i915_gem_object_pin_pages(obj);
+ if (args->tiling_mode == I915_TILING_NONE) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (!i915_gem_object_is_tiled(obj)) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
}
+ mutex_unlock(&obj->mm.lock);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!vma->fence)
@@ -301,8 +308,6 @@ err:
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
return err;
}
@@ -326,12 +331,19 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (obj) {
+ args->tiling_mode =
+ READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
+ err = 0;
+ }
+ rcu_read_unlock();
+ if (unlikely(err))
+ return err;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
switch (args->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@@ -339,11 +351,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
+ default:
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
- default:
- DRM_ERROR("unknown tiling mode\n");
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
@@ -356,6 +367,5 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- i915_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
new file mode 100644
index 000000000000..bf8a471b61e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static int __i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name,
+ struct lock_class_key *lockclass,
+ const char *lockname)
+{
+ unsigned int i;
+ u64 fences;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ timeline->i915 = i915;
+ timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
+ if (!timeline->name)
+ return -ENOMEM;
+
+ list_add(&timeline->link, &i915->gt.timelines);
+
+ /* Called during early_init before we know how many engines there are */
+ fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+ struct intel_timeline *tl = &timeline->engine[i];
+
+ tl->fence_context = fences++;
+ tl->common = timeline;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
+#else
+ spin_lock_init(&tl->lock);
+#endif
+ init_request_active(&tl->last_request, NULL);
+ INIT_LIST_HEAD(&tl->requests);
+ }
+
+ return 0;
+}
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915, timeline, name,
+ &class, "&timeline->lock");
+}
+
+int i915_gem_timeline_init__global(struct drm_i915_private *i915)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915,
+ &i915->gt.global_timeline,
+ "[execution]",
+ &class, "&global_timeline->lock");
+}
+
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+{
+ lockdep_assert_held(&tl->i915->drm.struct_mutex);
+
+ list_del(&tl->link);
+ kfree(tl->name);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
new file mode 100644
index 000000000000..98d99a62b4ae
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_TIMELINE_H
+#define I915_GEM_TIMELINE_H
+
+#include <linux/list.h>
+
+#include "i915_gem_request.h"
+
+struct i915_gem_timeline;
+
+struct intel_timeline {
+ u64 fence_context;
+ u32 last_submitted_seqno;
+
+ spinlock_t lock;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /* Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request, users must carefully acquire a reference to
+ * the request using i915_gem_active_get_request_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_gem_active last_request;
+ u32 sync_seqno[I915_NUM_ENGINES];
+
+ struct i915_gem_timeline *common;
+};
+
+struct i915_gem_timeline {
+ struct list_head link;
+ atomic_t next_seqno;
+
+ struct drm_i915_private *i915;
+ const char *name;
+
+ struct intel_timeline engine[I915_NUM_ENGINES];
+};
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *tl,
+ const char *name);
+int i915_gem_timeline_init__global(struct drm_i915_private *i915);
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index c6f780f5abc9..107ddf51065e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -61,33 +61,26 @@ struct i915_mmu_object {
bool attached;
};
-static void wait_rendering(struct drm_i915_gem_object *obj)
-{
- unsigned long active = __I915_BO_ACTIVE(obj);
- int idx;
-
- for_each_active(active, idx)
- i915_gem_active_wait_unlocked(&obj->last_read[idx],
- 0, NULL, NULL);
-}
-
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
struct drm_device *dev = obj->base.dev;
- wait_rendering(obj);
+ i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
mutex_lock(&dev->struct_mutex);
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
- if (obj->pages != NULL) {
- /* We are inside a kthread context and can't be interrupted */
- WARN_ON(i915_gem_object_unbind(obj));
- WARN_ON(i915_gem_object_put_pages(obj));
- }
+ /* We are inside a kthread context and can't be interrupted */
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ WARN_ONCE(obj->mm.pages,
+ "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+ obj->bind_count,
+ atomic_read(&obj->mm.pages_pin_count),
+ obj->pin_display);
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
@@ -436,24 +429,25 @@ err:
return ret;
}
-static int
+static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages)
{
+ struct sg_table *pages;
int ret;
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = st_set_pages(&pages, pvec, num_pages);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- ret = i915_gem_gtt_prepare_object(obj);
+ ret = i915_gem_gtt_prepare_pages(obj, pages);
if (ret) {
- sg_free_table(obj->pages);
- kfree(obj->pages);
- obj->pages = NULL;
+ sg_free_table(pages);
+ kfree(pages);
+ return ERR_PTR(ret);
}
- return ret;
+ return pages;
}
static int
@@ -497,7 +491,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- struct drm_device *dev = obj->base.dev;
const int npages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
int pinned, ret;
@@ -533,33 +526,32 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
}
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&obj->mm.lock);
if (obj->userptr.work == &work->work) {
+ struct sg_table *pages = ERR_PTR(ret);
+
if (pinned == npages) {
- ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
- if (ret == 0) {
- list_add_tail(&obj->global_list,
- &to_i915(dev)->mm.unbound_list);
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
+ pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+ if (!IS_ERR(pages)) {
+ __i915_gem_object_set_pages(obj, pages);
pinned = 0;
+ pages = NULL;
}
}
- obj->userptr.work = ERR_PTR(ret);
- }
- obj->userptr.workers--;
- i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
+ obj->userptr.work = ERR_CAST(pages);
+ }
+ mutex_unlock(&obj->mm.lock);
release_pages(pvec, pinned, 0);
drm_free_large(pvec);
+ i915_gem_object_put(obj);
put_task_struct(work->task);
kfree(work);
}
-static int
+static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
bool *active)
{
@@ -584,15 +576,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
* that error back to this function through
* obj->userptr.work = ERR_PTR.
*/
- if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
- return -EAGAIN;
-
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
obj->userptr.work = &work->work;
- obj->userptr.workers++;
work->obj = i915_gem_object_get(obj);
@@ -603,14 +591,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
schedule_work(&work->work);
*active = true;
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
-static int
+static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
+ struct sg_table *pages;
int pinned, ret;
bool active;
@@ -634,15 +623,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return PTR_ERR(obj->userptr.work);
+ return ERR_CAST(obj->userptr.work);
else
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
/* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true);
if (ret)
- return ret;
+ return ERR_PTR(ret);
pvec = NULL;
pinned = 0;
@@ -651,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_TEMPORARY);
if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -660,21 +649,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
active = false;
if (pinned < 0)
- ret = pinned, pinned = 0;
+ pages = ERR_PTR(pinned), pinned = 0;
else if (pinned < num_pages)
- ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+ pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
else
- ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
- if (ret) {
+ pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+ if (IS_ERR(pages)) {
__i915_gem_userptr_set_active(obj, active);
release_pages(pvec, pinned, 0);
}
drm_free_large(pvec);
- return ret;
+ return pages;
}
static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@@ -682,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false);
- if (obj->madv != I915_MADV_WILLNEED)
- obj->dirty = 0;
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ obj->mm.dirty = false;
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
- for_each_sgt_page(page, sgt_iter, obj->pages) {
- if (obj->dirty)
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
set_page_dirty(page);
mark_page_accessed(page);
put_page(page);
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
@@ -717,7 +707,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
@@ -762,12 +753,13 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+ if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
@@ -816,7 +808,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 334f15df7c8d..ae84aa4b1467 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,6 +28,8 @@
*/
#include <generated/utsrelease.h>
+#include <linux/stop_machine.h>
+#include <linux/zlib.h>
#include "i915_drv.h"
static const char *engine_str(int engine)
@@ -172,6 +174,110 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
+#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+ memset(zstream, 0, sizeof(*zstream));
+
+ zstream->workspace =
+ kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!zstream->workspace)
+ return false;
+
+ if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+ kfree(zstream->workspace);
+ return false;
+ }
+
+ return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+ void *src,
+ struct drm_i915_error_object *dst)
+{
+ zstream->next_in = src;
+ zstream->avail_in = PAGE_SIZE;
+
+ do {
+ if (zstream->avail_out == 0) {
+ unsigned long page;
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ dst->pages[dst->page_count++] = (void *)page;
+
+ zstream->next_out = (void *)page;
+ zstream->avail_out = PAGE_SIZE;
+ }
+
+ if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+ return -EIO;
+ } while (zstream->avail_in);
+
+ /* Fallback to uncompressed if we increase size? */
+ if (0 && zstream->total_out > zstream->total_in)
+ return -E2BIG;
+
+ return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+ struct drm_i915_error_object *dst)
+{
+ if (dst) {
+ zlib_deflate(zstream, Z_FINISH);
+ dst->unused = zstream->avail_out;
+ }
+
+ zlib_deflateEnd(zstream);
+ kfree(zstream->workspace);
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+ err_puts(m, ":");
+}
+
+#else
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+ return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+ void *src,
+ struct drm_i915_error_object *dst)
+{
+ unsigned long page;
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ dst->pages[dst->page_count++] =
+ memcpy((void *)page, src, PAGE_SIZE);
+
+ return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+ struct drm_i915_error_object *dst)
+{
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+ err_puts(m, "~");
+}
+
+#endif
+
static void print_error_buffers(struct drm_i915_error_state_buf *m,
const char *name,
struct drm_i915_error_buffer *err,
@@ -228,13 +334,57 @@ static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
return "unknown";
}
+static void error_print_instdone(struct drm_i915_error_state_buf *m,
+ struct drm_i915_error_engine *ee)
+{
+ int slice;
+ int subslice;
+
+ err_printf(m, " INSTDONE: 0x%08x\n",
+ ee->instdone.instdone);
+
+ if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+ return;
+
+ err_printf(m, " SC_INSTDONE: 0x%08x\n",
+ ee->instdone.slice_common);
+
+ if (INTEL_GEN(m->i915) <= 6)
+ return;
+
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
+}
+
+static void error_print_request(struct drm_i915_error_state_buf *m,
+ const char *prefix,
+ struct drm_i915_error_request *erq)
+{
+ if (!erq->seqno)
+ return;
+
+ err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+ prefix, erq->pid,
+ erq->context, erq->seqno,
+ jiffies_to_msecs(jiffies - erq->jiffies),
+ erq->head, erq->tail);
+}
+
static void error_print_engine(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
{
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
- err_printf(m, " HEAD: 0x%08x\n", ee->head);
- err_printf(m, " TAIL: 0x%08x\n", ee->tail);
+ err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
+ err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
+ ee->tail, ee->rq_post, ee->rq_tail);
err_printf(m, " CTL: 0x%08x\n", ee->ctl);
err_printf(m, " MODE: 0x%08x\n", ee->mode);
err_printf(m, " HWS: 0x%08x\n", ee->hws);
@@ -242,7 +392,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
(u32)(ee->acthd>>32), (u32)ee->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
- err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone);
+
+ error_print_instdone(m, ee);
+
if (ee->batchbuffer) {
u64 start = ee->batchbuffer->gtt_offset;
u64 end = start + ee->batchbuffer->gtt_size;
@@ -263,17 +415,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
- err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[0],
- ee->semaphore_seqno[0]);
- err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[1],
- ee->semaphore_seqno[1]);
- if (HAS_VEBOX(m->i915)) {
- err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[2],
- ee->semaphore_seqno[2]);
- }
+ err_printf(m, " SYNC_0: 0x%08x\n",
+ ee->semaphore_mboxes[0]);
+ err_printf(m, " SYNC_1: 0x%08x\n",
+ ee->semaphore_mboxes[1]);
+ if (HAS_VEBOX(m->i915))
+ err_printf(m, " SYNC_2: 0x%08x\n",
+ ee->semaphore_mboxes[2]);
}
if (USES_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -296,6 +444,8 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(ee->hangcheck_action),
ee->hangcheck_score);
+ error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
+ error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -307,40 +457,83 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
+static int
+ascii85_encode_len(int len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static bool
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return false;
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return true;
+}
+
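/*
 * Illustrative, standalone sketch, not part of the kernel patch above: with
 * the encoding added here, every 32-bit word of an error object is printed
 * as five base-85 digits ('!' + digit, most significant digit first), a bare
 * 'z' stands in for an all-zero word, and each object dump begins with the
 * ':' (zlib-compressed) or '~' (uncompressed) marker emitted by
 * err_compression_marker(). A hypothetical userspace decoder for one such
 * group, assuming that layout, could look like this:
 */
#include <stdint.h>
#include <stdio.h>

static int ascii85_decode_word(const char *in, uint32_t *out)
{
	uint32_t v = 0;
	int i;

	if (*in == 'z') {		/* shorthand for an all-zero word */
		*out = 0;
		return 1;		/* one character consumed */
	}

	for (i = 0; i < 5; i++) {
		if (in[i] < '!' || in[i] > '!' + 84)
			return -1;	/* not a base-85 digit */
		v = v * 85 + (in[i] - '!');
	}

	*out = v;
	return 5;			/* five characters consumed */
}

int main(void)
{
	uint32_t v;

	if (ascii85_decode_word("z", &v) > 0)	/* decodes to 0x00000000 */
		printf("0x%08x\n", v);
	return 0;
}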
static void print_error_obj(struct drm_i915_error_state_buf *m,
+ struct intel_engine_cs *engine,
+ const char *name,
struct drm_i915_error_object *obj)
{
- int page, offset, elt;
+ char out[6];
+ int page;
+
+ if (!obj)
+ return;
- for (page = offset = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
+ if (name) {
+ err_printf(m, "%s --- %s = 0x%08x %08x\n",
+ engine ? engine->name : "global", name,
+ upper_32_bits(obj->gtt_offset),
+ lower_32_bits(obj->gtt_offset));
+ }
+
+ err_compression_marker(m);
+ for (page = 0; page < obj->page_count; page++) {
+ int i, len;
+
+ len = PAGE_SIZE;
+ if (page == obj->page_count - 1)
+ len -= obj->unused;
+ len = ascii85_encode_len(len);
+
+ for (i = 0; i < len; i++) {
+ if (ascii85_encode(obj->pages[page][i], out))
+ err_puts(m, out);
+ else
+ err_puts(m, "z");
}
}
+ err_puts(m, "\n");
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_SEMICOLON
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
- struct drm_device *dev = error_priv->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
- int i, j, offset, elt;
int max_hangcheck_score;
+ int i, j;
if (!error) {
err_printf(m, "no error state collected\n");
@@ -348,9 +541,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "Time: %ld s %ld us\n",
+ error->time.tv_sec, error->time.tv_usec);
+ err_printf(m, "Boottime: %ld s %ld us\n",
+ error->boottime.tv_sec, error->boottime.tv_usec);
+ err_printf(m, "Uptime: %ld s %ld us\n",
+ error->uptime.tv_sec, error->uptime.tv_usec);
err_print_capabilities(m, &error->device_info);
max_hangcheck_score = 0;
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -375,7 +572,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev)) {
+ if (HAS_CSR(dev_priv)) {
struct intel_csr *csr = &dev_priv->csr;
err_printf(m, "DMC loaded: %s\n",
@@ -387,11 +584,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
- } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+ } else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
@@ -402,21 +599,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
- err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
- error->extra_instdone[i]);
-
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -438,7 +631,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j].name);
+ dev_priv->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -456,7 +649,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i].name);
+ err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
err_printf(m, " (submitted by %s [%d])",
ee->comm,
@@ -464,37 +657,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
-
- obj = ee->wa_batchbuffer;
- if (obj) {
- err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
+ print_error_obj(m, dev_priv->engine[i], NULL, obj);
}
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_requests);
- for (j = 0; j < ee->num_requests; j++) {
- err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
- ee->requests[j].pid,
- ee->requests[j].seqno,
- ee->requests[j].jiffies,
- ee->requests[j].head,
- ee->requests[j].tail);
- }
+ for (j = 0; j < ee->num_requests; j++)
+ error_print_request(m, " ", &ee->requests[j]);
}
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i].name);
+ dev_priv->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -504,83 +683,31 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- if ((obj = ee->ringbuffer)) {
- err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "ringbuffer", ee->ringbuffer);
- if ((obj = ee->hws_page)) {
- u64 hws_offset = obj->gtt_offset;
- u32 *hws_page = &obj->pages[0][0];
+ print_error_obj(m, dev_priv->engine[i],
+ "HW Status", ee->hws_page);
- if (i915.enable_execlists) {
- hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
- hws_page = &obj->pages[LRC_PPHWSP_PN][0];
- }
- err_printf(m, "%s --- HW Status = 0x%08llx\n",
- dev_priv->engine[i].name, hws_offset);
- offset = 0;
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- hws_page[elt],
- hws_page[elt+1],
- hws_page[elt+2],
- hws_page[elt+3]);
- offset += 16;
- }
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "HW context", ee->ctx);
- obj = ee->wa_ctx;
- if (obj) {
- u64 wa_ctx_offset = obj->gtt_offset;
- u32 *wa_ctx_page = &obj->pages[0][0];
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
- u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
- engine->wa_ctx.per_ctx.size);
-
- err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
- dev_priv->engine[i].name, wa_ctx_offset);
- offset = 0;
- for (elt = 0; elt < wa_ctx_size; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- wa_ctx_page[elt + 0],
- wa_ctx_page[elt + 1],
- wa_ctx_page[elt + 2],
- wa_ctx_page[elt + 3]);
- offset += 16;
- }
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "WA context", ee->wa_ctx);
- if ((obj = ee->ctx)) {
- err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "WA batchbuffer", ee->wa_batchbuffer);
}
- if ((obj = error->semaphore)) {
- err_printf(m, "Semaphore page = 0x%08x\n",
- lower_32_bits(obj->gtt_offset));
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- elt * 4,
- obj->pages[0][elt],
- obj->pages[0][elt+1],
- obj->pages[0][elt+2],
- obj->pages[0][elt+3]);
- }
- }
+ print_error_obj(m, NULL, "Semaphores", error->semaphore);
+
+ print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
if (error->display)
- intel_display_print_error_state(m, dev, error->display);
+ intel_display_print_error_state(m, dev_priv, error->display);
out:
if (m->bytes == 0 && m->err)
@@ -629,7 +756,7 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
return;
for (page = 0; page < obj->page_count; page++)
- kfree(obj->pages[page]);
+ free_page((unsigned long)obj->pages[page]);
kfree(obj);
}
@@ -656,6 +783,7 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore);
+ i915_error_object_free(error->guc_log);
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
@@ -667,104 +795,63 @@ static void i915_error_state_free(struct kref *error_ref)
}
static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *dev_priv,
+i915_error_object_create(struct drm_i915_private *i915,
struct i915_vma *vma)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *src;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
- int num_pages;
- bool use_ggtt;
- int i = 0;
- u64 reloc_offset;
+ struct z_stream_s zstream;
+ unsigned long num_pages;
+ struct sgt_iter iter;
+ dma_addr_t dma;
if (!vma)
return NULL;
- src = vma->obj;
- if (!src->pages)
- return NULL;
-
- num_pages = src->base.size >> PAGE_SHIFT;
-
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
+ num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!dst)
return NULL;
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->page_count = 0;
+ dst->unused = 0;
- reloc_offset = dst->gtt_offset;
- use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- (vma->flags & I915_VMA_GLOBAL_BIND) &&
- reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
-
- /* Cannot access stolen address directly, try to use the aperture */
- if (src->stolen) {
- use_ggtt = true;
-
- if (!(vma->flags & I915_VMA_GLOBAL_BIND))
- goto unwind;
-
- reloc_offset = vma->node.start;
- if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
- goto unwind;
+ if (!compress_init(&zstream)) {
+ kfree(dst);
+ return NULL;
}
- /* Cannot access snooped pages through the aperture */
- if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
- !HAS_LLC(dev_priv))
- goto unwind;
-
- dst->page_count = num_pages;
- while (num_pages--) {
- unsigned long flags;
- void *d;
-
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (d == NULL)
- goto unwind;
-
- local_irq_save(flags);
- if (use_ggtt) {
- void __iomem *s;
-
- /* Simply ignore tiling or any overlapping fence.
- * It's part of the error state, and this hopefully
- * captures what the GPU read.
- */
-
- s = io_mapping_map_atomic_wc(&ggtt->mappable,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
- } else {
- struct page *page;
- void *s;
-
- page = i915_gem_object_get_page(src, i);
-
- drm_clflush_pages(&page, 1);
+ for_each_sgt_dma(dma, iter, vma->pages) {
+ void __iomem *s;
+ int ret;
- s = kmap_atomic(page);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s);
+ ggtt->base.insert_page(&ggtt->base, dma, slot,
+ I915_CACHE_NONE, 0);
- drm_clflush_pages(&page, 1);
- }
- local_irq_restore(flags);
+ s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+ ret = compress_page(&zstream, (void __force *)s, dst);
+ io_mapping_unmap_atomic(s);
- dst->pages[i++] = d;
- reloc_offset += PAGE_SIZE;
+ if (ret)
+ goto unwind;
}
-
- return dst;
+ goto out;
unwind:
- while (i--)
- kfree(dst->pages[i]);
+ while (dst->page_count--)
+ free_page((unsigned long)dst->pages[dst->page_count]);
kfree(dst);
- return NULL;
+ dst = NULL;
+
+out:
+ compress_fini(&zstream, dst);
+ ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ return dst;
}
/* The error capture is special as it tries to run underneath the normal
@@ -773,16 +860,19 @@ unwind:
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
- return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+ struct drm_i915_gem_request *request;
+
+ request = __i915_gem_active_peek(active);
+ return request ? request->global_seqno : 0;
}
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
- struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request;
- engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
- return engine ? engine->id : -1;
+ request = __i915_gem_active_peek(active);
+ return request ? request->engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
@@ -795,17 +885,17 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
- err->wseqno = __active_get_seqno(&obj->last_write);
- err->engine = __active_get_engine_id(&obj->last_write);
+ err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
+ err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
+ err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->dirty = obj->mm.dirty;
+ err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->cache_level = obj->cache_level;
}
@@ -855,7 +945,8 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
if (engine_id)
*engine_id = i;
- return error->engine[i].ipehr ^ error->engine[i].instdone;
+ return error->engine[i].ipehr ^
+ error->engine[i].instdone.instdone;
}
}
@@ -879,6 +970,26 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
}
}
+static inline u32
+gen8_engine_sync_index(struct intel_engine_cs *engine,
+ struct intel_engine_cs *other)
+{
+ int idx;
+
+ /*
+ * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+ * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+ * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+ * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+ * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
+ */
+
+ idx = (other - engine) - 1;
+ if (idx < 0)
+ idx += I915_NUM_ENGINES;
+
+ return idx;
+}
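/*
 * Illustrative, standalone sketch, not part of the kernel patch above: the
 * semaphore mailbox slot used for a signalling engine is its distance from
 * the waiting engine, minus one, wrapped modulo the number of engines. A
 * hypothetical check of the mapping listed in the comment above, using plain
 * engine indices (rcs=0, vcs=1, bcs=2, vecs=3, vcs2=4) in place of the
 * engine pointers:
 */
#include <stdio.h>

#define NUM_ENGINES 5

static int sync_index(int waiter, int signaller)
{
	int idx = (signaller - waiter) - 1;

	if (idx < 0)
		idx += NUM_ENGINES;

	return idx;
}

int main(void)
{
	static const char * const name[NUM_ENGINES] = {
		"rcs", "vcs", "bcs", "vecs", "vcs2"
	};
	int waiter, signaller;

	/* Prints the same table as the comment above, one pair per line */
	for (waiter = 0; waiter < NUM_ENGINES; waiter++)
		for (signaller = 0; signaller < NUM_ENGINES; signaller++)
			if (signaller != waiter)
				printf("%s -> %d = %s\n", name[waiter],
				       sync_index(waiter, signaller),
				       name[signaller]);

	return 0;
}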
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
@@ -891,7 +1002,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
if (!error->semaphore)
return;
- for_each_engine_id(to, dev_priv, id) {
+ for_each_engine(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
@@ -902,10 +1013,9 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
signal_offset =
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
tmp = error->semaphore->pages[0];
- idx = intel_engine_sync_index(engine, to);
+ idx = gen8_engine_sync_index(engine, to);
ee->semaphore_mboxes[idx] = tmp[signal_offset];
- ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
}
}
@@ -916,14 +1026,9 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
- ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
- ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
-
- if (HAS_VEBOX(dev_priv)) {
+ if (HAS_VEBOX(dev_priv))
ee->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base));
- ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
- }
}
static void error_record_engine_waiters(struct intel_engine_cs *engine,
@@ -940,7 +1045,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (RB_EMPTY_ROOT(&b->waiters))
return;
- if (!spin_trylock(&b->lock)) {
+ if (!spin_trylock_irq(&b->lock)) {
ee->waiters = ERR_PTR(-EDEADLK);
return;
}
@@ -948,7 +1053,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
count++;
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
waiter = NULL;
if (count)
@@ -958,7 +1063,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (!waiter)
return;
- if (!spin_trylock(&b->lock)) {
+ if (!spin_trylock_irq(&b->lock)) {
kfree(waiter);
ee->waiters = ERR_PTR(-EDEADLK);
return;
@@ -976,7 +1081,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (++ee->num_waiters == count)
break;
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static void error_record_engine_registers(struct drm_i915_error_state *error,
@@ -998,7 +1103,6 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_GEN(dev_priv) >= 8) {
@@ -1010,14 +1114,15 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(DMA_FADD_I8XX);
ee->ipeir = I915_READ(IPEIR);
ee->ipehr = I915_READ(IPEHR);
- ee->instdone = I915_READ(GEN2_INSTDONE);
}
+ intel_engine_get_instdone(engine, &ee->instdone);
+
ee->waiting = intel_engine_has_waiter(engine);
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
- ee->last_seqno = engine->last_submitted_seqno;
+ ee->last_seqno = intel_engine_last_submit(engine);
ee->start = I915_READ_START(engine);
ee->head = I915_READ_HEAD(engine);
ee->tail = I915_READ_TAIL(engine);
@@ -1079,6 +1184,20 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
}
}
+static void record_request(struct drm_i915_gem_request *request,
+ struct drm_i915_error_request *erq)
+{
+ erq->context = request->ctx->hw_id;
+ erq->seqno = request->global_seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->head = request->head;
+ erq->tail = request->tail;
+
+ rcu_read_lock();
+ erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ rcu_read_unlock();
+}
+
static void engine_record_requests(struct intel_engine_cs *engine,
struct drm_i915_gem_request *first,
struct drm_i915_error_engine *ee)
@@ -1088,7 +1207,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link)
+ list_for_each_entry_from(request, &engine->timeline->requests, link)
count++;
if (!count)
return;
@@ -1101,9 +1220,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link) {
- struct drm_i915_error_request *erq;
-
+ list_for_each_entry_from(request, &engine->timeline->requests, link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@@ -1123,19 +1240,22 @@ static void engine_record_requests(struct intel_engine_cs *engine,
break;
}
- erq = &ee->requests[count++];
- erq->seqno = request->fence.seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->head = request->head;
- erq->tail = request->tail;
-
- rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
- rcu_read_unlock();
+ record_request(request, &ee->requests[count++]);
}
ee->num_requests = count;
}
+static void error_record_engine_execlists(struct intel_engine_cs *engine,
+ struct drm_i915_error_engine *ee)
+{
+ unsigned int n;
+
+ for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+ if (engine->execlist_port[n].request)
+ record_request(engine->execlist_port[n].request,
+ &ee->execlist[n]);
+}
+
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
@@ -1146,20 +1266,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
+ struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct drm_i915_gem_request *request;
ee->pid = -1;
ee->engine_id = -1;
- if (!intel_engine_initialized(engine))
+ if (!engine)
continue;
ee->engine_id = i;
error_record_engine_registers(error, engine, ee);
error_record_engine_waiters(engine, ee);
+ error_record_engine_execlists(engine, ee);
request = i915_gem_find_active_request(engine);
if (request) {
@@ -1202,6 +1323,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
error->simulated |=
request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
+ ee->rq_head = request->head;
+ ee->rq_post = request->postfix;
+ ee->rq_tail = request->tail;
+
ring = request->ring;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
@@ -1302,11 +1427,21 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo = bo;
}
+static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ /* Capturing log buf contents won't be useful if logging was disabled */
+ if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
+ return;
+
+ error->guc_log = i915_error_object_create(dev_priv,
+ dev_priv->guc.log.vma);
+}
+
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* General organization
@@ -1318,62 +1453,60 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
*/
/* 1: Registers specific to a single generation */
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv)) {
error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
error->err_int = I915_READ(GEN7_ERR_INT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
error->forcewake = I915_READ_FW(FORCEWAKE_MT);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
error->derrmr = I915_READ(DERRMR);
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
}
/* 3: Feature specific registers */
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
/* 4: Everything else */
- if (HAS_HW_CONTEXTS(dev))
+ if (HAS_HW_CONTEXTS(dev_priv))
error->ccid = I915_READ(CCID);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
error->ier = I915_READ16(IER);
- } else if (!IS_VALLEYVIEW(dev)) {
+ } else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
-
- i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
@@ -1418,6 +1551,32 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
sizeof(error->device_info));
}
+static int capture(void *data)
+{
+ struct drm_i915_error_state *error = data;
+
+ i915_capture_gen_state(error->i915, error);
+ i915_capture_reg_state(error->i915, error);
+ i915_gem_record_fences(error->i915, error);
+ i915_gem_record_rings(error->i915, error);
+ i915_capture_active_buffers(error->i915, error);
+ i915_capture_pinned_buffers(error->i915, error);
+ i915_gem_capture_guc_log_buffer(error->i915, error);
+
+ do_gettimeofday(&error->time);
+ error->boottime = ktime_to_timeval(ktime_get_boottime());
+ error->uptime =
+ ktime_to_timeval(ktime_sub(ktime_get(),
+ error->i915->gt.last_init_time));
+
+ error->overlay = intel_overlay_capture_error_state(error->i915);
+ error->display = intel_display_capture_error_state(error->i915);
+
+ return 0;
+}
+
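Funnelling the whole capture through stop_machine() parks every other CPU while the snapshot runs, so the lists and registers recorded above cannot change underneath the capture. A rough sketch of the calling pattern only, with hypothetical helper and type names standing in for the real capture calls:

    #include <linux/stop_machine.h>

    /* Callback signature required by stop_machine(): int (*)(void *). */
    static int capture_snapshot(void *data)
    {
        struct error_snapshot *snap = data;  /* hypothetical type */

        /* Runs with all other CPUs halted, so nothing mutates the
         * structures being recorded while we walk them.
         */
        record_everything(snap);             /* hypothetical helper */
        return 0;
    }

    static void trigger_capture(struct error_snapshot *snap)
    {
        /* NULL cpumask: any CPU may run the callback. */
        stop_machine(capture_snapshot, snap, NULL);
    }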
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -1435,6 +1594,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error;
unsigned long flags;
+ if (!i915.error_capture)
+ return;
+
if (READ_ONCE(dev_priv->gpu_error.first_error))
return;
@@ -1446,18 +1608,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
kref_init(&error->ref);
+ error->i915 = dev_priv;
- i915_capture_gen_state(dev_priv, error);
- i915_capture_reg_state(dev_priv, error);
- i915_gem_record_fences(dev_priv, error);
- i915_gem_record_rings(dev_priv, error);
- i915_capture_active_buffers(dev_priv, error);
- i915_capture_pinned_buffers(dev_priv, error);
-
- do_gettimeofday(&error->time);
-
- error->overlay = intel_overlay_capture_error_state(dev_priv);
- error->display = intel_display_capture_error_state(dev_priv);
+ stop_machine(capture, error, NULL);
i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1476,7 +1629,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
return;
}
- if (!warned) {
+ if (!warned &&
+ ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
@@ -1497,7 +1651,6 @@ void i915_error_state_get(struct drm_device *dev,
if (error_priv->error)
kref_get(&error_priv->error->ref);
spin_unlock_irq(&dev_priv->gpu_error.lock);
-
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
@@ -1519,33 +1672,3 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error)
kref_put(&error->ref, i915_error_state_free);
}
-
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
-{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
- case I915_CACHE_L3_LLC: return " L3+LLC";
- case I915_CACHE_WT: return " WT";
- default: return "";
- }
-}
-
-/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
- uint32_t *instdone)
-{
- memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
- if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
- instdone[0] = I915_READ(GEN2_INSTDONE);
- else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
- instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
- instdone[1] = I915_READ(GEN4_INSTDONE1);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
- instdone[1] = I915_READ(GEN7_SC_INSTDONE);
- instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- }
-}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 3106dcc06fe9..4462112725ef 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -23,6 +23,8 @@
*/
#include <linux/firmware.h>
#include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
#include "i915_drv.h"
#include "intel_guc.h"
@@ -85,6 +87,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
if (WARN_ON(len < 1 || len > 15))
return -EINVAL;
+ mutex_lock(&guc->action_lock);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
dev_priv->guc.action_count += 1;
@@ -123,6 +126,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
dev_priv->guc.action_status = status;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&guc->action_lock);
return ret;
}
@@ -170,6 +174,35 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
+static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
+{
+ u32 data[1];
+
+ data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;
+
+ return host2guc_action(guc, data, 1);
+}
+
+static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
+{
+ u32 data[2];
+
+ data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
+ data[1] = 0;
+
+ return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
+{
+ u32 data[2];
+
+ data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
+ data[1] = control_val;
+
+ return host2guc_action(guc, data, 2);
+}
+
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
@@ -187,7 +220,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
struct guc_context_desc desc;
size_t len;
- doorbell = client->client_base + client->doorbell_offset;
+ doorbell = client->vaddr + client->doorbell_offset;
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
test_bit(client->doorbell_id, doorbell_bitmap)) {
@@ -293,7 +326,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
{
struct guc_process_desc *desc;
- desc = client->client_base + client->proc_desc_offset;
+ desc = client->vaddr + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
@@ -380,8 +413,8 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
gfx_addr = i915_ggtt_offset(client->vma);
desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc.db_trigger_cpu = (uintptr_t)client->client_base +
- client->doorbell_offset;
+ desc.db_trigger_cpu =
+ (uintptr_t)client->vaddr + client->doorbell_offset;
desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
desc.process_desc = gfx_addr + client->proc_desc_offset;
desc.wq_addr = gfx_addr + client->wq_offset;
@@ -432,7 +465,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *gc = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
+ struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
u32 freespace;
int ret;
@@ -473,10 +506,9 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
struct intel_engine_cs *engine = rq->engine;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
- void *base;
- u32 freespace, tail, wq_off, wq_page;
+ u32 freespace, tail, wq_off;
- desc = gc->client_base + gc->proc_desc_offset;
+ desc = gc->vaddr + gc->proc_desc_offset;
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
@@ -506,10 +538,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
gc->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
- wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
- wq_off &= PAGE_SIZE - 1;
- base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
- wqi = (struct guc_wq_item *)((char *)base + wq_off);
+ wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
@@ -521,9 +550,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = rq->fence.seqno;
-
- kunmap_atomic(base);
+ wqi->fence_id = rq->global_seqno;
}
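Since the client object is now pinned with one contiguous CPU mapping (client->vaddr), the work-queue item can be addressed with a flat offset instead of kmapping whichever page it lands in. The page/offset split computed by the removed kmap_atomic() path and the flat addition used now reach the same byte whenever the mapping is virtually contiguous; a small standalone sketch of that equivalence (PAGE_SIZE and GUC_DB_SIZE values assumed for illustration only):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1u << PAGE_SHIFT)
    #define GUC_DB_SIZE PAGE_SIZE  /* assume doorbell/proc_desc fill one page */

    int main(void)
    {
        static uint8_t map[8 * PAGE_SIZE];     /* stands in for client->vaddr */
        uint32_t wq_off = 3 * PAGE_SIZE + 96;  /* arbitrary offset in the WQ */

        /* Old scheme: pick the page, then the offset inside that page. */
        uint32_t wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
        uint32_t in_page = (wq_off + GUC_DB_SIZE) & (PAGE_SIZE - 1);
        uint8_t *old_wqi = map + wq_page * PAGE_SIZE + in_page;

        /* New scheme: flat offset into the contiguous mapping. */
        uint8_t *new_wqi = map + wq_off + GUC_DB_SIZE;

        assert(old_wqi == new_wqi);
        printf("both schemes hit offset %zu\n", (size_t)(new_wqi - map));
        return 0;
    }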
static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -533,7 +560,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = gc->client_base + gc->proc_desc_offset;
+ desc = gc->vaddr + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
@@ -549,7 +576,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = gc->client_base + gc->doorbell_offset;
+ db = gc->vaddr + gc->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@@ -601,13 +628,31 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
*/
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
- unsigned int engine_id = rq->engine->id;
+ struct drm_i915_private *dev_priv = rq->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned int engine_id = engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
int b_ret;
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ rq->previous_context = engine->last_context;
+ engine->last_context = rq->ctx;
+
+ i915_gem_request_submit(rq);
+
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
+
+ /* WA to flush out the pending GMADR writes to ring buffer. */
+ if (i915_vma_is_map_and_fenceable(rq->ring->vma))
+ POSTING_READ_FW(GUC_STATUS);
+
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
@@ -616,7 +661,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
client->b_fail += 1;
guc->submissions[engine_id] += 1;
- guc->last_seqno[engine_id] = rq->fence.seqno;
+ guc->last_seqno[engine_id] = rq->global_seqno;
spin_unlock(&client->wq_lock);
}
@@ -685,14 +730,14 @@ guc_client_free(struct drm_i915_private *dev_priv,
* Be sure to drop any locks
*/
- if (client->client_base) {
+ if (client->vaddr) {
/*
* If we got as far as setting up a doorbell, make sure we
* shut it down before unmapping & deallocating the memory.
*/
guc_disable_doorbell(guc, client);
- kunmap(kmap_to_page(client->client_base));
+ i915_gem_object_unpin_map(client->vma->obj);
}
i915_vma_unpin_and_release(&client->vma);
@@ -781,6 +826,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
struct i915_guc_client *client;
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ void *vaddr;
uint16_t db_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -807,7 +853,12 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->vma = vma;
- client->client_base = kmap(i915_vma_first_page(vma));
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ goto err;
+
+ client->vaddr = vaddr;
spin_lock_init(&client->wq_lock);
client->wq_offset = GUC_DB_SIZE;
@@ -847,15 +898,411 @@ err:
return NULL;
}
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer, relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ /* Use no-overwrite mode by default, where relay will stop accepting
+ * new data if there are no empty sub buffers left.
+ * There is no strict synchronization enforced by relay between Consumer
+ * and Producer. In overwrite mode, there is a possibility of getting
+ * inconsistent/garbled data, as the producer could be writing on to the
+ * same sub buffer from which Consumer is reading. This can't be avoided
+ * unless Consumer is fast enough and can always run in tandem with
+ * Producer.
+ */
+ if (relay_buf_full(buf))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ /* This is to enable the use of a single buffer for the relay channel and
+ * correspondingly have a single file exposed to User, through which
+ * it can collect the logs in order without any post-processing.
+ * Need to set 'is_global' even if parent is NULL for early logging.
+ */
+ *is_global = 1;
+
+ if (!parent)
+ return NULL;
+
+ /* Not using the channel filename passed as an argument, since for each
+ * channel relay appends the corresponding CPU number to the filename
+ * passed in relay_open(). This should be fine as relay just needs a
+ * dentry of the file associated with the channel buffer and that file's
+ * name need not be the same as the filename passed as an argument.
+ */
+ buf_file = debugfs_create_file("guc_log", mode,
+ parent, buf, &relay_file_operations);
+ return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+/* relay channel callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = subbuf_start_callback,
+ .create_buf_file = create_buf_file_callback,
+ .remove_buf_file = remove_buf_file_callback,
+};
+
+static void guc_log_remove_relay_file(struct intel_guc *guc)
+{
+ relay_close(guc->log.relay_chan);
+}
+
+static int guc_log_create_relay_channel(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+
+ /* Keep the size of sub buffers same as shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
+
+ /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot time logs and provides enough leeway to User, in terms of
+ * latency, for consuming the logs from relay. Also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
+
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+ return -ENOMEM;
+ }
+
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.relay_chan = guc_log_relay_chan;
+ return 0;
+}
+
+static int guc_log_create_relay_file(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct dentry *log_dir;
+ int ret;
+
+ /* For now create the log file in /sys/kernel/debug/dri/0 dir */
+ log_dir = dev_priv->drm.primary->debugfs_root;
+
+ /* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs is
+ * not mounted and so we can't create the relay file.
+ * The relay API really only fits debugfs: there are 3 requirements
+ * which can be met in a straightforward/clean manner only with a
+ * debugfs file :-
+ * i) Need the associated dentry pointer of the file, while opening the
+ * relay channel.
+ * ii) Should be able to use 'relay_file_operations' fops for the file.
+ * iii) Set the 'i_private' field of file's inode to the pointer of
+ * relay channel buffer.
+ */
+ if (!log_dir) {
+ DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
+ return -ENODEV;
+ }
+
+ ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
+ if (ret) {
+ DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void guc_move_to_next_buf(struct intel_guc *guc)
+{
+ /* Make sure the updates made in the sub buffer are visible when
+ * Consumer sees the following update to offset inside the sub buffer.
+ */
+ smp_wmb();
+
+ /* All data has been written, so now move the offset of sub buffer. */
+ relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+
+ /* Switch to the next sub buffer */
+ relay_flush(guc->log.relay_chan);
+}
+
+static void *guc_get_write_buffer(struct intel_guc *guc)
+{
+ if (!guc->log.relay_chan)
+ return NULL;
+
+ /* Just get the base address of a new sub buffer and copy data into it
+ * ourselves. NULL will be returned in no-overwrite mode, if all sub
+ * buffers are full. Could have used the relay_write() to indirectly
+ * copy the data, but that would have been a bit convoluted, as we need
+ * to write to only certain locations inside a sub buffer, which cannot
+ * be done without using relay_reserve() along with relay_write(). So
+ * it's better to use relay_reserve() alone.
+ */
+ return relay_reserve(guc->log.relay_chan, 0);
+}
+
+static bool
+guc_check_log_buf_overflow(struct intel_guc *guc,
+ enum guc_log_buffer_type type, unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ guc->log.prev_overflow_count[type] = full_cnt;
+ guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4 bit counter */
+ guc->log.total_overflow_count[type] += 16;
+ }
+ DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+ }
+
+ return overflow;
+}
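Because buffer_full_cnt is only a 4-bit hardware counter, the running total above has to allow for the counter wrapping between two samples, which is what the extra "+= 16" handles. A standalone sketch of that bookkeeping with a worked wrap case (values are made up):

    #include <assert.h>
    #include <stdio.h>

    /* Overflows seen since the previous sample of a counter that wraps
     * at 16, mirroring the "add 16 on wrap" fixup above.
     */
    static unsigned int overflow_delta(unsigned int prev, unsigned int cur)
    {
        return cur >= prev ? cur - prev : cur + 16 - prev;
    }

    int main(void)
    {
        /* No wrap: 3 -> 7 is four new overflows. */
        assert(overflow_delta(3, 7) == 4);
        /* Wrap: 14 -> 2 is also four increments (15, 0, 1, 2). */
        assert(overflow_delta(14, 2) == 4);
        printf("overflow accounting ok\n");
        return 0;
    }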
+
+static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ case GUC_DPC_LOG_BUFFER:
+ return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ default:
+ MISSING_CASE(type);
+ }
+
+ return 0;
+}
+
+static void guc_read_update_log_buffer(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+ struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+ struct guc_log_buffer_state log_buf_state_local;
+ enum guc_log_buffer_type type;
+ void *src_data, *dst_data;
+ bool new_overflow;
+
+ if (WARN_ON(!guc->log.buf_addr))
+ return;
+
+ /* Get the pointer to shared GuC log buffer */
+ log_buf_state = src_data = guc->log.buf_addr;
+
+ /* Get the pointer to local buffer to store the logs */
+ log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+
+ /* Actual logs are present from the 2nd page */
+ src_data += PAGE_SIZE;
+ dst_data += PAGE_SIZE;
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ /* Make a copy of the state structure, inside GuC log buffer
+ * (which is uncached mapped), on the stack to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state,
+ sizeof(struct guc_log_buffer_state));
+ buffer_size = guc_get_log_buffer_size(type);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_cnt = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
+ new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+
+ /* Update the state of shared log buffer */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ log_buf_state++;
+
+ if (unlikely(!log_buf_snapshot_state))
+ continue;
+
+ /* First copy the state structure in snapshot buffer */
+ memcpy(log_buf_snapshot_state, &log_buf_state_local,
+ sizeof(struct guc_log_buffer_state));
+
+ /* The write pointer could have been updated by GuC firmware,
+ * after sending the flush interrupt to Host; for consistency,
+ * set the write pointer in the snapshot buffer to the same
+ * value as sampled_write_ptr.
+ */
+ log_buf_snapshot_state->write_ptr = write_offset;
+ log_buf_snapshot_state++;
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ DRM_ERROR("invalid log buffer state\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ /* Just copy the newly written data */
+ if (read_offset > write_offset) {
+ i915_memcpy_from_wc(dst_data, src_data, write_offset);
+ bytes_to_copy = buffer_size - read_offset;
+ } else {
+ bytes_to_copy = write_offset - read_offset;
+ }
+ i915_memcpy_from_wc(dst_data + read_offset,
+ src_data + read_offset, bytes_to_copy);
+
+ src_data += buffer_size;
+ dst_data += buffer_size;
+ }
+
+ if (log_buf_snapshot_state)
+ guc_move_to_next_buf(guc);
+ else {
+ /* Use a rate-limited message to avoid a deluge; logs might be
+ * getting consumed by User at a slow rate.
+ */
+ DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ guc->log.capture_miss_count++;
+ }
+}
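The copy above treats each log region as a circular buffer: when the sampled write pointer has wrapped behind the read pointer, the fresh data sits in two disjoint chunks, [0, write) at the start and [read, size) at the end. A standalone sketch of that drain logic, with plain memcpy standing in for i915_memcpy_from_wc():

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy only the bytes produced since 'read', mirroring the two-chunk
     * copy in guc_read_update_log_buffer().
     */
    static size_t drain(char *dst, const char *src, size_t size,
                        size_t read, size_t write)
    {
        if (read > write) {
            /* Wrapped: head of the buffer first ... */
            memcpy(dst, src, write);
            /* ... then the tail from the read pointer to the end. */
            memcpy(dst + read, src + read, size - read);
            return write + (size - read);
        }
        memcpy(dst + read, src + read, write - read);
        return write - read;
    }

    int main(void)
    {
        char src[16], dst[16] = { 0 };
        size_t i;

        for (i = 0; i < sizeof(src); i++)
            src[i] = (char)('A' + i);

        /* write wrapped to 4 while read is still at 12: 8 new bytes. */
        assert(drain(dst, src, sizeof(src), 12, 4) == 8);
        printf("copied the wrapped region\n");
        return 0;
    }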
+
+static void guc_capture_logs_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, guc.log.flush_work);
+
+ i915_guc_capture_logs(dev_priv);
+}
+
+static void guc_log_cleanup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* First disable the flush interrupt */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ if (guc->log.flush_wq)
+ destroy_workqueue(guc->log.flush_wq);
+
+ guc->log.flush_wq = NULL;
+
+ if (guc->log.relay_chan)
+ guc_log_remove_relay_file(guc);
+
+ guc->log.relay_chan = NULL;
+
+ if (guc->log.buf_addr)
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+
+ guc->log.buf_addr = NULL;
+}
+
+static int guc_log_create_extras(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ void *vaddr;
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Nothing to do */
+ if (i915.guc_log_level < 0)
+ return 0;
+
+ if (!guc->log.buf_addr) {
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return ret;
+ }
+
+ guc->log.buf_addr = vaddr;
+ }
+
+ if (!guc->log.relay_chan) {
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs, the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ ret = guc_log_create_relay_channel(guc);
+ if (ret)
+ return ret;
+ }
+
+ if (!guc->log.flush_wq) {
+ INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
+
+ /*
+ * GuC log buffer flush work item has to do register access to
+ * send the ack to GuC and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to bother about
+ * flushing this work item from the suspend hooks; the pending
+ * work item, if any, will either be executed before the suspend
+ * or scheduled later on resume. This way the handling of the work
+ * item can be kept the same between system suspend & rpm suspend.
+ */
+ guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (guc->log.flush_wq == NULL) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
static void guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
uint32_t size, flags;
- if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
- return;
-
if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
@@ -865,8 +1312,18 @@ static void guc_log_create(struct intel_guc *guc)
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
- vma = guc->log_vma;
+ vma = guc->log.vma;
if (!vma) {
+ /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+ * submissions.
+ */
+ if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
+ /* logging will not be enabled */
+ i915.guc_log_level = -1;
+ return;
+ }
+
vma = guc_allocate_vma(guc, size);
if (IS_ERR(vma)) {
/* logging will be off */
@@ -874,7 +1331,14 @@ static void guc_log_create(struct intel_guc *guc)
return;
}
- guc->log_vma = vma;
+ guc->log.vma = vma;
+
+ if (guc_log_create_extras(guc)) {
+ guc_log_cleanup(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+ i915.guc_log_level = -1;
+ return;
+ }
}
/* each allocated unit is a page */
@@ -884,7 +1348,37 @@ static void guc_log_create(struct intel_guc *guc)
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+static int guc_log_late_setup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (i915.guc_log_level < 0)
+ return -EINVAL;
+
+ /* If log_level was set as -1 at boot time, then the setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_create_extras(guc);
+ if (ret)
+ goto err;
+
+ ret = guc_log_create_relay_file(guc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ guc_log_cleanup(guc);
+ /* logging will remain off */
+ i915.guc_log_level = -1;
+ return ret;
}
static void guc_policies_init(struct guc_policies *policies)
@@ -917,6 +1411,7 @@ static void guc_addon_create(struct intel_guc *guc)
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct page *page;
u32 size;
@@ -944,10 +1439,10 @@ static void guc_addon_create(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
ads->golden_context_lrca = engine->status_page.ggtt_offset;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
@@ -960,7 +1455,7 @@ static void guc_addon_create(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
@@ -1005,6 +1500,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
+ mutex_init(&guc->action_lock);
guc_log_create(guc);
guc_addon_create(guc);
@@ -1014,9 +1510,10 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_gem_request *request;
struct i915_guc_client *client;
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request;
+ enum intel_engine_id id;
/* client for execbuf submission */
client = guc_client_alloc(dev_priv,
@@ -1033,11 +1530,13 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = i915_guc_submit;
+ engine->schedule = NULL;
/* Replay the current set of previously submitted requests */
- list_for_each_entry(request, &engine->request_list, link) {
+ list_for_each_entry(request,
+ &engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
if (i915_sw_fence_done(&request->submit))
i915_guc_submit(request);
@@ -1066,7 +1565,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
i915_vma_unpin_and_release(&guc->ads_vma);
- i915_vma_unpin_and_release(&guc->log_vma);
+ i915_vma_unpin_and_release(&guc->log.vma);
if (guc->ctx_pool_vma)
ida_destroy(&guc->ctx_ids);
@@ -1087,6 +1586,8 @@ int intel_guc_suspend(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
+ gen9_disable_guc_interrupts(dev_priv);
+
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
@@ -1113,6 +1614,9 @@ int intel_guc_resume(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
@@ -1122,3 +1626,104 @@ int intel_guc_resume(struct drm_device *dev)
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
+
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
+{
+ guc_read_update_log_buffer(&dev_priv->guc);
+
+ /* Generally the device is expected to be active only at this
+ * time, so get/put should be really quick.
+ */
+ intel_runtime_pm_get(dev_priv);
+ host2guc_logbuffer_flush_complete(&dev_priv->guc);
+ intel_runtime_pm_put(dev_priv);
+}
+
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+ return;
+
+ /* First disable the interrupts; they will be re-enabled afterwards */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ /* Before initiating the forceful flush, wait for any pending/ongoing
+ * flush to complete otherwise forceful flush may not actually happen.
+ */
+ flush_work(&dev_priv->guc.log.flush_work);
+
+ /* Ask GuC to update the log buffer state */
+ host2guc_force_logbuffer_flush(&dev_priv->guc);
+
+ /* GuC would have updated log buffer by now, so capture it */
+ i915_guc_capture_logs(dev_priv);
+}
+
+void i915_guc_unregister(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_cleanup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+void i915_guc_register(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_late_setup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+{
+ union guc_log_control log_param;
+ int ret;
+
+ log_param.value = control_val;
+
+ if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
+ log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+ return -EINVAL;
+
+ /* This combination doesn't make sense & won't have any effect */
+ if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+ return 0;
+
+ ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
+ return ret;
+ }
+
+ i915.guc_log_level = log_param.verbosity;
+
+ /* If log_level was set as -1 at boot time, then the relay channel file
+ * wouldn't have been created by now and interrupts also would not have
+ * been enabled.
+ */
+ if (!dev_priv->guc.log.relay_chan) {
+ ret = guc_log_late_setup(&dev_priv->guc);
+ if (!ret)
+ gen9_enable_guc_interrupts(dev_priv);
+ } else if (!log_param.logging_enabled) {
+ /* Once logging is disabled, GuC won't generate logs & send an
+ * interrupt. But there could be some data in the log buffer
+ * which is yet to be captured. So request GuC to update the log
+ * buffer state and then collect the left over logs.
+ */
+ i915_guc_flush_logs(dev_priv);
+
+ /* As logging is disabled, update log level to reflect that */
+ i915.guc_log_level = -1;
+ } else {
+ /* In case interrupts were disabled, enable them now */
+ gen9_enable_guc_interrupts(dev_priv);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fc286cd1157..07ca71cabb2b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -170,6 +170,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
@@ -303,18 +304,18 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- new_val = dev_priv->pm_irq_mask;
+ new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->pm_irq_mask) {
- dev_priv->pm_irq_mask = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+ if (new_val != dev_priv->pm_imr) {
+ dev_priv->pm_imr = new_val;
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
POSTING_READ(gen6_pm_imr(dev_priv));
}
}
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -322,28 +323,54 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
snb_update_pm_irq(dev_priv, mask, mask);
}
-static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t mask)
+static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
- __gen6_disable_pm_irq(dev_priv, mask);
+ __gen6_mask_pm_irq(dev_priv, mask);
}
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(reg, dev_priv->pm_rps_events);
- I915_WRITE(reg, dev_priv->pm_rps_events);
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ I915_WRITE(reg, reset_mask);
+ I915_WRITE(reg, reset_mask);
POSTING_READ(reg);
+}
+
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier |= enable_mask;
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ gen6_unmask_pm_irq(dev_priv, enable_mask);
+ /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
+}
+
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier &= ~disable_mask;
+ __gen6_mask_pm_irq(dev_priv, disable_mask);
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ /* a barrier is missing here, but we don't really need one */
+}
+
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -357,8 +384,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
WARN_ON_ONCE(dev_priv->rps.pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
- dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -379,9 +404,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
- ~dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
synchronize_irq(dev_priv->drm.irq);
@@ -395,6 +418,38 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (!dev_priv->guc.interrupts_enabled) {
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
+ dev_priv->pm_guc_events);
+ dev_priv->guc.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->guc.interrupts_enabled = false;
+
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ gen9_reset_guc_interrupts(dev_priv);
+}
+
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -670,8 +725,8 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
htotal = mode->crtc_htotal;
@@ -776,8 +831,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@@ -912,21 +967,22 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *vblank_time,
unsigned flags)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
- if (pipe >= INTEL_INFO(dev)->num_pipes) {
+ if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
/* Get drm_crtc to timestamp: */
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc == NULL) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
- if (!crtc->hwmode.crtc_clock) {
+ if (!crtc->base.hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
return -EBUSY;
}
@@ -934,7 +990,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
- &crtc->hwmode);
+ &crtc->base.hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
@@ -1058,8 +1114,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
if (intel_engine_has_waiter(engine))
return true;
@@ -1084,7 +1141,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
client_boost = dev_priv->rps.client_boost;
dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1257,20 +1314,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[BCS]);
+ notify_ring(dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1323,11 +1380,13 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
- if (master_ctl & GEN8_GT_PM_IRQ) {
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
- if (gt_iir[2] & dev_priv->pm_rps_events) {
+ if (gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events)) {
I915_WRITE_FW(GEN8_GT_IIR(2),
- gt_iir[2] & dev_priv->pm_rps_events);
+ gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events));
ret = IRQ_HANDLED;
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
@@ -1340,25 +1399,28 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir[4])
{
if (gt_iir[0]) {
- gen8_cs_irq_handler(&dev_priv->engine[RCS],
+ gen8_cs_irq_handler(dev_priv->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[BCS],
+ gen8_cs_irq_handler(dev_priv->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
if (gt_iir[1]) {
- gen8_cs_irq_handler(&dev_priv->engine[VCS],
+ gen8_cs_irq_handler(dev_priv->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+ gen8_cs_irq_handler(dev_priv->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
if (gt_iir[3])
- gen8_cs_irq_handler(&dev_priv->engine[VECS],
+ gen8_cs_irq_handler(dev_priv->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
if (gt_iir[2] & dev_priv->pm_rps_events)
gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+
+ if (gt_iir[2] & dev_priv->pm_guc_events)
+ gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -1585,7 +1647,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&dev_priv->rps.work);
@@ -1598,13 +1660,48 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VECS]);
+ notify_ring(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
}
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
+{
+ if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
+ /* Sample the log buffer flush related bits & clear them out
+ * immediately from the message identity register to minimize the
+ * probability of losing a flush interrupt when there are back
+ * to back flush interrupts.
+ * There can be a new flush interrupt, for a different log buffer
+ * type (like for ISR), whilst Host is handling one (for DPC).
+ * Since the same bit is used in the message register for ISR & DPC,
+ * it could happen that GuC sets the bit for the 2nd interrupt but
+ * Host clears out the bit on handling the 1st interrupt.
+ */
+ u32 msg, flush;
+
+ msg = I915_READ(SOFT_SCRATCH(15));
+ flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
+ GUC2HOST_MSG_FLUSH_LOG_BUFFER);
+ if (flush) {
+ /* Clear the message bits that are handled */
+ I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
+
+ /* Handle flush interrupt in bottom half */
+ queue_work(dev_priv->guc.log.flush_wq,
+ &dev_priv->guc.log.flush_work);
+
+ dev_priv->guc.log.flush_interrupt_count++;
+ } else {
+ /* Leaving the unhandled event bits uncleared won't result in
+ * the interrupt being re-triggered.
+ */
+ }
+ }
+}
+
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -2407,7 +2504,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
fault_errors);
}
@@ -2551,92 +2648,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
-static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
+static inline void
+i915_err_print_instdone(struct drm_i915_private *dev_priv,
+ struct intel_instdone *instdone)
{
- uint32_t instdone[I915_NUM_INSTDONE_REG];
- u32 eir = I915_READ(EIR);
- int pipe, i;
+ int slice;
+ int subslice;
+
+ pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
+
+ if (INTEL_GEN(dev_priv) <= 3)
+ return;
+
+ pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
- if (!eir)
+ if (INTEL_GEN(dev_priv) <= 6)
return;
- pr_err("render error detected, EIR: 0x%08x\n", eir);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
- i915_get_extra_instdone(dev_priv, instdone);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
- if (IS_G4X(dev_priv)) {
- if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- if (eir & GM45_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+ u32 eir;
- if (!IS_GEN2(dev_priv)) {
- if (eir & I915_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+ if (!IS_GEN2(dev_priv))
+ I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
- if (eir & I915_ERROR_MEMORY_REFRESH) {
- pr_err("memory refresh error:\n");
- for_each_pipe(dev_priv, pipe)
- pr_err("pipe %c stat: 0x%08x\n",
- pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
- /* pipestat has already been acked */
- }
- if (eir & I915_ERROR_INSTRUCTION) {
- pr_err("instruction error\n");
- pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_GEN(dev_priv) < 4) {
- u32 ipeir = I915_READ(IPEIR);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
- I915_WRITE(IPEIR, ipeir);
- POSTING_READ(IPEIR);
- } else {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- }
+ if (INTEL_GEN(dev_priv) < 4)
+ I915_WRITE(IPEIR, I915_READ(IPEIR));
+ else
+ I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
- I915_WRITE(EIR, eir);
- POSTING_READ(EIR);
+ I915_WRITE(EIR, I915_READ(EIR));
eir = I915_READ(EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
- DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
@@ -2665,7 +2722,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
va_end(args);
i915_capture_error_state(dev_priv, engine_mask, error_msg);
- i915_report_and_clear_eir(dev_priv);
+ i915_clear_error_registers(dev_priv);
if (!engine_mask)
return;
@@ -2694,45 +2751,40 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (INTEL_INFO(dev)->gen >= 4)
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
- else
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2753,38 +2805,36 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS |
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2798,411 +2848,14 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
- if (INTEL_GEN(engine->i915) >= 8) {
- return (ipehr >> 23) == 0x1c;
- } else {
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
- }
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
- u64 offset)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
-
- if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv) {
- if (engine == signaller)
- continue;
-
- if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
- return signaller;
- }
- } else {
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-
- for_each_engine(signaller, dev_priv) {
- if(engine == signaller)
- continue;
-
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
- }
-
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
- engine->name, ipehr, offset);
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
+static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = engine->i915;
- void __iomem *vaddr;
- u32 cmd, ipehr, head;
- u64 offset = 0;
- int i, backwards;
-
- /*
- * This function does not support execlist mode - any attempt to
- * proceed further into this function will result in a kernel panic
- * when dereferencing ring->buffer, which is not set up in execlist
- * mode.
- *
- * The correct way of doing it would be to derive the currently
- * executing ring buffer from the current context, which is derived
- * from the currently running request. Unfortunately, to get the
- * current request we would have to grab the struct_mutex before doing
- * anything else, which would be ill-advised since some other thread
- * might have grabbed it already and managed to hang itself, causing
- * the hang checker to deadlock.
- *
- * Therefore, this function does not support execlist mode in its
- * current form. Just return NULL and move on.
- */
- if (engine->buffer == NULL)
- return NULL;
-
- ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine, ipehr))
- return NULL;
-
- /*
- * HEAD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX. But limit it to just 3
- * or 4 dwords depending on the semaphore wait command size.
- * Note that we don't care about ACTHD here since that might
- * point at at batch, and semaphores are always emitted into the
- * ringbuffer itself.
- */
- head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
- vaddr = (void __iomem *)engine->buffer->vaddr;
-
- for (i = backwards; i; --i) {
- /*
- * Be paranoid and presume the hw has gone off into the wild -
- * our ring is smaller than what the hardware (and hence
- * HEAD_ADDR) allows. Also handles wrap-around.
- */
- head &= engine->buffer->size - 1;
-
- /* This here seems to blow up */
- cmd = ioread32(vaddr + head);
- if (cmd == ipehr)
- break;
-
- head -= 4;
- }
-
- if (!i)
- return NULL;
-
- *seqno = ioread32(vaddr + head + 4) + 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- offset = ioread32(vaddr + head + 12);
- offset <<= 32;
- offset |= ioread32(vaddr + head + 8);
- }
- return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
- u32 seqno;
-
- engine->hangcheck.deadlock++;
-
- signaller = semaphore_waits_for(engine, &seqno);
- if (signaller == NULL)
- return -1;
-
- if (IS_ERR(signaller))
- return 0;
-
- /* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
- return -1;
-
- if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
- return 1;
-
- /* cursory check for an unkickable deadlock */
- if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
- semaphore_passed(signaller) < 0)
- return -1;
-
- return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
-
- for_each_engine(engine, dev_priv)
- engine->hangcheck.deadlock = 0;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- u32 instdone[I915_NUM_INSTDONE_REG];
- bool stuck;
- int i;
-
- if (engine->id != RCS)
- return true;
-
- i915_get_extra_instdone(engine->i915, instdone);
-
- /* There might be unstable subunit states even when
- * actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = true;
- for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
- const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
-
- if (tmp != engine->hangcheck.instdone[i])
- stuck = false;
-
- engine->hangcheck.instdone[i] |= tmp;
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return HANGCHECK_ACTIVE;
- }
-
- if (!subunits_stuck(engine))
- return HANGCHECK_ACTIVE;
-
- return HANGCHECK_HUNG;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != HANGCHECK_HUNG)
- return ha;
-
- if (IS_GEN2(dev_priv))
- return HANGCHECK_HUNG;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = I915_READ_CTL(engine);
- if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, 0,
- "Kicking stuck wait on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- }
-
- if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(engine)) {
- default:
- return HANGCHECK_HUNG;
- case 1:
- i915_handle_error(dev_priv, 0,
- "Kicking stuck semaphore on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- case 0:
- return HANGCHECK_WAIT;
- }
- }
-
- return HANGCHECK_HUNG;
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void i915_hangcheck_elapsed(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- gpu_error.hangcheck_work.work);
- struct intel_engine_cs *engine;
- unsigned int hung = 0, stuck = 0;
- int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
-
- if (!i915.enable_hangcheck)
- return;
-
- if (!READ_ONCE(dev_priv->gt.awake))
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
-
- for_each_engine(engine, dev_priv) {
- bool busy = intel_engine_has_waiter(engine);
- u64 acthd;
- u32 seqno;
- u32 submit;
-
- semaphore_clear_deadlocks(dev_priv);
-
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- acthd = intel_engine_get_active_head(engine);
- seqno = intel_engine_get_seqno(engine);
- submit = READ_ONCE(engine->last_submitted_seqno);
-
- if (engine->hangcheck.seqno == seqno) {
- if (i915_seqno_passed(seqno, submit)) {
- engine->hangcheck.action = HANGCHECK_IDLE;
- } else {
- /* We always increment the hangcheck score
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, add a small increment to the
- * score so that we can catch a batch that is
- * being repeatedly kicked and so responsible
- * for stalling the machine.
- */
- engine->hangcheck.action =
- engine_stuck(engine, acthd);
-
- switch (engine->hangcheck.action) {
- case HANGCHECK_IDLE:
- case HANGCHECK_WAIT:
- break;
- case HANGCHECK_ACTIVE:
- engine->hangcheck.score += BUSY;
- break;
- case HANGCHECK_KICK:
- engine->hangcheck.score += KICK;
- break;
- case HANGCHECK_HUNG:
- engine->hangcheck.score += HUNG;
- break;
- }
- }
-
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- hung |= intel_engine_flag(engine);
- if (engine->hangcheck.action != HANGCHECK_HUNG)
- stuck |= intel_engine_flag(engine);
- }
- } else {
- engine->hangcheck.action = HANGCHECK_ACTIVE;
-
- /* Gradually reduce the count so that we catch DoS
- * attempts across multiple batches.
- */
- if (engine->hangcheck.score > 0)
- engine->hangcheck.score -= ACTIVE_DECAY;
- if (engine->hangcheck.score < 0)
- engine->hangcheck.score = 0;
-
- /* Clear head and subunit states on seqno movement */
- acthd = 0;
-
- memset(engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
- }
-
- engine->hangcheck.seqno = seqno;
- engine->hangcheck.acthd = acthd;
- busy_count += busy;
- }
-
- if (hung) {
- char msg[80];
- unsigned int tmp;
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "No progress" : "Hang");
- for_each_engine_masked(engine, dev_priv, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return i915_handle_error(dev_priv, hung, msg);
- }
-
- /* Reset timer in case GPU hangs without another request being added */
- if (busy_count)
- i915_queue_hangcheck(dev_priv);
-}
-
-static void ibx_irq_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
GEN5_IRQ_RESET(SDE);
- if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+ if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
}
@@ -3218,7 +2871,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -3226,12 +2879,10 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
GEN5_IRQ_RESET(GT);
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
GEN5_IRQ_RESET(GEN6_PM);
}
@@ -3293,12 +2944,12 @@ static void ironlake_irq_reset(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
GEN5_IRQ_RESET(DE);
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
- ibx_irq_reset(dev);
+ ibx_irq_reset(dev_priv);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -3308,7 +2959,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3343,8 +2994,8 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
- if (HAS_PCH_SPLIT(dev))
- ibx_irq_reset(dev);
+ if (HAS_PCH_SPLIT(dev_priv))
+ ibx_irq_reset(dev_priv);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3532,10 +3183,10 @@ static void ibx_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
@@ -3552,14 +3203,14 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_DPF(dev)) {
+ if (HAS_L3_DPF(dev_priv)) {
/* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
- gt_irqs |= GT_PARITY_ERROR(dev);
+ dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
+ gt_irqs |= GT_PARITY_ERROR(dev_priv);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -3567,16 +3218,18 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev))
+ if (HAS_VEBOX(dev_priv)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
- dev_priv->pm_irq_mask = 0xffffffff;
- GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+ dev_priv->pm_imr = 0xffffffff;
+ GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
@@ -3585,7 +3238,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 display_mask, extra_mask;
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
@@ -3616,7 +3269,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_postinstall(dev);
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
*
* spinlocking not required here for correctness since interrupt
@@ -3696,14 +3349,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- dev_priv->pm_irq_mask = 0xffffffff;
+ dev_priv->pm_ier = 0x0;
+ dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled.
+ * is enabled/disabled. Same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3756,13 +3410,13 @@ static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_pre_postinstall(dev);
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -3808,7 +3462,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3971,7 +3625,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4020,7 +3674,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4054,7 +3708,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_USER_INTERRUPT;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
@@ -4168,7 +3822,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4222,7 +3876,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4400,9 +4054,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4487,6 +4141,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ if (HAS_GUC_SCHED(dev_priv))
+ dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
@@ -4508,9 +4165,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 8)
dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
- INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
- i915_hangcheck_elapsed);
-
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
@@ -4539,16 +4193,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
dev->driver->irq_uninstall = cherryview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev->driver->irq_uninstall = valleyview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
@@ -4557,13 +4211,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
@@ -4577,21 +4231,25 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
}
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- dev->driver->enable_vblank = i915_enable_vblank;
- dev->driver->disable_vblank = i915_disable_vblank;
}
}
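With the pm_irq_mask to pm_imr/pm_ier change above, the driver now tracks the PM interrupt mask (IMR) and enable (IER) values separately, presumably because the GuC events routed through the same registers need their enable state managed independently of RPS. Roughly, an event is only delivered when its bit is set in IER and clear in IMR. A small illustration of that bookkeeping, using a stand-in constant rather than the driver's real helpers:

	/* GUC_EVENT stands in for GEN9_GUC_TO_HOST_INT_EVENT (1 << 31) */
	#define GUC_EVENT (1u << 31)

	static inline void enable_guc_event(u32 *pm_imr, u32 *pm_ier)
	{
		*pm_ier |= GUC_EVENT;	/* enable: the event may be raised */
		*pm_imr &= ~GUC_EVENT;	/* unmask: the event is reported in IIR */
	}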
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 768ad89d9cd4..d46ffe7086bc 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -39,7 +39,7 @@ struct i915_params i915 __read_mostly = {
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = -1,
- .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+ .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
.disable_power_well = -1,
.enable_ips = 1,
.fastboot = 0,
@@ -47,6 +47,7 @@ struct i915_params i915 __read_mostly = {
.load_detect_test = 0,
.force_reset_modeset_test = 0,
.reset = true,
+ .error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 1,
@@ -115,6 +116,14 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+module_param_named(error_capture, i915.error_capture, bool, 0600);
+MODULE_PARM_DESC(error_capture,
+ "Record the GPU state following a hang. "
+ "This information in /sys/class/drm/card<N>/error is vital for "
+ "triaging and debugging hangs.");
+#endif
+
module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
@@ -136,9 +145,10 @@ MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
-MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support.");
+module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400);
+MODULE_PARM_DESC(alpha_support,
+ "Enable alpha quality driver support for latest hardware. "
+ "See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 3a0dd78ddb38..817ad959941e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -40,7 +40,7 @@ struct i915_params {
int enable_ppgtt;
int enable_execlists;
int enable_psr;
- unsigned int preliminary_hw_support;
+ unsigned int alpha_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
@@ -59,6 +59,7 @@ struct i915_params {
bool load_detect_test;
bool force_reset_modeset_test;
bool reset;
+ bool error_capture;
bool disable_display;
bool verbose_state_checks;
bool nuclear_pageflip;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 687c768833b3..fce8e198bc76 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -288,7 +288,8 @@ static const struct intel_device_info intel_haswell_info = {
#define BDW_FEATURES \
HSW_FEATURES, \
BDW_COLORS, \
- .has_logical_ring_contexts = 1
+ .has_logical_ring_contexts = 1, \
+ .has_64bit_reloc = 1
static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
@@ -308,6 +309,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_cherryview = 1,
+ .has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
@@ -347,6 +349,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
+ .has_64bit_reloc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
@@ -360,6 +363,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hw_contexts = 1,
.has_logical_ring_contexts = 1,
.has_guc = 1,
+ .has_decoupled_mmio = 1,
.ddb_size = 512,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -431,17 +435,15 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-extern int i915_driver_load(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
- DRM_INFO("This hardware requires preliminary hardware support.\n"
- "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
+ DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
+ "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
+ "to enable support in this kernel version, or check for kernel updates.\n");
return -ENODEV;
}
@@ -463,8 +465,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return i915_driver_load(pdev, ent);
}
-extern void i915_driver_unload(struct drm_device *dev);
-
static void i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -473,8 +473,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
drm_dev_unref(dev);
}
-extern const struct dev_pm_ops i915_pm_ops;
-
static struct pci_driver i915_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 70d96162def6..c70c07a7b586 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,8 +86,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DEVEN 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
-#define BSM 0x5c
-#define BSM_MASK (0xFFFF << 20)
+/* BSM in include/drm/i915_drm.h */
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
@@ -831,96 +830,7 @@ enum skl_disp_power_wells {
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
-/**
- * DOC: DPIO
- *
- * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
- * ports. DPIO is the name given to such a display PHY. These PHYs
- * don't follow the standard programming model using direct MMIO
- * registers, and instead their registers must be accessed trough IOSF
- * sideband. VLV has one such PHY for driving ports B and C, and CHV
- * adds another PHY for driving port D. Each PHY responds to specific
- * IOSF-SB port.
- *
- * Each display PHY is made up of one or two channels. Each channel
- * houses a common lane part which contains the PLL and other common
- * logic. CH0 common lane also contains the IOSF-SB logic for the
- * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
- * must be running when any DPIO registers are accessed.
- *
- * In addition to having their own registers, the PHYs are also
- * controlled through some dedicated signals from the display
- * controller. These include PLL reference clock enable, PLL enable,
- * and CRI clock selection, for example.
- *
- * Eeach channel also has two splines (also called data lanes), and
- * each spline is made up of one Physical Access Coding Sub-Layer
- * (PCS) block and two TX lanes. So each channel has two PCS blocks
- * and four TX lanes. The TX lanes are used as DP lanes or TMDS
- * data/clock pairs depending on the output type.
- *
- * Additionally the PHY also contains an AUX lane with AUX blocks
- * for each channel. This is used for DP AUX communication, but
- * this fact isn't really relevant for the driver since AUX is
- * controlled from the display controller side. No DPIO registers
- * need to be accessed during AUX communication,
- *
- * Generally on VLV/CHV the common lane corresponds to the pipe and
- * the spline (PCS/TX) corresponds to the port.
- *
- * For dual channel PHY (VLV/CHV):
- *
- * pipe A == CMN/PLL/REF CH0
- *
- * pipe B == CMN/PLL/REF CH1
- *
- * port B == PCS/TX CH0
- *
- * port C == PCS/TX CH1
- *
- * This is especially important when we cross the streams
- * ie. drive port B with pipe B, or port C with pipe A.
- *
- * For single channel PHY (CHV):
- *
- * pipe C == CMN/PLL/REF CH0
- *
- * port D == PCS/TX CH0
- *
- * On BXT the entire PHY channel corresponds to the port. That means
- * the PLL is also now associated with the port rather than the pipe,
- * and so the clock needs to be routed to the appropriate transcoder.
- * Port A PLL is directly connected to transcoder EDP and port B/C
- * PLLs can be routed to any transcoder A/B/C.
- *
- * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT). ::
- *
- *
- * Dual channel PHY (VLV/CHV/BXT)
- * ---------------------------------
- * | CH0 | CH1 |
- * | CMN/PLL/REF | CMN/PLL/REF |
- * |---------------|---------------| Display PHY
- * | PCS01 | PCS23 | PCS01 | PCS23 |
- * |-------|-------|-------|-------|
- * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
- * ---------------------------------
- * | DDI0 | DDI1 | DP/HDMI ports
- * ---------------------------------
- *
- * Single channel PHY (CHV/BXT)
- * -----------------
- * | CH0 |
- * | CMN/PLL/REF |
- * |---------------| Display PHY
- * | PCS01 | PCS23 |
- * |-------|-------|
- * |TX0|TX1|TX2|TX3|
- * -----------------
- * | DDI2 | DP/HDMI port
- * -----------------
- */
+/* DPIO registers */
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
@@ -1276,7 +1186,19 @@ enum skl_disp_power_wells {
#define DPIO_UPAR_SHIFT 30
/* BXT PHY registers */
-#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b))
+#define _BXT_PHY0_BASE 0x6C000
+#define _BXT_PHY1_BASE 0x162000
+#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE)
+
+#define _BXT_PHY(phy, reg) \
+ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
+
+#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
+ (reg_ch1) - _BXT_PHY0_BASE))
+#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
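The helpers above resolve a PHY0-relative register offset against the selected PHY's base, so a single _BC definition now covers both PHYs. As a worked example, using the CL1CM DW9 offsets defined further down (_PORT_CL1CM_DW9_BC = 0x6C024, _PORT_CL1CM_DW9_A = 0x162024), BXT_PORT_CL1CM_DW9(phy) = _BXT_PHY(phy, _PORT_CL1CM_DW9_BC) works out to:

	/* phy 0: 0x06C000 - 0x06C000 + 0x6C024 = 0x06C024 (the old _BC address)
	 * phy 1: 0x162000 - 0x06C000 + 0x6C024 = 0x162024 (the old _A address)
	 */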
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
@@ -1293,8 +1215,8 @@ enum skl_disp_power_wells {
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP)
+#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP)
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1314,18 +1236,18 @@ enum skl_disp_power_wells {
#define PORT_PLL_P2_SHIFT 8
#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
-#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
- _PORT_PLL_EBB_0_B, \
- _PORT_PLL_EBB_0_C)
+#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_0_B, \
+ _PORT_PLL_EBB_0_C)
#define _PORT_PLL_EBB_4_A 0x162038
#define _PORT_PLL_EBB_4_B 0x6C038
#define _PORT_PLL_EBB_4_C 0x6C344
#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
#define PORT_PLL_RECALIBRATE (1 << 14)
-#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
- _PORT_PLL_EBB_4_B, \
- _PORT_PLL_EBB_4_C)
+#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_4_B, \
+ _PORT_PLL_EBB_4_C)
#define _PORT_PLL_0_A 0x162100
#define _PORT_PLL_0_B 0x6C100
@@ -1356,57 +1278,56 @@ enum skl_disp_power_wells {
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
#define PORT_PLL_DCO_AMP(x) ((x)<<10)
-#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
- _PORT_PLL_0_B, \
- _PORT_PLL_0_C)
-#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4)
+#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_0_B, \
+ _PORT_PLL_0_C)
+#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
+ (idx) * 4)
/* BXT PHY common lane registers */
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
#define PHY_RESERVED (1 << 7)
-#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
- _PORT_CL1CM_DW0_A)
+#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
- _PORT_CL1CM_DW9_A)
+#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
#define _PORT_CL1CM_DW10_A 0x162028
#define _PORT_CL1CM_DW10_BC 0x6C028
#define IREF1RC_OFFSET_SHIFT 8
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
- _PORT_CL1CM_DW10_A)
+#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
#define SUS_CLK_CONFIG 0x3
-#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
- _PORT_CL1CM_DW28_A)
+#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
#define _PORT_CL1CM_DW30_A 0x162078
#define _PORT_CL1CM_DW30_BC 0x6C078
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
- _PORT_CL1CM_DW30_A)
+#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
-/* Defined for PHY0 only */
-#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358)
+/* The spec defines this only for BXT PHY0, but let's assume that this
+ * would exist for PHY1 too if it had a second channel.
+ */
+#define _PORT_CL2CM_DW6_A 0x162358
+#define _PORT_CL2CM_DW6_BC 0x6C358
+#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
#define GRC_DONE (1 << 22)
-#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC, \
- _PORT_REF_DW3_A)
+#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
@@ -1417,15 +1338,13 @@ enum skl_disp_power_wells {
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
-#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC, \
- _PORT_REF_DW6_A)
+#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
#define _PORT_REF_DW8_A 0x1621A0
#define _PORT_REF_DW8_BC 0x6C1A0
#define GRC_DIS (1 << 15)
#define GRC_RDY_OVRD (1 << 1)
-#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC, \
- _PORT_REF_DW8_A)
+#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
/* BXT PHY PCS registers */
#define _PORT_PCS_DW10_LN01_A 0x162428
@@ -1434,12 +1353,13 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW10_GRP_A 0x162C28
#define _PORT_PCS_DW10_GRP_B 0x6CC28
#define _PORT_PCS_DW10_GRP_C 0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
- _PORT_PCS_DW10_LN01_B, \
- _PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \
- _PORT_PCS_DW10_GRP_B, \
- _PORT_PCS_DW10_GRP_C)
+#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_LN01_B, \
+ _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_GRP_B, \
+ _PORT_PCS_DW10_GRP_C)
+
#define TX2_SWING_CALC_INIT (1 << 31)
#define TX1_SWING_CALC_INIT (1 << 30)
@@ -1454,15 +1374,15 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW12_GRP_C 0x6CE30
#define LANESTAGGER_STRAP_OVRD (1 << 6)
#define LANE_STAGGER_MASK 0x1F
-#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
- _PORT_PCS_DW12_LN01_B, \
- _PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
- _PORT_PCS_DW12_LN23_B, \
- _PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
- _PORT_PCS_DW12_GRP_B, \
- _PORT_PCS_DW12_GRP_C)
+#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN01_B, \
+ _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN23_B, \
+ _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_GRP_B, \
+ _PORT_PCS_DW12_GRP_C)
/* BXT PHY TX registers */
#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
@@ -1474,12 +1394,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW2_GRP_A 0x162D08
#define _PORT_TX_DW2_GRP_B 0x6CD08
#define _PORT_TX_DW2_GRP_C 0x6CF08
-#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \
- _PORT_TX_DW2_GRP_B, \
- _PORT_TX_DW2_GRP_C)
-#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \
- _PORT_TX_DW2_LN0_B, \
- _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_LN0_B, \
+ _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_GRP_B, \
+ _PORT_TX_DW2_GRP_C)
#define MARGIN_000_SHIFT 16
#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
#define UNIQ_TRANS_SCALE_SHIFT 8
@@ -1491,12 +1411,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW3_GRP_A 0x162D0C
#define _PORT_TX_DW3_GRP_B 0x6CD0C
#define _PORT_TX_DW3_GRP_C 0x6CF0C
-#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \
- _PORT_TX_DW3_GRP_B, \
- _PORT_TX_DW3_GRP_C)
-#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \
- _PORT_TX_DW3_LN0_B, \
- _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_LN0_B, \
+ _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_GRP_B, \
+ _PORT_TX_DW3_GRP_C)
#define SCALE_DCOMP_METHOD (1 << 26)
#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
@@ -1506,12 +1426,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW4_GRP_A 0x162D10
#define _PORT_TX_DW4_GRP_B 0x6CD10
#define _PORT_TX_DW4_GRP_C 0x6CF10
-#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \
- _PORT_TX_DW4_LN0_B, \
- _PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \
- _PORT_TX_DW4_GRP_B, \
- _PORT_TX_DW4_GRP_C)
+#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_LN0_B, \
+ _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_GRP_B, \
+ _PORT_TX_DW4_GRP_C)
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
@@ -1520,10 +1440,10 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW14_LN0_C 0x6C938
#define LATENCY_OPTIM_SHIFT 30
#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \
- _PORT_TX_DW14_LN0_B, \
- _PORT_TX_DW14_LN0_C) + \
- _BXT_LANE_OFFSET(lane))
+#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \
+ _MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \
+ _PORT_TX_DW14_LN0_C) + \
+ _BXT_LANE_OFFSET(lane))
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 _MMIO(0x4F074)
@@ -1605,6 +1525,7 @@ enum skl_disp_power_wells {
#define RING_HEAD(base) _MMIO((base)+0x34)
#define RING_START(base) _MMIO((base)+0x38)
#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
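The "in bytes -> pages" comment is terse; assuming the usual ring-control encoding where the buffer length is stored as (number of pages - 1) in a field starting at bit 12, subtracting one page from a page-aligned byte size yields exactly that encoding. For example:

	/* RING_CTL_SIZE(16384) = 16384 - 4096 = 0x3000 = (4 pages - 1) << 12 */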
#define RING_SYNC_0(base) _MMIO((base)+0x40)
#define RING_SYNC_1(base) _MMIO((base)+0x44)
#define RING_SYNC_2(base) _MMIO((base)+0x48)
@@ -1708,7 +1629,11 @@ enum skl_disp_power_wells {
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
-#define I915_NUM_INSTDONE_REG 4
+#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
+#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
+#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
+#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
+#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
#define RING_IPEIR(base) _MMIO((base)+0x64)
#define RING_IPEHR(base) _MMIO((base)+0x68)
/*
@@ -2089,9 +2014,9 @@ enum skl_disp_power_wells {
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
-#define GT_PARITY_ERROR(dev) \
+#define GT_PARITY_ERROR(dev_priv) \
(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
- (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+ (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
@@ -2184,8 +2109,9 @@ enum skl_disp_power_wells {
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
-#define FBC_STATUS2 _MMIO(0x43214)
-#define FBC_COMPRESSION_MASK 0x7ff
+#define FBC_STATUS2 _MMIO(0x43214)
+#define IVB_FBC_COMPRESSION_MASK 0x7ff
+#define BDW_FBC_COMPRESSION_MASK 0xfff
#define FBC_LL_SIZE (1536)
@@ -6011,6 +5937,7 @@ enum {
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
#define GEN8_GT_VECS_IRQ (1<<6)
+#define GEN8_GT_GUC_IRQ (1<<5)
#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
@@ -6022,6 +5949,16 @@ enum {
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
+#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
+#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
+#define GEN9_GUC_DISPLAY_EVENT (1<<29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
+#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
+#define GEN9_GUC_DB_RING_EVENT (1<<26)
+#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
+#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
+#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
+
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
@@ -7327,6 +7264,10 @@ enum {
#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
+#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
+#define AUD_CONFIG_N(n) \
+ (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
+ (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
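As a quick sanity check of AUD_CONFIG_N() (upper field shifted by 20 per the mask above, lower field by 4): for N = 6144 (0x1800), the value commonly used for 48 kHz HDMI audio, the macro yields:

	/* AUD_CONFIG_N(6144) == (0x01 << 20) | (0x800 << 4) == 0x00108000 */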
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
@@ -7350,6 +7291,13 @@ enum {
#define _HSW_AUD_MISC_CTRL_B 0x65110
#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
+#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
+#define HSW_AUD_M_CTS_ENABLE(pipe) _MMIO_PIPE(pipe, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
+#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
+#define AUD_CONFIG_M_MASK 0xfffff
+
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
@@ -7394,6 +7342,13 @@ enum {
#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+/* Decoupled MMIO register pair for kernel driver */
+#define GEN9_DECOUPLED_REG0_DW0 _MMIO(0xF00)
+#define GEN9_DECOUPLED_REG0_DW1 _MMIO(0xF04)
+#define GEN9_DECOUPLED_DW1_GO (1<<31)
+#define GEN9_DECOUPLED_PD_SHIFT 28
+#define GEN9_DECOUPLED_OP_SHIFT 24
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a0af170062b1..b0e1e7ca75da 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,35 +29,31 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration control */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* save FBC interval */
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
-static void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on the platform that supports FBC*/
intel_fbc_global_disable(dev_priv);
/* restore FBC interval */
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev);
+ i915_redisable_vga(dev_priv);
}
int i915_save_state(struct drm_device *dev)
@@ -68,14 +64,14 @@ int i915_save_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_save_display(dev);
+ i915_save_display(dev_priv);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
@@ -114,15 +110,15 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
- i915_restore_display(dev);
+ i915_restore_display(dev_priv);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1e5cbc585ca2..147420ccf49c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -8,11 +8,13 @@
*/
#include <linux/slab.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/reservation.h>
#include "i915_sw_fence.h"
+#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
+
static DEFINE_SPINLOCK(i915_sw_fence_lock);
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
@@ -114,11 +116,14 @@ static void i915_sw_fence_await(struct i915_sw_fence *fence)
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key)
{
BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
- init_waitqueue_head(&fence->wait);
+ __init_waitqueue_head(&fence->wait, name, key);
kref_init(&fence->kref);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)fn;
@@ -135,6 +140,8 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
list_del(&wq->task_list);
__i915_sw_fence_complete(wq->private, key);
i915_sw_fence_put(wq->private);
+ if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
+ kfree(wq);
return 0;
}
@@ -192,9 +199,9 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
return err;
}
-int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
- struct i915_sw_fence *signaler,
- wait_queue_t *wq)
+static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq, gfp_t gfp)
{
unsigned long flags;
int pending;
@@ -206,8 +213,22 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
+ pending = 0;
+ if (!wq) {
+ wq = kmalloc(sizeof(*wq), gfp);
+ if (!wq) {
+ if (!gfpflags_allow_blocking(gfp))
+ return -ENOMEM;
+
+ i915_sw_fence_wait(signaler);
+ return 0;
+ }
+
+ pending |= I915_SW_FENCE_FLAG_ALLOC;
+ }
+
INIT_LIST_HEAD(&wq->task_list);
- wq->flags = 0;
+ wq->flags = pending;
wq->func = i915_sw_fence_wake;
wq->private = i915_sw_fence_get(fence);
@@ -226,49 +247,64 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return pending;
}
-struct dma_fence_cb {
- struct fence_cb base;
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq)
+{
+ return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
+}
+
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ gfp_t gfp)
+{
+ return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
+}
+
+struct i915_sw_dma_fence_cb {
+ struct dma_fence_cb base;
struct i915_sw_fence *fence;
- struct fence *dma;
+ struct dma_fence *dma;
struct timer_list timer;
};
static void timer_i915_sw_fence_wake(unsigned long data)
{
- struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+ struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
cb->dma = NULL;
i915_sw_fence_commit(cb->fence);
cb->timer.function = NULL;
}
-static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
{
- struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
del_timer_sync(&cb->timer);
if (cb->timer.function)
i915_sw_fence_commit(cb->fence);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
kfree(cb);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp)
{
- struct dma_fence_cb *cb;
+ struct i915_sw_dma_fence_cb *cb;
int ret;
- if (fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma))
return 0;
cb = kmalloc(sizeof(*cb), gfp);
@@ -276,7 +312,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
- return fence_wait(dma, false);
+ return dma_fence_wait(dma, false);
}
cb->fence = i915_sw_fence_get(fence);
@@ -287,11 +323,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
timer_i915_sw_fence_wake, (unsigned long)cb,
TIMER_IRQSAFE);
if (timeout) {
- cb->dma = fence_get(dma);
+ cb->dma = dma_fence_get(dma);
mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
}
- ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+ ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
if (ret == 0) {
ret = 1;
} else {
@@ -305,16 +341,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp)
{
- struct fence *excl;
+ struct dma_fence *excl;
int ret = 0, pending;
if (write) {
- struct fence **shared;
+ struct dma_fence **shared;
unsigned int count, i;
ret = reservation_object_get_fences_rcu(resv,
@@ -339,7 +375,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
}
for (i = 0; i < count; i++)
- fence_put(shared[i]);
+ dma_fence_put(shared[i]);
kfree(shared);
} else {
excl = reservation_object_get_excl_rcu(resv);
@@ -356,7 +392,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
ret |= pending;
}
- fence_put(excl);
+ dma_fence_put(excl);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 373141602ca4..7508d23f823b 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,8 +16,8 @@
#include <linux/wait.h>
struct completion;
-struct fence;
-struct fence_ops;
+struct dma_fence;
+struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@@ -40,19 +40,37 @@ typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
enum i915_sw_fence_notify state);
#define __i915_sw_fence_call __aligned(4)
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key);
+#ifdef CONFIG_LOCKDEP
+#define i915_sw_fence_init(fence, fn) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __i915_sw_fence_init((fence), (fn), #fence, &__key); \
+} while (0)
+#else
+#define i915_sw_fence_init(fence, fn) \
+ __i915_sw_fence_init((fence), (fn), NULL, NULL)
+#endif
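
A minimal usage sketch (the container struct and callback below are hypothetical, not part of the patch): with CONFIG_LOCKDEP each i915_sw_fence_init() call site expands to its own static lock_class_key, so fences initialised from different sites get distinct lockdep classes for their embedded waitqueues.

	struct example_ctx {				/* hypothetical container */
		struct i915_sw_fence fence;
	};

	static int __i915_sw_fence_call
	example_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
	{
		return NOTIFY_DONE;			/* nothing to do in this sketch */
	}

	static void example_ctx_init(struct example_ctx *ctx)
	{
		/* this line provides the per-site lock_class_key under lockdep */
		i915_sw_fence_init(&ctx->fence, example_notify);
	}
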
+
void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
wait_queue_t *wq);
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+ struct i915_sw_fence *after,
+ gfp_t gfp);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp);
@@ -62,4 +80,9 @@ static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
return atomic_read(&fence->pending) < 0;
}
+static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
+{
+ wait_event(fence->wait, i915_sw_fence_done(fence));
+}
+
#endif /* _I915_SW_FENCE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1012eeea1324..47590ab08d7e 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -514,6 +514,8 @@ static const struct attribute *vlv_attrs[] = {
NULL,
};
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -571,6 +573,21 @@ static struct bin_attribute error_state_attr = {
.write = error_state_write,
};
+static void i915_setup_error_capture(struct device *kdev)
+{
+ if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+ DRM_ERROR("error_state sysfs setup failed\n");
+}
+
+static void i915_teardown_error_capture(struct device *kdev)
+{
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
+#else
+static void i915_setup_error_capture(struct device *kdev) {}
+static void i915_teardown_error_capture(struct device *kdev) {}
+#endif
+
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@@ -617,17 +634,15 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
- ret = sysfs_create_bin_file(&kdev->kobj,
- &error_state_attr);
- if (ret)
- DRM_ERROR("error_state sysfs setup failed\n");
+ i915_setup_error_capture(kdev);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
- sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+ i915_teardown_error_capture(kdev);
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sysfs_remove_files(&kdev->kobj, vlv_attrs);
else
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 178798002a73..c5d210ebaa9a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -466,7 +466,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->i915->drm.primary->index;
__entry->sync_from = from->engine->id;
__entry->sync_to = to->engine->id;
- __entry->seqno = from->fence.seqno;
+ __entry->seqno = from->global_seqno;
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -489,9 +489,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
__entry->flags = flags;
- fence_enable_sw_signaling(&req->fence);
+ dma_fence_enable_sw_signaling(&req->fence);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -596,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
__entry->blocking =
mutex_is_locked(&req->i915->drm.struct_mutex);
),
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
new file mode 100644
index 000000000000..a792dcb902b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_vma.h"
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+#include "intel_frontbuffer.h"
+
+#include <drm/drm_gem.h>
+
+static void
+i915_vma_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *rq)
+{
+ const unsigned int idx = rq->engine->id;
+ struct i915_vma *vma =
+ container_of(active, struct i915_vma, last_read[idx]);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+ i915_vma_clear_active(vma, idx);
+ if (i915_vma_is_active(vma))
+ return;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
+ WARN_ON(i915_vma_unbind(vma));
+
+ GEM_BUG_ON(!i915_gem_object_is_active(obj));
+ if (--obj->active_count)
+ return;
+
+ /* Bump our place on the bound list to keep it roughly in LRU order
+ * so that we don't steal from recently used but inactive objects
+ * (unless we are forced to ofc!)
+ */
+ if (obj->bind_count)
+ list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+
+ obj->mm.dirty = true; /* be paranoid */
+
+ if (i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_clear_active_reference(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static struct i915_vma *
+__i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_vma *vma;
+ struct rb_node *rb, **p;
+ int i;
+
+ GEM_BUG_ON(vm->closed);
+
+ vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->exec_list);
+ for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+ init_request_active(&vma->last_read[i], i915_vma_retire);
+ init_request_active(&vma->last_fence, NULL);
+ list_add(&vma->vm_link, &vm->unbound_list);
+ vma->vm = vm;
+ vma->obj = obj;
+ vma->size = obj->base.size;
+
+ if (view) {
+ vma->ggtt_view = *view;
+ if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ vma->size = view->params.partial.size;
+ vma->size <<= PAGE_SHIFT;
+ } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ vma->size =
+ intel_rotation_info_size(&view->params.rotated);
+ vma->size <<= PAGE_SHIFT;
+ }
+ }
+
+ if (i915_is_ggtt(vm)) {
+ vma->flags |= I915_VMA_GGTT;
+ list_add(&vma->obj_link, &obj->vma_list);
+ } else {
+ i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ list_add_tail(&vma->obj_link, &obj->vma_list);
+ }
+
+ rb = NULL;
+ p = &obj->vma_tree.rb_node;
+ while (*p) {
+ struct i915_vma *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, struct i915_vma, obj_node);
+ if (i915_vma_compare(pos, vm, view) < 0)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&vma->obj_node, rb, p);
+ rb_insert_color(&vma->obj_node, &obj->vma_tree);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+
+ return __i915_vma_create(obj, vm, view);
+}
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 bind_flags;
+ u32 vma_flags;
+ int ret;
+
+ if (WARN_ON(flags == 0))
+ return -EINVAL;
+
+ bind_flags = 0;
+ if (flags & PIN_GLOBAL)
+ bind_flags |= I915_VMA_GLOBAL_BIND;
+ if (flags & PIN_USER)
+ bind_flags |= I915_VMA_LOCAL_BIND;
+
+ vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ if (flags & PIN_UPDATE)
+ bind_flags |= vma_flags;
+ else
+ bind_flags &= ~vma_flags;
+ if (bind_flags == 0)
+ return 0;
+
+ if (vma_flags == 0 && vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma);
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ return ret;
+ }
+
+ ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+
+ vma->flags |= bind_flags;
+ return 0;
+}
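
A hedged caller-side sketch of the flag translation described above (the vma and error handling are assumed context): PIN_GLOBAL and PIN_USER select which page tables receive PTEs, and an already-set binding is only refreshed when PIN_UPDATE is passed.

	/* Illustrative only: bind into both GGTT and ppGTT (aliasing-ppGTT case). */
	err = i915_vma_bind(vma, vma->obj->cache_level, PIN_GLOBAL | PIN_USER);
	if (err)
		return err;
	/* vma->flags now has I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND set. */
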
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ /* Access through the GTT requires the device to be awake. */
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
+ return IO_ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return IO_ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ __i915_vma_pin(vma);
+ return ptr;
+}
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+{
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+
+ vma = fetch_and_zero(p_vma);
+ if (!vma)
+ return;
+
+ obj = vma->obj;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_release_unless_active(obj);
+}
+
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ if (!drm_mm_node_allocated(&vma->node))
+ return false;
+
+ if (vma->node.size < size)
+ return true;
+
+ if (alignment && vma->node.start & (alignment - 1))
+ return true;
+
+ if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
+ return true;
+
+ if (flags & PIN_OFFSET_BIAS &&
+ vma->node.start < (flags & PIN_OFFSET_MASK))
+ return true;
+
+ if (flags & PIN_OFFSET_FIXED &&
+ vma->node.start != (flags & PIN_OFFSET_MASK))
+ return true;
+
+ return false;
+}
+
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj));
+ fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj),
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + fence_size <=
+ dev_priv->ggtt.mappable_end);
+
+ /*
+ * Explicitly disable for rotated VMA since the display does not
+ * need the fence and the VMA is not accessible to other users.
+ */
+ if (mappable && fenceable &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ vma->flags |= I915_VMA_CAN_FENCE;
+ else
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+}
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *gtt_space = &vma->node;
+ struct drm_mm_node *other;
+
+ /*
+ * On some machines we have to be careful when putting differing types
+ * of snoopable memory together to avoid the prefetcher crossing memory
+ * domains and dying. During vm initialisation, we decide whether or not
+ * these constraints apply and set the drm_mm.color_adjust
+ * appropriately.
+ */
+ if (vma->vm->mm.color_adjust == NULL)
+ return true;
+
+ if (!drm_mm_node_allocated(gtt_space))
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+/**
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
+ * @size: requested size in bytes (can be larger than the VMA)
+ * @alignment: required alignment
+ * @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry, to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_gem_object *obj = vma->obj;
+ u64 start, end;
+ int ret;
+
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ size = max(size, vma->size);
+ if (flags & PIN_MAPPABLE)
+ size = i915_gem_get_ggtt_size(dev_priv, size,
+ i915_gem_object_get_tiling(obj));
+
+ alignment = max(max(alignment, vma->display_alignment),
+ i915_gem_get_ggtt_alignment(dev_priv, size,
+ i915_gem_object_get_tiling(obj),
+ flags & PIN_MAPPABLE));
+
+ start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+
+ end = vma->vm->total;
+ if (flags & PIN_MAPPABLE)
+ end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ if (flags & PIN_ZONE_4G)
+ end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
+ */
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
+ size, obj->base.size,
+ flags & PIN_MAPPABLE ? "mappable" : "total",
+ end);
+ return -E2BIG;
+ }
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ if (flags & PIN_OFFSET_FIXED) {
+ u64 offset = flags & PIN_OFFSET_MASK;
+ if (offset & (alignment - 1) || offset > end - size) {
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ vma->node.start = offset;
+ vma->node.size = size;
+ vma->node.color = obj->cache_level;
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret) {
+ ret = i915_gem_evict_for_vma(vma);
+ if (ret == 0)
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret)
+ goto err_unpin;
+ }
+ } else {
+ u32 search_flag, alloc_flag;
+
+ if (flags & PIN_HIGH) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ }
+
+ /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+ * so we know that we always have a minimum alignment of 4096.
+ * The drm_mm range manager is optimised to return results
+ * with zero alignment, so where possible use the optimal
+ * path.
+ */
+ if (alignment <= 4096)
+ alignment = 0;
+
+search_free:
+ ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+ &vma->node,
+ size, alignment,
+ obj->cache_level,
+ start, end,
+ search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(vma->vm, size, alignment,
+ obj->cache_level,
+ start, end,
+ flags);
+ if (ret == 0)
+ goto search_free;
+
+ goto err_unpin;
+ }
+
+ GEM_BUG_ON(vma->node.start < start);
+ GEM_BUG_ON(vma->node.start + vma->node.size > end);
+ }
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ obj->bind_count++;
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+}
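
A caller-side sketch of the PIN_OFFSET_FIXED path (i915_gem_object_ggtt_pin() and the chosen offset are illustrative assumptions): the desired GGTT offset is packed into the flags word, which is why i915_vma_insert() recovers it with flags & PIN_OFFSET_MASK.

	/* Sketch: pin an object at a fixed, page-aligned GGTT offset. */
	struct i915_vma *vma;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_GLOBAL | PIN_OFFSET_FIXED |
				       (256 * PAGE_SIZE));
	if (IS_ERR(vma))
		return PTR_ERR(vma);
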
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags)
+{
+ unsigned int bound = vma->flags;
+ int ret;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+ GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+
+ if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if ((bound & I915_VMA_BIND_MASK) == 0) {
+ ret = i915_vma_insert(vma, size, alignment, flags);
+ if (ret)
+ goto err;
+ }
+
+ ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ if (ret)
+ goto err;
+
+ if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+ __i915_vma_set_map_and_fenceable(vma);
+
+ GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+ return 0;
+
+err:
+ __i915_vma_unpin(vma);
+ return ret;
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->node.allocated);
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ GEM_BUG_ON(!i915_vma_is_closed(vma));
+ GEM_BUG_ON(vma->fence);
+
+ list_del(&vma->vm_link);
+ if (!i915_vma_is_ggtt(vma))
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+ kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
+
+ list_del(&vma->obj_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+
+ if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
+ WARN_ON(i915_vma_unbind(vma));
+}
+
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ unsigned long active;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ /* First wait upon any activity as retiring the request may
+ * have side-effects such as unpinning or even unbinding this vma.
+ */
+ active = i915_vma_get_active(vma);
+ if (active) {
+ int idx;
+
+ /* When a closed VMA is retired, it is unbound - eek.
+ * In order to prevent it from being recursively closed,
+ * take a pin on the vma so that the second unbind is
+ * aborted.
+ *
+ * Even more scary is that the retire callback may free
+ * the object (last active vma). To prevent the explosion
+ * we defer the actual object free to a worker that can
+ * only proceed once it acquires the struct_mutex (which
+ * we currently hold, therefore it cannot free this object
+ * before we are finished).
+ */
+ __i915_vma_pin(vma);
+
+ for_each_active(active, idx) {
+ ret = i915_gem_active_retire(&vma->last_read[idx],
+ &vma->vm->dev->struct_mutex);
+ if (ret)
+ break;
+ }
+
+ __i915_vma_unpin(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ }
+
+ if (i915_vma_is_pinned(vma))
+ return -EBUSY;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ goto destroy;
+
+ GEM_BUG_ON(obj->bind_count == 0);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_vma_is_map_and_fenceable(vma)) {
+ /* release the fence reg _after_ flushing */
+ ret = i915_vma_put_fence(vma);
+ if (ret)
+ return ret;
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ __i915_vma_iounmap(vma);
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+ }
+
+ if (likely(!vma->vm->closed)) {
+ trace_i915_vma_unbind(vma);
+ vma->vm->unbind_vma(vma);
+ }
+ vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+
+ drm_mm_remove_node(&vma->node);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+ if (vma->pages != obj->mm.pages) {
+ GEM_BUG_ON(!vma->pages);
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist. */
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->global_link,
+ &to_i915(obj->base.dev)->mm.unbound_list);
+
+ /* And finally now the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+
+destroy:
+ if (unlikely(i915_vma_is_closed(vma)))
+ i915_vma_destroy(vma);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
new file mode 100644
index 000000000000..85446f0b0b3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_H__
+#define __I915_VMA_H__
+
+#include <linux/io-mapping.h>
+
+#include <drm/drm_mm.h>
+
+#include "i915_gem_gtt.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
+#include "i915_gem_request.h"
+
+
+enum i915_cache_level;
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime, so object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct drm_i915_fence_reg *fence;
+ struct sg_table *pages;
+ void __iomem *iomap;
+ u64 size;
+ u64 display_alignment;
+
+ unsigned int flags;
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits.
+ */
+#define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW BIT(5)
+
+ /** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND BIT(6)
+#define I915_VMA_LOCAL_BIND BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT BIT(8)
+#define I915_VMA_CAN_FENCE BIT(9)
+#define I915_VMA_CLOSED BIT(10)
+
+ unsigned int active;
+ struct i915_gem_active last_read[I915_NUM_ENGINES];
+ struct i915_gem_active last_fence;
+
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
+ * functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head vm_link;
+
+ struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+};
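
A worked illustration of the flags packing documented in the struct above (the value is invented for the example): the pin count lives in the low four bits, so pinning, bind state and lifetime state share a single word.

	/* Example: a vma pinned twice, bound globally, living in the GGTT. */
	unsigned int flags = 2 | I915_VMA_GLOBAL_BIND | I915_VMA_GGTT;

	/* flags & I915_VMA_PIN_MASK == 2  -> i915_vma_pin_count() reports 2 */
	/* flags & I915_VMA_GGTT     != 0  -> i915_vma_is_ggtt() is true     */
	/* flags & I915_VMA_CLOSED   == 0  -> i915_vma_is_closed() is false  */
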
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CAN_FENCE;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CLOSED;
+}
+
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+ return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+ return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+ unsigned int engine)
+{
+ return vma->active & BIT(engine);
+}
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(upper_32_bits(vma->node.start));
+ GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+ return lower_32_bits(vma->node.start);
+}
+
+static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
+{
+ i915_gem_object_get(vma->obj);
+ return vma;
+}
+
+static inline void i915_vma_put(struct i915_vma *vma)
+{
+ i915_gem_object_put(vma->obj);
+}
+
+static inline long
+i915_vma_compare(struct i915_vma *vma,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+ if (vma->vm != vm)
+ return vma->vm - vm;
+
+ if (!view)
+ return vma->ggtt_view.type;
+
+ if (vma->ggtt_view.type != view->type)
+ return vma->ggtt_view.type - view->type;
+
+ return memcmp(&vma->ggtt_view.params,
+ &view->params,
+ sizeof(view->params));
+}
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags);
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ /* Pin early to prevent the shrinker/eviction logic from destroying
+ * our vma as we insert and bind.
+ */
+ if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ return 0;
+
+ return __i915_vma_do_pin(vma, size, alignment, flags);
+}
+
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_PIN_MASK;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+ return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+ vma->flags++;
+ GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ vma->flags--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ __i915_vma_unpin(vma);
+}
+
+/**
+ * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the returned iomapping;
+ * the caller must call i915_vma_unpin_iomap to relinquish the pinning
+ * after the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
+ * @vma: VMA to unpin
+ *
+ * Unpins the mapping previously obtained from i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->iomap == NULL);
+ i915_vma_unpin(vma);
+}
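
A minimal usage sketch of the iomap pairing described in the kernel-doc above (struct_mutex locking is assumed held and the write is just a placeholder):

	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);		/* takes an extra pin on success */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(0, ptr);				/* placeholder access via the WC mapping */
	i915_vma_unpin_iomap(vma);		/* drops the pin; the iomap stays cached */
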
+
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+ return sg_page(vma->pages->sgl);
+}
+
+/**
+ * i915_vma_pin_fence - pin fencing state
+ * @vma: vma to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * vma (and its object) is ready to be used as a scanout target. Fencing
+ * status must be synchronized first by calling i915_vma_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_vma_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the vma has a fence, false otherwise.
+ */
+static inline bool
+i915_vma_pin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ vma->fence->pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+/**
+ * i915_vma_unpin_fence - unpin fencing state
+ * @vma: vma to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_vma_pin_fence. It will handle both objects with and without an
+ * attached fence correctly, callers do not need to distinguish this.
+ */
+static inline void
+i915_vma_unpin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ GEM_BUG_ON(vma->fence->pin_count <= 0);
+ vma->fence->pin_count--;
+ }
+}
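
A hedged sketch of the intended pin/unpin pairing for scanout, following the kernel-doc above; i915_vma_get_fence() is the synchronisation step it refers to.

	/* Illustrative scanout sequence (locking and flushing elided). */
	ret = i915_vma_get_fence(vma);		/* bring fence register state up to date */
	if (ret)
		return ret;

	if (i915_vma_pin_fence(vma)) {
		/* ... scan out from the tiled mapping ... */
		i915_vma_unpin_fence(vma);
	}
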
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index b82de3072d4f..dbe9fb41ae53 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -84,7 +84,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
state = &intel_state->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
- intel_state->wait_req = NULL;
return state;
}
@@ -101,13 +100,13 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
@@ -142,10 +141,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.y2 =
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
- if (state->fb && intel_rotation_90_or_270(state->rotation)) {
- char *format_name;
- if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
+ if (state->fb && drm_rotation_90_or_270(state->rotation)) {
+ struct drm_format_name_buf format_name;
+
+ if (state->fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ state->fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
@@ -158,9 +158,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
switch (state->fb->pixel_format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
- format_name = drm_get_format_name(state->fb->pixel_format);
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name);
- kfree(format_name);
+ DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(state->fb->pixel_format,
+ &format_name));
return -EINVAL;
default:
@@ -168,6 +168,14 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
}
}
+ /* CHV ignores the mirror bit when the rotate bit is set :( */
+ if (IS_CHERRYVIEW(dev_priv) &&
+ state->rotation & DRM_ROTATE_180 &&
+ state->rotation & DRM_REFLECT_X) {
+ DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ return -EINVAL;
+ }
+
intel_state->base.visible = false;
ret = intel_plane->check_plane(plane, crtc_state, intel_state);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6c70a5bfd7d8..1c509f7410f5 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -57,6 +57,63 @@
* struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
*/
+/* DP N/M table */
+#define LC_540M 540000
+#define LC_270M 270000
+#define LC_162M 162000
+
+struct dp_aud_n_m {
+ int sample_rate;
+ int clock;
+ u16 m;
+ u16 n;
+};
+
+/* Values according to DP 1.4 Table 2-104 */
+static const struct dp_aud_n_m dp_aud_n_m[] = {
+ { 32000, LC_162M, 1024, 10125 },
+ { 44100, LC_162M, 784, 5625 },
+ { 48000, LC_162M, 512, 3375 },
+ { 64000, LC_162M, 2048, 10125 },
+ { 88200, LC_162M, 1568, 5625 },
+ { 96000, LC_162M, 1024, 3375 },
+ { 128000, LC_162M, 4096, 10125 },
+ { 176400, LC_162M, 3136, 5625 },
+ { 192000, LC_162M, 2048, 3375 },
+ { 32000, LC_270M, 1024, 16875 },
+ { 44100, LC_270M, 784, 9375 },
+ { 48000, LC_270M, 512, 5625 },
+ { 64000, LC_270M, 2048, 16875 },
+ { 88200, LC_270M, 1568, 9375 },
+ { 96000, LC_270M, 1024, 5625 },
+ { 128000, LC_270M, 4096, 16875 },
+ { 176400, LC_270M, 3136, 9375 },
+ { 192000, LC_270M, 2048, 5625 },
+ { 32000, LC_540M, 1024, 33750 },
+ { 44100, LC_540M, 784, 18750 },
+ { 48000, LC_540M, 512, 11250 },
+ { 64000, LC_540M, 2048, 33750 },
+ { 88200, LC_540M, 1568, 18750 },
+ { 96000, LC_540M, 1024, 11250 },
+ { 128000, LC_540M, 4096, 33750 },
+ { 176400, LC_540M, 3136, 18750 },
+ { 192000, LC_540M, 2048, 11250 },
+};
+
+static const struct dp_aud_n_m *
+audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+ if (rate == dp_aud_n_m[i].sample_rate &&
+ intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
+ return &dp_aud_n_m[i];
+ }
+
+ return NULL;
+}
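
A quick sanity check of the table (using the standard DP audio relation Maud/Naud = 512 * fs / f_LS_Clk, which the patch itself does not spell out): for a 48 kHz stream on a 162 MHz link-symbol clock, 512 * 48000 / 162000000 reduces to 512 / 3375, matching the { 48000, LC_162M, 512, 3375 } entry. A lookup is then just:

	/* Illustrative lookup, assuming the crtc drives a 1.62 GHz/lane link. */
	const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, 48000);

	if (nm)		/* expect m == 512, n == 3375 for port_clock == LC_162M */
		DRM_DEBUG_KMS("Maud %u, Naud %u\n", nm->m, nm->n);
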
+
static const struct {
int clock;
u32 config;
@@ -81,7 +138,7 @@ static const struct {
int clock;
int n;
int cts;
-} aud_ncts[] = {
+} hdmi_aud_ncts[] = {
{ 44100, TMDS_296M, 4459, 234375 },
{ 44100, TMDS_297M, 4704, 247500 },
{ 48000, TMDS_296M, 5824, 281250 },
@@ -121,45 +178,20 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
return hdmi_audio_clock[i].config;
}
-static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
+static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
+ int rate)
{
int i;
- for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
- if ((rate == aud_ncts[i].sample_rate) &&
- (mode->clock == aud_ncts[i].clock)) {
- return aud_ncts[i].n;
+ for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
+ if (rate == hdmi_aud_ncts[i].sample_rate &&
+ adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
+ return hdmi_aud_ncts[i].n;
}
}
return 0;
}
-static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
-{
- int n_low, n_up;
- uint32_t tmp = val;
-
- n_low = n & 0xfff;
- n_up = (n >> 12) & 0xff;
- tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
- tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
- (n_low << AUD_CONFIG_LOWER_N_SHIFT) |
- AUD_CONFIG_N_PROG_ENABLE);
- return tmp;
-}
-
-/* check whether N/CTS/M need be set manually */
-static bool audio_rate_need_prog(struct intel_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- if (((mode->clock == TMDS_297M) ||
- (mode->clock == TMDS_296M)) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
- return true;
- else
- return false;
-}
-
static bool intel_eld_uptodate(struct drm_connector *connector,
i915_reg_t reg_eldv, uint32_t bits_eldv,
i915_reg_t reg_elda, uint32_t bits_elda,
@@ -245,6 +277,97 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
+static void
+hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+
+ if (nm)
+ DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+ else
+ DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+
+ if (nm) {
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(nm->n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ }
+
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp &= ~AUD_CONFIG_M_MASK;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+
+ if (nm) {
+ tmp |= nm->m;
+ tmp |= AUD_M_CTS_M_VALUE_INDEX;
+ tmp |= AUD_M_CTS_M_PROG_ENABLE;
+ }
+
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+}
+
+static void
+hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ enum pipe pipe = intel_crtc->pipe;
+ int n;
+ u32 tmp;
+
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+
+ n = audio_config_hdmi_get_n(adjusted_mode, rate);
+ if (n != 0) {
+ DRM_DEBUG_KMS("using N %d\n", n);
+
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ } else {
+ DRM_DEBUG_KMS("using automatic N\n");
+ }
+
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp &= ~AUD_CONFIG_M_MASK;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ tmp |= AUD_M_CTS_M_PROG_ENABLE;
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+}
+
+static void
+hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
+ else
+ hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
+}
+
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -276,20 +399,16 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
}
static void hsw_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
+ struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ enum port port = intel_encoder->port;
const uint8_t *eld = connector->eld;
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
- enum port port = intel_dig_port->port;
uint32_t tmp;
int len, i;
- int n, rate;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
pipe_name(pipe), drm_eld_size(eld));
@@ -325,42 +444,17 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- else
- tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
-
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
- if (!acomp)
- rate = 0;
- else if (port >= PORT_A && port <= PORT_E)
- rate = acomp->aud_sample_rate[port];
- else {
- DRM_ERROR("invalid port: %d\n", port);
- rate = 0;
- }
- n = audio_config_get_n(adjusted_mode, rate);
- if (n != 0)
- tmp = audio_config_setup_n_reg(n, tmp);
- else
- DRM_DEBUG_KMS("no suitable N value is found\n");
- }
-
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ hsw_audio_config_update(intel_crtc, port, adjusted_mode);
mutex_unlock(&dev_priv->av_mutex);
}
-static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum port port = enc_to_dig_port(&encoder->base)->port;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_encoder->port;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@@ -400,13 +494,13 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
}
static void ilk_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
+ struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum port port = enc_to_dig_port(&encoder->base)->port;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_encoder->port;
uint8_t *eld = connector->eld;
uint32_t tmp, eldv;
int len, i;
@@ -425,13 +519,13 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
* infrastructure is not there yet.
*/
- if (HAS_PCH_IBX(connector->dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(connector->dev) ||
- IS_CHERRYVIEW(connector->dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@@ -480,24 +574,26 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @intel_encoder: encoder on which to enable audio
+ * @crtc_state: pointer to the current crtc state.
+ * @conn_state: pointer to the current connector state.
*
* The enable sequences may only be performed after enabling the transcoder and
* port, and after completed link training.
*/
-void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
- connector = drm_select_eld(encoder);
- if (!connector)
+ connector = conn_state->connector;
+ if (!connector || !connector->eld[0])
return;
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -508,7 +604,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
- if (intel_crtc_has_dp_encoder(crtc->config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -518,13 +614,19 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
adjusted_mode);
mutex_lock(&dev_priv->av_mutex);
- intel_dig_port->audio_connector = connector;
+ intel_encoder->audio_connector = connector;
+
/* referred in audio callbacks */
- dev_priv->dig_port_map[port] = intel_encoder;
+ dev_priv->av_enc_map[pipe] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
+ /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ pipe = -1;
+
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ (int) port, (int) pipe);
}
/**
@@ -537,22 +639,27 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = crtc->pipe;
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(intel_encoder);
mutex_lock(&dev_priv->av_mutex);
- intel_dig_port->audio_connector = NULL;
- dev_priv->dig_port_map[port] = NULL;
+ intel_encoder->audio_connector = NULL;
+ dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
+ /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ pipe = -1;
+
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ (int) port, (int) pipe);
}
/**
@@ -627,74 +734,68 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
return dev_priv->cdclk_freq;
}
-static int i915_audio_component_sync_audio_rate(struct device *kdev,
- int port, int rate)
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+ int port, int pipe)
+{
+
+ if (WARN_ON(pipe >= I915_MAX_PIPES))
+ return NULL;
+
+ /* MST */
+ if (pipe >= 0)
+ return dev_priv->av_enc_map[pipe];
+
+ /* Non-MST */
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_encoder *encoder;
+
+ encoder = dev_priv->av_enc_map[pipe];
+ if (encoder == NULL)
+ continue;
+
+ if (port == encoder->port)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
+ int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_crtc *crtc;
- struct drm_display_mode *mode;
+ struct drm_display_mode *adjusted_mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
- enum pipe pipe = INVALID_PIPE;
- u32 tmp;
- int n;
int err = 0;
- /* HSW, BDW, SKL, KBL need this fix */
- if (!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_BROADWELL(dev_priv) &&
- !IS_HASWELL(dev_priv))
+ if (!HAS_DDI(dev_priv))
return 0;
i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
+
/* 1. get the pipe */
- intel_encoder = dev_priv->dig_port_map[port];
- /* intel_encoder might be NULL for DP MST */
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder || !intel_encoder->base.crtc ||
- intel_encoder->type != INTEL_OUTPUT_HDMI) {
- DRM_DEBUG_KMS("no valid port %c\n", port_name(port));
+ (intel_encoder->type != INTEL_OUTPUT_HDMI &&
+ intel_encoder->type != INTEL_OUTPUT_DP)) {
+ DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
+
+ /* pipe passed from the audio driver will be -1 for Non-MST case */
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
- if (pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
- err = -ENODEV;
- goto unlock;
- }
- DRM_DEBUG_KMS("pipe %c connects port %c\n",
- pipe_name(pipe), port_name(port));
- mode = &crtc->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
- /* 2. check whether to set the N/CTS/M manually or not */
- if (!audio_rate_need_prog(crtc, mode)) {
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- goto unlock;
- }
-
- n = audio_config_get_n(mode, rate);
- if (n == 0) {
- DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
- port_name(port));
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- goto unlock;
- }
-
- /* 3. set the N/CTS/M */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp = audio_config_setup_n_reg(n, tmp);
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ hsw_audio_config_update(crtc, port, adjusted_mode);
unlock:
mutex_unlock(&dev_priv->av_mutex);
@@ -703,27 +804,29 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
- bool *enabled,
+ int pipe, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
const u8 *eld;
int ret = -EINVAL;
mutex_lock(&dev_priv->av_mutex);
- intel_encoder = dev_priv->dig_port_map[port];
- /* intel_encoder might be NULL for DP MST */
- if (intel_encoder) {
- ret = 0;
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- *enabled = intel_dig_port->audio_connector != NULL;
- if (*enabled) {
- eld = intel_dig_port->audio_connector->eld;
- ret = drm_eld_size(eld);
- memcpy(buf, eld, min(max_bytes, ret));
- }
+
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
+ if (!intel_encoder) {
+ DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ mutex_unlock(&dev_priv->av_mutex);
+ return ret;
+ }
+
+ ret = 0;
+ *enabled = intel_encoder->audio_connector != NULL;
+ if (*enabled) {
+ eld = intel_encoder->audio_connector->eld;
+ ret = drm_eld_size(eld);
+ memcpy(buf, eld, min(max_bytes, ret));
}
mutex_unlock(&dev_priv->av_mutex);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c6e69e4cfa83..7ffab1abc518 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -996,6 +996,10 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
goto err;
}
+ /* Log the presence of sequences we won't run. */
+ if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
+ DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
@@ -1031,6 +1035,77 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
+static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port p;
+
+ if (!info->alternate_ddc_pin)
+ return;
+
+ for_each_port_masked(p, (1 << port) - 1) {
+ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+ if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+ continue;
+
+ DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(p), i->alternate_ddc_pin,
+ port_name(port), port_name(p));
+
+ /*
+ * If multiple ports supposedly share the pin, then
+ * DVI/HDMI can't exist on the shared port. Otherwise they
+ * would share the same DDC pin and the system couldn't
+ * communicate with them separately.
+ *
+ * Due to parsing the ports in alphabetical order,
+ * a higher port will always clobber a lower one.
+ */
+ i->supports_dvi = false;
+ i->supports_hdmi = false;
+ i->alternate_ddc_pin = 0;
+ }
+}
+
+static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port p;
+
+ if (!info->alternate_aux_channel)
+ return;
+
+ for_each_port_masked(p, (1 << port) - 1) {
+ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+ if (info->alternate_aux_channel != i->alternate_aux_channel)
+ continue;
+
+ DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(p), i->alternate_aux_channel,
+ port_name(port), port_name(p));
+
+ /*
+ * If multiple ports supposedly share the AUX channel,
+ * then DP can't exist on the shared port. Otherwise they
+ * would share the same AUX channel and the system
+ * couldn't communicate with them separately.
+ *
+ * Due to parsing the ports in alphabetical order,
+ * a higher port will always clobber a lower one.
+ */
+ i->supports_dp = false;
+ i->alternate_aux_channel = 0;
+ }
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
@@ -1072,7 +1147,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->raw[25];
+ aux_channel = child->common.aux_channel;
ddc_pin = child->common.ddc_pin;
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
@@ -1105,54 +1180,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- if (port == PORT_E) {
- info->alternate_ddc_pin = ddc_pin;
- /* if DDIE share ddc pin with other port, then
- * dvi/hdmi couldn't exist on the shared port.
- * Otherwise they share the same ddc bin and system
- * couldn't communicate with them seperately. */
- if (ddc_pin == DDC_PIN_B) {
- dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
- } else if (ddc_pin == DDC_PIN_C) {
- dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
- } else if (ddc_pin == DDC_PIN_D) {
- dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
- }
- } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
- DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
- else if (ddc_pin == DDC_PIN_C && port != PORT_C)
- DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
- else if (ddc_pin == DDC_PIN_D && port != PORT_D)
- DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+ info->alternate_ddc_pin = ddc_pin;
+
+ sanitize_ddc_pin(dev_priv, port);
}
if (is_dp) {
- if (port == PORT_E) {
- info->alternate_aux_channel = aux_channel;
- /* if DDIE share aux channel with other port, then
- * DP couldn't exist on the shared port. Otherwise
- * they share the same aux channel and system
- * couldn't communicate with them seperately. */
- if (aux_channel == DP_AUX_A)
- dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
- else if (aux_channel == DP_AUX_B)
- dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
- else if (aux_channel == DP_AUX_C)
- dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
- else if (aux_channel == DP_AUX_D)
- dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
- }
- else if (aux_channel == DP_AUX_A && port != PORT_A)
- DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
- else if (aux_channel == DP_AUX_B && port != PORT_B)
- DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
- else if (aux_channel == DP_AUX_C && port != PORT_C)
- DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
- else if (aux_channel == DP_AUX_D && port != PORT_D)
- DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+ info->alternate_aux_channel = aux_channel;
+
+ sanitize_aux_ch(dev_priv, port);
}
if (bdb->version >= 158) {
@@ -1641,7 +1677,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+ enum port port)
{
static const struct {
u16 dp, hdmi;
@@ -1655,22 +1692,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
};
- int i;
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
- if (!dev_priv->vbt.child_dev_num)
+ if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
+ if (p_child->common.dvo_port == port_mapping[port].dp)
+ return true;
+
+ /* Only accept an HDMI dvo_port as DP++ if it has an AUX channel */
+ if (p_child->common.dvo_port == port_mapping[port].hdmi &&
+ p_child->common.aux_channel != 0)
+ return true;
+
+ return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
- if ((p_child->common.dvo_port == port_mapping[port].dp ||
- p_child->common.dvo_port == port_mapping[port].hdmi) &&
- (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ if (child_dev_is_dp_dual_mode(p_child, port))
return true;
}
@@ -1759,3 +1809,52 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
return false;
}
+
+/**
+ * intel_bios_is_lspcon_present - check if LSPCON is present on @port
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if LSPCON is present on this port
+ */
+bool
+intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
+ if (!HAS_LSPCON(dev_priv))
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ if (!dev_priv->vbt.child_dev[i].common.lspcon)
+ continue;
+
+ switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ case DVO_PORT_DPA:
+ case DVO_PORT_HDMIA:
+ if (port == PORT_A)
+ return true;
+ break;
+ case DVO_PORT_DPB:
+ case DVO_PORT_HDMIB:
+ if (port == PORT_B)
+ return true;
+ break;
+ case DVO_PORT_DPC:
+ case DVO_PORT_HDMIC:
+ if (port == PORT_C)
+ return true;
+ break;
+ case DVO_PORT_DPD:
+ case DVO_PORT_HDMID:
+ if (port == PORT_D)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
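sanitize_ddc_pin() and sanitize_aux_ch() above both rely on the ports being parsed in alphabetical order, so a later port that claims an already-claimed resource clobbers the earlier one. A simplified userspace model of the DDC-pin case follows; the port count, field names and pin values are illustrative, not the VBT's.

#include <stdio.h>
#include <stdbool.h>

#define NUM_PORTS 5	/* ports A..E in this model */

struct port_info {
	int ddc_pin;		/* 0 means no alternate DDC pin claimed */
	bool supports_hdmi;
};

/* Earlier-parsed ports lose DVI/HDMI when a later port claims the same pin. */
static void sanitize_ddc_pin(struct port_info *info, int port)
{
	if (!info[port].ddc_pin)
		return;

	for (int p = 0; p < port; p++) {
		if (info[p].ddc_pin != info[port].ddc_pin)
			continue;

		printf("port %c clobbers port %c (shared DDC pin %d)\n",
		       'A' + port, 'A' + p, info[p].ddc_pin);
		info[p].supports_hdmi = false;
		info[p].ddc_pin = 0;
	}
}

int main(void)
{
	struct port_info info[NUM_PORTS] = {
		[1] = { .ddc_pin = 5, .supports_hdmi = true },	/* port B */
		[4] = { .ddc_pin = 5, .supports_hdmi = true },	/* port E */
	};

	for (int port = 0; port < NUM_PORTS; port++)
		sanitize_ddc_pin(info, port);

	for (int port = 0; port < NUM_PORTS; port++)
		printf("port %c: hdmi %s\n", 'A' + port,
		       info[port].supports_hdmi ? "yes" : "no");
	return 0;
}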
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 495611b7068d..c9c46a538edb 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -83,16 +83,18 @@ static void irq_enable(struct intel_engine_cs *engine)
*/
engine->breadcrumbs.irq_posted = true;
- spin_lock_irq(&engine->i915->irq_lock);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
engine->irq_enable(engine);
- spin_unlock_irq(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
- spin_lock_irq(&engine->i915->irq_lock);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
- spin_unlock_irq(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq_lock);
engine->breadcrumbs.irq_posted = false;
}
@@ -293,9 +295,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
first = __intel_engine_add_wait(engine, wait);
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
return first;
}
@@ -326,7 +328,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
if (RB_EMPTY_NODE(&wait->node))
return;
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
if (RB_EMPTY_NODE(&wait->node))
goto out_unlock;
@@ -400,7 +402,7 @@ out_unlock:
GEM_BUG_ON(rb_first(&b->waiters) !=
(b->first_wait ? &b->first_wait->node : NULL));
GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static bool signal_complete(struct drm_i915_gem_request *request)
@@ -464,7 +466,7 @@ static int intel_breadcrumbs_signaler(void *arg)
&request->signaling.wait);
local_bh_disable();
- fence_signal(&request->fence);
+ dma_fence_signal(&request->fence);
local_bh_enable(); /* kick start the tasklets */
/* Find the next oldest signal. Note that as we have
@@ -473,14 +475,14 @@ static int intel_breadcrumbs_signaler(void *arg)
* we just completed - so double check we are still
* the oldest before picking the next one.
*/
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
if (request == b->first_signal) {
struct rb_node *rb =
rb_next(&request->signaling.node);
b->first_signal = rb ? to_signaler(rb) : NULL;
}
rb_erase(&request->signaling.node, &b->signals);
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
i915_gem_request_put(request);
} else {
@@ -502,11 +504,20 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct rb_node *parent, **p;
bool first, wakeup;
- /* locked by fence_enable_sw_signaling() */
+ /* Note that we may be called from an interrupt handler on another
+ * device (e.g. nouveau signaling a fence completion causing us
+ * to submit a request, and so enable signaling). As such,
+ * we need to make sure that all other users of b->lock protect
+ * against interrupts, i.e. use spin_lock_irqsave.
+ */
+
+ /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
assert_spin_locked(&request->lock);
+ if (!request->global_seqno)
+ return;
request->signaling.wait.tsk = b->signaler;
- request->signaling.wait.seqno = request->fence.seqno;
+ request->signaling.wait.seqno = request->global_seqno;
i915_gem_request_get(request);
spin_lock(&b->lock);
@@ -530,8 +541,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
p = &b->signals.rb_node;
while (*p) {
parent = *p;
- if (i915_seqno_passed(request->fence.seqno,
- to_signaler(parent)->fence.seqno)) {
+ if (i915_seqno_passed(request->global_seqno,
+ to_signaler(parent)->global_seqno)) {
p = &parent->rb_right;
first = false;
} else {
@@ -592,7 +603,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
cancel_fake_irq(engine);
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
__intel_breadcrumbs_disable_irq(b);
if (intel_engine_has_waiter(engine)) {
@@ -605,7 +616,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
irq_disable(engine);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
@@ -618,33 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
-unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int mask = 0;
- /* To avoid the task_struct disappearing beneath us as we wake up
- * the process, we must first inspect the task_struct->state under the
- * RCU lock, i.e. as we call wake_up_process() we must be holding the
- * rcu_read_lock().
- */
- for_each_engine(engine, i915)
- if (unlikely(intel_engine_wakeup(engine)))
- mask |= intel_engine_flag(engine);
+ for_each_engine(engine, i915, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
- return mask;
-}
+ spin_lock_irq(&b->lock);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- unsigned int mask = 0;
+ if (b->first_wait) {
+ wake_up_process(b->first_wait->tsk);
+ mask |= intel_engine_flag(engine);
+ }
- for_each_engine(engine, i915) {
- if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
- wake_up_process(engine->breadcrumbs.signaler);
+ if (b->first_signal) {
+ wake_up_process(b->signaler);
mask |= intel_engine_flag(engine);
}
+
+ spin_unlock_irq(&b->lock);
}
return mask;
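The spin_lock to spin_lock_irq conversions above all follow the rule spelled out in the enable_signaling comment: once a lock can be taken from interrupt context, every process-context user must disable local interrupts while holding it. A minimal sketch of that convention, written as a stand-alone hypothetical module rather than the driver's actual code:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_count;

/* Process context: disable local interrupts for the critical section. */
static void demo_process_path(void)
{
	spin_lock_irq(&demo_lock);
	demo_count++;
	spin_unlock_irq(&demo_lock);
}

/* Interrupt context (interrupts already off): a plain spin_lock suffices,
 * mirroring irq_enable()/irq_disable() in the patch above.
 */
static void demo_irq_path(void)
{
	spin_lock(&demo_lock);
	demo_count++;
	spin_unlock(&demo_lock);
}

static int __init demo_init(void)
{
	demo_process_path();
	demo_irq_path();	/* in real code this would run from an irq handler */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");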
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 95a72771eea6..d81232b79f00 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -95,8 +95,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
@@ -180,7 +179,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
- if (INTEL_INFO(dev)->gen > 6) {
+ if (INTEL_GEN(dev_priv) > 6) {
uint16_t postoff = 0;
if (intel_crtc_state->limited_color_range)
@@ -273,7 +272,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
int i;
- if (HAS_GMCH_DISPLAY(dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@@ -288,7 +287,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
(drm_color_lut_extract(lut[i].green, 8) << 8) |
drm_color_lut_extract(lut[i].blue, 8);
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -297,7 +296,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
for (i = 0; i < 256; i++) {
uint32_t word = (i << 16) | (i << 8) | i;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -326,7 +325,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
+ if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
@@ -345,11 +344,10 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
if (crtc_state_is_legacy(state)) {
haswell_load_luts(state);
@@ -428,8 +426,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
static void cherryview_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
@@ -446,7 +443,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->degamma_lut) {
lut = (struct drm_color_lut *) state->degamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
word0 =
@@ -461,7 +458,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->gamma_lut) {
lut = (struct drm_color_lut *) state->gamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
word0 =
@@ -497,12 +494,12 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state)
int intel_color_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
size_t gamma_length, degamma_length;
- degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
+ degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size *
sizeof(struct drm_color_lut);
- gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
+ gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size *
sizeof(struct drm_color_lut);
/*
@@ -529,19 +526,18 @@ int intel_color_check(struct drm_crtc *crtc,
void intel_color_init(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
drm_mode_crtc_set_gamma_size(crtc, 256);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
dev_priv->display.load_luts = cherryview_load_luts;
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
- } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
- IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
} else {
@@ -549,10 +545,10 @@ void intel_color_init(struct drm_crtc *crtc)
}
/* Enable color management support when we have degamma & gamma LUTs. */
- if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
- INTEL_INFO(dev)->color.gamma_lut_size != 0)
+ if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
+ INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
drm_crtc_enable_color_mgmt(crtc,
- INTEL_INFO(dev)->color.degamma_lut_size,
- true,
- INTEL_INFO(dev)->color.gamma_lut_size);
+ INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ true,
+ INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dfbcf16b41df..86ecec5601d4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -84,7 +84,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & ADPA_DAC_ENABLE))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -147,14 +147,13 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int mode)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
u32 adpa;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -165,16 +164,16 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev))
+ else if (HAS_PCH_CPT(dev_priv))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
switch (mode) {
@@ -241,7 +240,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- int max_dotclk = to_i915(dev)->max_dotclk_freq;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -250,15 +250,15 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev_priv))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_GEN3(dev) || IS_GEN4(dev))
+ else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
max_clock = 400000;
else
max_clock = 350000;
@@ -269,7 +269,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev) &&
+ if (HAS_PCH_LPT(dev_priv) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
@@ -280,13 +280,13 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev)) {
+ if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
DRM_DEBUG_KMS("LPT only supports 24bpp\n");
return false;
@@ -296,7 +296,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
pipe_config->port_clock = 135000 * 2;
return true;
@@ -312,7 +312,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
u32 save_adpa;
crt->force_hotplug_required = 0;
@@ -419,10 +419,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
return intel_ironlake_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv))
return valleyview_crt_detect_hotplug(connector);
/*
@@ -430,7 +430,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G4X(dev) && !IS_GM45(dev))
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
tries = 2;
else
tries = 1;
@@ -566,13 +566,13 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
st00 = I915_READ8(_VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
@@ -643,11 +643,36 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
return status;
}
+static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_spurious_crt_detect[] = {
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "Intel DZ77BH-55K",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"),
+ },
+ },
+ { }
+};
+
static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -659,10 +684,14 @@ intel_crt_detect(struct drm_connector *connector, bool force)
connector->base.id, connector->name,
force);
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_spurious_crt_detect))
+ return connector_status_disconnected;
+
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -684,7 +713,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
+ if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -700,7 +729,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else if (INTEL_INFO(dev)->gen < 4)
+ else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
else if (i915.load_detect_test)
@@ -740,7 +769,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
- if (ret || !IS_G4X(dev))
+ if (ret || !IS_G4X(dev_priv))
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -762,11 +791,10 @@ static int intel_crt_set_property(struct drm_connector *connector,
void intel_crt_reset(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
@@ -808,32 +836,6 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id intel_no_crt[] = {
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "ACER ZGB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
- },
- },
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "DELL XPS 8700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
- },
- },
- { }
-};
-
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -843,13 +845,9 @@ void intel_crt_init(struct drm_device *dev)
i915_reg_t adpa_reg;
u32 adpa;
- /* Skip machines without VGA that falsely report hotplug events */
- if (dmi_check_system(intel_no_crt))
- return;
-
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -893,12 +891,12 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
@@ -907,20 +905,23 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = adpa_reg;
crt->base.compute_config = intel_crt_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
} else {
crt->base.disable = intel_disable_crt;
}
crt->base.enable = intel_enable_crt;
- if (I915_HAS_HOTPLUG(dev))
+ if (I915_HAS_HOTPLUG(dev_priv) &&
+ !dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
+ crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.post_disable = hsw_post_disable_crt;
} else {
+ crt->base.port = PORT_NONE;
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
@@ -928,7 +929,7 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/*
@@ -941,7 +942,7 @@ void intel_crt_init(struct drm_device *dev)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev)) {
+ if (HAS_PCH_LPT(dev_priv)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
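intel_crt_detect() now consults the DMI quirk table at detect time instead of skipping connector creation entirely. The table-plus-callback shape is the standard kernel quirk pattern; here is a small userspace model of it, where the struct and function names are invented for illustration (the real code uses dmi_system_id and dmi_check_system).

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor;
	const char *product;
	int (*callback)(const struct quirk *q);
};

static int skip_crt_detect(const struct quirk *q)
{
	printf("Skipping CRT detection for %s %s\n", q->vendor, q->product);
	return 1;
}

/* Known boards that raise spurious CRT hotplug events (from the patch above). */
static const struct quirk quirks[] = {
	{ "ACER", "ZGB", skip_crt_detect },
	{ "Intel Corporation", "DZ77BH-55K", skip_crt_detect },
	{ NULL, NULL, NULL },
};

/* Return non-zero when the running system matches a quirk entry. */
static int check_quirks(const char *vendor, const char *product)
{
	int matches = 0;
	const struct quirk *q;

	for (q = quirks; q->vendor; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			matches += q->callback(q);

	return matches;
}

int main(void)
{
	return check_quirks("ACER", "ZGB") ? 0 : 1;
}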
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 1ea0e1f43397..d7a04bca8c28 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -168,12 +168,6 @@ struct stepping_info {
char substepping;
};
-static const struct stepping_info kbl_stepping_info[] = {
- {'A', '0'}, {'B', '0'}, {'C', '0'},
- {'D', '0'}, {'E', '0'}, {'F', '0'},
- {'G', '0'}, {'H', '0'}, {'I', '0'},
-};
-
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -194,10 +188,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
- if (IS_KABYLAKE(dev_priv)) {
- size = ARRAY_SIZE(kbl_stepping_info);
- si = kbl_stepping_info;
- } else if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 15d47c87def6..10ec9d4b7d45 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -167,8 +167,47 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x80005012, 0x000000C0, 0x3 },
};
+/* Kabylake H and S */
+static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x00000097, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Kabylake U */
+static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x80009010, 0x000000C0, 0x3 },
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00002016, 0x0000004F, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake Y */
+static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
+ { 0x00001017, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x8000800F, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000004C, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
/*
- * Skylake H and S
+ * Skylake/Kabylake H and S
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
@@ -185,7 +224,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
};
/*
- * Skylake U
+ * Skylake/Kabylake U
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
@@ -202,7 +241,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
};
/*
- * Skylake Y
+ * Skylake/Kabylake Y
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
@@ -218,7 +257,7 @@ static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
{ 0x00000018, 0x0000008A, 0x0 },
};
-/* Skylake U, H and S */
+/* Skylake/Kabylake U, H and S */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000AC, 0x0 },
{ 0x00005012, 0x0000009D, 0x0 },
@@ -233,7 +272,7 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x80000018, 0x000000C0, 0x1 },
};
-/* Skylake Y */
+/* Skylake/Kabylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
@@ -334,10 +373,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
return skl_y_ddi_translations_dp;
- } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
+ } else if (IS_SKL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
return skl_u_ddi_translations_dp;
} else {
@@ -347,6 +386,21 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
+kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ if (IS_KBL_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
+ return kbl_y_ddi_translations_dp;
+ } else if (IS_KBL_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
+ return kbl_u_ddi_translations_dp;
+ } else {
+ *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
+ return kbl_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
@@ -362,7 +416,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
- return skl_get_buf_trans_dp(dev_priv, n_entries);
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ else
+ return skl_get_buf_trans_dp(dev_priv, n_entries);
}
static const struct ddi_buf_trans *
@@ -430,21 +487,18 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
if (IS_BROXTON(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv)) {
+ ddi_translations_fdi = NULL;
+ ddi_translations_dp =
+ kbl_get_buf_trans_dp(dev_priv, &n_dp_entries);
+ ddi_translations_edp =
+ skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
+ } else if (IS_SKYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
-
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
- if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
- port != PORT_A && port != PORT_E &&
- n_edp_entries > 9))
- n_edp_entries = 9;
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@@ -464,6 +518,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
}
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+
+ if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+ port != PORT_A && port != PORT_E &&
+ n_edp_entries > 9))
+ n_edp_entries = 9;
+ }
+
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
@@ -1020,13 +1085,13 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_INFO(dev)->gen <= 8)
+ if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
}
@@ -1081,14 +1146,14 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else
@@ -1189,7 +1254,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
(intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
@@ -1434,7 +1499,12 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
if (dp_iboost) {
iboost = dp_iboost;
} else {
- ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
+ if (IS_KABYLAKE(dev_priv))
+ ddi_translations = kbl_get_buf_trans_dp(dev_priv,
+ &n_entries);
+ else
+ ddi_translations = skl_get_buf_trans_dp(dev_priv,
+ &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
@@ -1477,7 +1547,6 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
{
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
- uint32_t val;
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
@@ -1506,38 +1575,11 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
}
}
- /*
- * While we write to the group register to program all lanes at once we
- * can read only lane registers and we pick lanes 0/1 for that.
- */
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
- val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
- val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
- ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
- val &= ~SCALE_DCOMP_METHOD;
- if (ddi_translations[level].enable)
- val |= SCALE_DCOMP_METHOD;
-
- if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
-
- I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
- val &= ~DE_EMPHASIS;
- val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
-
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
- val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+ bxt_ddi_phy_set_signal_level(dev_priv, port,
+ ddi_translations[level].margin,
+ ddi_translations[level].scale,
+ ddi_translations[level].enable,
+ ddi_translations[level].deemphasis);
}
static uint32_t translate_signal_level(int signal_levels)
@@ -1711,8 +1753,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1742,10 +1783,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
intel_edp_panel_off(intel_dp);
}
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
- else if (INTEL_INFO(dev)->gen < 9)
+ else if (INTEL_GEN(dev_priv) < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
if (type == INTEL_OUTPUT_HDMI) {
@@ -1795,8 +1836,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1814,7 +1854,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
@@ -1824,7 +1864,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- intel_audio_codec_enable(intel_encoder);
+ intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
}
}
@@ -1853,332 +1893,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
}
}
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- enum port port;
-
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
- return false;
-
- if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
- phy);
-
- return false;
- }
-
- if (phy == DPIO_PHY1 &&
- !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
- DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
-
- return false;
- }
-
- if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
- phy);
-
- return false;
- }
-
- for_each_port_masked(port,
- phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
- BIT(PORT_A)) {
- u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
- if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
- "for port %c powered down "
- "(PHY_CTL %08x)\n",
- phy, port_name(port), tmp);
-
- return false;
- }
- }
-
- return true;
-}
-
-static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
-
- return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
-}
-
-static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- if (intel_wait_for_register(dev_priv,
- BXT_PORT_REF_DW3(phy),
- GRC_DONE, GRC_DONE,
- 10))
- DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
-}
-
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- u32 val;
-
- if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
- /* Still read out the GRC value for state verification */
- if (phy == DPIO_PHY0)
- dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
-
- if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
- "won't reprogram it\n", phy);
-
- return;
- }
-
- DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
- "force reprogramming it\n", phy);
- }
-
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val |= GT_DISPLAY_POWER_ON(phy);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-
- /*
- * The PHY registers start out inaccessible and respond to reads with
- * all 1s. Eventually they become accessible as they power up, then
- * the reserved bit will give the default 0. Poll on the reserved bit
- * becoming 0 to find when the PHY is accessible.
- * HW team confirmed that the time to reach phypowergood status is
- * anywhere between 50 us and 100us.
- */
- if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
- DRM_ERROR("timeout during PHY%d power on\n", phy);
- }
-
- /* Program PLL Rcomp code offset */
- val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
- val &= ~IREF0RC_OFFSET_MASK;
- val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
-
- val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
- val &= ~IREF1RC_OFFSET_MASK;
- val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
-
- /* Program power gating */
- val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
- val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
- SUS_CLK_CONFIG;
- I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
-
- if (phy == DPIO_PHY0) {
- val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
- val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
- }
-
- val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
- val &= ~OCL2_LDOFUSE_PWR_DIS;
- /*
- * On PHY1 disable power on the second channel, since no port is
- * connected there. On PHY0 both channels have a port, so leave it
- * enabled.
- * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
- * power down the second channel on PHY0 as well.
- *
- * FIXME: Clarify programming of the following, the register is
- * read-only with bit 6 fixed at 0 at least in stepping A.
- */
- if (phy == DPIO_PHY1)
- val |= OCL2_LDOFUSE_PWR_DIS;
- I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
-
- if (phy == DPIO_PHY0) {
- uint32_t grc_code;
- /*
- * PHY0 isn't connected to an RCOMP resistor so copy over
- * the corresponding calibrated value from PHY1, and disable
- * the automatic calibration on PHY0.
- */
- val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
- grc_code = val << GRC_CODE_FAST_SHIFT |
- val << GRC_CODE_SLOW_SHIFT |
- val;
- I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
-
- val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
- val |= GRC_DIS | GRC_RDY_OVRD;
- I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
- }
-
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
- val |= COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- if (phy == DPIO_PHY1)
- bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
-}
-
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- uint32_t val;
-
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
- val &= ~COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val &= ~GT_DISPLAY_POWER_ON(phy);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-}
-
-static bool __printf(6, 7)
-__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- i915_reg_t reg, u32 mask, u32 expected,
- const char *reg_fmt, ...)
-{
- struct va_format vaf;
- va_list args;
- u32 val;
-
- val = I915_READ(reg);
- if ((val & mask) == expected)
- return true;
-
- va_start(args, reg_fmt);
- vaf.fmt = reg_fmt;
- vaf.va = &args;
-
- DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
- "current %08x, expected %08x (mask %08x)\n",
- phy, &vaf, reg.reg, val, (val & ~mask) | expected,
- mask);
-
- va_end(args);
-
- return false;
-}
-
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- uint32_t mask;
- bool ok;
-
-#define _CHK(reg, mask, exp, fmt, ...) \
- __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
- ## __VA_ARGS__)
-
- if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
- return false;
-
- ok = true;
-
- /* PLL Rcomp code offset */
- ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
- IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW9(%d)", phy);
- ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
- IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW10(%d)", phy);
-
- /* Power gating */
- mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
- ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
- "BXT_PORT_CL1CM_DW28(%d)", phy);
-
- if (phy == DPIO_PHY0)
- ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
- DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
- "BXT_PORT_CL2CM_DW6_BC");
-
- /*
- * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
- * at least on stepping A this bit is read-only and fixed at 0.
- */
-
- if (phy == DPIO_PHY0) {
- u32 grc_code = dev_priv->bxt_phy_grc;
-
- grc_code = grc_code << GRC_CODE_FAST_SHIFT |
- grc_code << GRC_CODE_SLOW_SHIFT |
- grc_code;
- mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
- GRC_CODE_NOM_MASK;
- ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
- "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
-
- mask = GRC_DIS | GRC_RDY_OVRD;
- ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
- "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
- }
-
- return ok;
-#undef _CHK
-}
-
-static uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- switch (pipe_config->lane_count) {
- case 1:
- return 0;
- case 2:
- return BIT(2) | BIT(0);
- case 4:
- return BIT(3) | BIT(2) | BIT(0);
- default:
- MISSING_CASE(pipe_config->lane_count);
-
- return 0;
- }
-}
-
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int lane;
-
- for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
- /*
- * Note that on CHV this flag is called UPAR, but has
- * the same function.
- */
- val &= ~LATENCY_OPTIM;
- if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
- val |= LATENCY_OPTIM;
-
- I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
- }
-}
-
-static uint8_t
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
- int lane;
- uint8_t mask;
-
- mask = 0;
- for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
- if (val & LATENCY_OPTIM)
- mask |= BIT(lane);
- }
+ uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
- return mask;
+ bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2347,7 +2069,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
- pipe_config);
+ pipe_config->lane_count);
return ret;
@@ -2438,7 +2160,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
- bool init_hdmi, init_dp;
+ bool init_hdmi, init_dp, init_lspcon = false;
int max_lanes;
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
@@ -2470,6 +2192,19 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ /*
+ * An LSPCON device needs to be driven through a DP
+ * connector with a special detection sequence, so make
+ * sure DP is initialized before LSPCON.
+ */
+ init_dp = true;
+ init_lspcon = true;
+ init_hdmi = false;
+ DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+ }
+
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
@@ -2509,7 +2244,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* configuration so that we use the proper lane count for our
* calculations.
*/
- if (IS_BROXTON(dev) && port == PORT_A) {
+ if (IS_BROXTON(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
@@ -2520,6 +2255,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@@ -2532,7 +2268,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
@@ -2545,6 +2281,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
goto err;
}
+ if (init_lspcon) {
+ if (lspcon_init(intel_dig_port))
+ /* TODO: handle the HDMI infoframe part */
+ DRM_DEBUG_KMS("LSPCON init success on port %c\n",
+ port_name(port));
+ else
+ /*
+ * LSPCON init failed, but DP init succeeded, so
+ * let's try to drive it as a DP++ port.
+ */
+ DRM_ERROR("LSPCON init failed on port %c\n",
+ port_name(port));
+ }
+
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 73b6858600ac..185e3bbc9ec9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -28,20 +28,14 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
- DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
info->gen,
dev_priv->drm.pdev->device,
- dev_priv->drm.pdev->revision,
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
+ dev_priv->drm.pdev->revision);
+#define PRINT_FLAG(name) \
+ DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_COMMA
}
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
@@ -192,7 +186,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
const int s_max = 3, ss_max = 3, eu_max = 8;
int s, ss;
- u32 fuse2, eu_disable[s_max];
+ u32 fuse2, eu_disable[3]; /* s_max */
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
@@ -288,12 +282,13 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
- else
+ } else if (INTEL_GEN(dev_priv) >= 5) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 1;
+ }
if (i915.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fbcfed63a76e..962aae631f13 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,7 +37,6 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -116,8 +115,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
@@ -600,7 +600,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_device *dev,
+static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -613,12 +613,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
- if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -698,7 +699,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
@@ -753,7 +755,8 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
@@ -813,7 +816,8 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
@@ -845,7 +849,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(to_i915(dev))) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
@@ -909,7 +913,8 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
@@ -977,7 +982,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit, &clock))
+ if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
continue;
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -1003,10 +1008,8 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
target_clock, refclk, NULL, best_clock);
}
-bool intel_crtc_active(struct drm_crtc *crtc)
+bool intel_crtc_active(struct intel_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
/* Be paranoid as we can arrive here with only partial
* state retrieved from the hardware during setup.
*
@@ -1020,27 +1023,25 @@ bool intel_crtc_active(struct drm_crtc *crtc)
* crtc->state->active once we have proper CRTC states wired up
* for atomic.
*/
- return intel_crtc->active && crtc->primary->state->fb &&
- intel_crtc->config->base.adjusted_mode.crtc_clock;
+ return crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->base.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- return intel_crtc->config->cpu_transcoder;
+ return crtc->config->cpu_transcoder;
}
-static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
@@ -1070,12 +1071,11 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
*/
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
@@ -1085,7 +1085,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
WARN(1, "pipe_off wait timed out\n");
} else {
/* Wait for the display line to settle */
- if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
+ if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
}
}
@@ -1187,19 +1187,17 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
onoff(state), onoff(cur_state));
}
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
- if (WARN_ON(HAS_DDI(dev)))
+ if (WARN_ON(HAS_DDI(dev_priv)))
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
pp_reg = PP_CONTROL(0);
@@ -1209,7 +1207,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
/* XXX: else fix for eDP */
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
@@ -1232,10 +1230,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
static void assert_cursor(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
- struct drm_device *dev = &dev_priv->drm;
bool cur_state;
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -1294,11 +1291,10 @@ static void assert_plane(struct drm_i915_private *dev_priv,
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* Primary planes are fixed to pipes on gen4+ */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 val = I915_READ(DSPCNTR(pipe));
I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
@@ -1320,29 +1316,28 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int sprite;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(SPCNTR(pipe, sprite));
I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 val = I915_READ(SPRCTL(pipe));
I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
u32 val = I915_READ(DVSCNTR(pipe));
I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
@@ -1596,12 +1591,12 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
-static int intel_num_dvo_pipes(struct drm_device *dev)
+static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
int count = 0;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
count += crtc->base.state->active &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
}
@@ -1611,19 +1606,18 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev) && !IS_I830(dev))
+ if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
/* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+ if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
/*
* It appears to be important that we don't enable this
* for the current pipe before otherwise configuring the
@@ -1648,7 +1642,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
crtc->config->dpll_hw_state.dpll_md);
} else {
@@ -1683,14 +1677,13 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
*/
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev) &&
+ if (IS_I830(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
- !intel_num_dvo_pipes(dev)) {
+ !intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
@@ -1786,9 +1779,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
i915_reg_t reg;
uint32_t val, pipeconf_val;
@@ -1799,7 +1791,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Set the timing override bit before enabling the
* pch transcoder. */
reg = TRANS_CHICKEN2(pipe);
@@ -1877,7 +1869,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
i915_reg_t reg;
uint32_t val;
@@ -1898,7 +1889,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
@@ -1926,6 +1917,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ WARN_ON(!crtc->config->has_pch_encoder);
+
+ if (HAS_PCH_LPT(dev_priv))
+ return TRANSCODER_A;
+ else
+ return (enum transcoder) crtc->pipe;
+}
+
/**
* intel_enable_pipe - enable a pipe, asserting requirements
* @crtc: crtc responsible for the pipe
@@ -1939,7 +1942,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- enum pipe pch_transcoder;
i915_reg_t reg;
u32 val;
@@ -1949,11 +1951,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
- if (HAS_PCH_LPT(dev_priv))
- pch_transcoder = TRANSCODER_A;
- else
- pch_transcoder = pipe;
-
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1967,7 +1964,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
} else {
if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_rx_pll_enabled(dev_priv,
+ (enum pipe) intel_crtc_pch_transcoder(crtc));
assert_fdi_tx_pll_enabled(dev_priv,
(enum pipe) cpu_transcoder);
}
@@ -2139,7 +2137,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
unsigned int rotation)
{
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
*view = i915_ggtt_view_rotated;
view->params.rotated = to_intel_framebuffer(fb)->rot_info;
} else {
@@ -2191,7 +2189,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
intel_fill_fb_ggtt_view(&view, fb, rotation);
@@ -2260,7 +2258,7 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
unsigned int rotation)
{
- if (intel_rotation_90_or_270(rotation))
+ if (drm_rotation_90_or_270(rotation))
return to_intel_framebuffer(fb)->rotated[plane].pitch;
else
return fb->pitches[plane];
@@ -2296,7 +2294,7 @@ void intel_add_fb_offsets(int *x, int *y,
const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
unsigned int rotation = state->base.rotation;
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
*x += intel_fb->rotated[plane].x;
*y += intel_fb->rotated[plane].y;
} else {
@@ -2352,15 +2350,15 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
tile_size = intel_tile_size(dev_priv);
intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier[plane], cpp);
+ fb->modifier, cpp);
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@@ -2401,7 +2399,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
unsigned int rotation,
u32 alignment)
{
- uint64_t fb_modifier = fb->modifier[plane];
+ uint64_t fb_modifier = fb->modifier;
unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
u32 offset, offset_aligned;
@@ -2416,7 +2414,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
intel_tile_dims(dev_priv, &tile_width, &tile_height,
fb_modifier, cpp);
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@@ -2460,7 +2458,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
alignment = 4096;
else
- alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
rotation, alignment);
@@ -2542,13 +2540,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
DRM_ROTATE_0, tile_size);
offset /= tile_size;
- if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier[i], cpp);
+ fb->modifier, cpp);
rot_info->plane[i].offset = offset;
rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
@@ -2707,7 +2705,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
mode_cmd.pitches[0] = fb->pitches[0];
- mode_cmd.modifier[0] = fb->modifier[0];
+ mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
@@ -2817,14 +2815,8 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->base.src.x1 = plane_state->src_x;
- intel_state->base.src.y1 = plane_state->src_y;
- intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w;
- intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h;
- intel_state->base.dst.x1 = plane_state->crtc_x;
- intel_state->base.dst.y1 = plane_state->crtc_y;
- intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
- intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+ intel_state->base.src = drm_plane_state_src(plane_state);
+ intel_state->base.dst = drm_plane_state_dest(plane_state);
obj = intel_fb_obj(fb);
if (i915_gem_object_is_tiled(obj))
@@ -2843,7 +2835,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
{
int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
- switch (fb->modifier[plane]) {
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
case I915_FORMAT_MOD_X_TILED:
switch (cpp) {
@@ -2874,7 +2866,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
}
break;
default:
- MISSING_CASE(fb->modifier[plane]);
+ MISSING_CASE(fb->modifier);
}
return 2048;
@@ -2902,7 +2894,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
/*
* AUX surface offset is specified as the distance from the
@@ -2919,7 +2911,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
*
* TODO: linear and Y-tiled seem fine, Yf untested,
*/
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
while ((x + w) * cpp > fb->pitches[0]) {
@@ -2976,9 +2968,10 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
int ret;
/* Rotate src coordinates to match rotated GTT view */
- if (intel_rotation_90_or_270(rotation))
+ if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
- fb->width, fb->height, DRM_ROTATE_270);
+ fb->width << 16, fb->height << 16,
+ DRM_ROTATE_270);
/*
* Handle the AUX surface first since
@@ -3005,11 +2998,9 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(primary->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
u32 linear_offset;
u32 dspcntr;
@@ -3022,7 +3013,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (intel_crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
@@ -3033,7 +3024,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
- } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+ } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
@@ -3068,28 +3059,34 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
}
if (INTEL_GEN(dev_priv) >= 4 &&
- fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (IS_G4X(dev))
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
+ if (IS_G4X(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
+ if (rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += crtc_state->pipe_src_w - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
intel_crtc->dspaddr_offset = linear_offset;
intel_crtc->adjusted_x = x;
@@ -3098,14 +3095,17 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
- } else
- I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
+ } else {
+ I915_WRITE(DSPADDR(plane),
+ intel_fb_gtt_offset(fb, rotation) +
+ intel_crtc->dspaddr_offset);
+ }
POSTING_READ(reg);
}
@@ -3144,7 +3144,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
dspcntr = DISPPLANE_GAMMA_ENABLE;
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
switch (fb->pixel_format) {
@@ -3170,10 +3170,13 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
BUG();
}
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
@@ -3181,13 +3184,11 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -3201,7 +3202,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -3276,12 +3277,12 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
- stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
+ stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
} else {
- stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
fb->pixel_format);
}
@@ -3377,7 +3378,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
@@ -3398,7 +3398,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
PLANE_CTL_PIPE_CSC_ENABLE;
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
plane_ctl |= skl_plane_ctl_rotation(rotation);
@@ -3413,9 +3413,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = src_x;
intel_crtc->adjusted_y = src_y;
- if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
- skl_write_plane_wm(intel_crtc, wm, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
@@ -3450,13 +3447,6 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!crtc->primary->state->visible)
- skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_SURF(pipe, 0));
@@ -3506,7 +3496,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev);
- i915_redisable_vga(dev);
+ i915_redisable_vga(to_i915(dev));
if (!state)
return 0;
@@ -3584,7 +3574,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
return;
err:
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -3603,8 +3593,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
dev_priv->modeset_restore_state = NULL;
- dev_priv->modeset_restore_state = NULL;
-
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
if (!state) {
@@ -3646,6 +3634,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
}
+ if (state)
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
@@ -3683,8 +3673,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
@@ -3709,12 +3698,12 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
(pipe_config->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(crtc);
if (pipe_config->pch_pfit.enabled)
skylake_pfit_enable(crtc);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
if (pipe_config->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
@@ -3734,7 +3723,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
/* enable normal train */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
@@ -3745,7 +3734,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
@@ -3759,7 +3748,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
udelay(1000);
/* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
FDI_FE_ERRC_ENABLE);
}
@@ -3903,7 +3892,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -3947,7 +3936,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -3956,7 +3945,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
@@ -4210,7 +4199,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
@@ -4222,7 +4211,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -4240,6 +4229,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
/* Note that we don't need to be called with mode_config.lock here
@@ -4254,7 +4244,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
continue;
if (crtc->flip_work)
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
return true;
}
@@ -4545,7 +4535,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_pch_transcoder_disabled(dev_priv, pipe);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
/* Write the TU size bits before fdi link training, so that error
@@ -4558,7 +4548,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* We need to program the right clock selection before writing the pixel
* multiplier into the DPLL. */
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -4588,7 +4578,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
+ if (HAS_PCH_CPT(dev_priv) &&
+ intel_crtc_has_dp_encoder(intel_crtc->config)) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4667,7 +4658,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- need_scaling = intel_rotation_90_or_270(rotation) ?
+ need_scaling = drm_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h):
(src_w != dst_w || src_h != dst_h);
@@ -4728,13 +4719,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
- DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->base.name,
- intel_crtc->pipe, SKL_CRTC_INDEX);
-
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
@@ -4755,7 +4741,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
@@ -4763,10 +4748,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
bool force_detach = !fb || !plane_state->base.visible;
- DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
- intel_plane->base.base.id, intel_plane->base.name,
- intel_crtc->pipe, drm_plane_index(&intel_plane->base));
-
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
@@ -4858,7 +4839,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
* as some pre-programmed values are broken,
* e.g. x201.
*/
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
@@ -4883,7 +4864,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
*/
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4915,7 +4896,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
return;
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4930,7 +4911,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
}
/* We need to wait for a vblank before we can disable the plane. */
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
@@ -4984,7 +4965,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't always raise interrupts, so check manually. */
@@ -5007,7 +4988,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
@@ -5039,10 +5020,10 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
}
}
@@ -5061,7 +5042,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
crtc->wm.cxsr_allowed = true;
if (pipe_config->update_wm_post && pipe_config->base.active)
- intel_update_watermarks(&crtc->base);
+ intel_update_watermarks(crtc);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5090,6 +5071,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5104,7 +5087,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
intel_pre_disable_primary(&crtc->base);
}
- if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
+ if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
crtc->wm.cxsr_allowed = false;
/*
@@ -5119,7 +5102,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_crtc_state->base.active) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
}
@@ -5132,7 +5115,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
*/
if (pipe_config->disable_lp_wm) {
ilk_disable_lp_wm(dev);
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
/*
@@ -5157,9 +5140,10 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
* us to.
*/
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else if (pipe_config->update_wm_pre)
- intel_update_watermarks(&crtc->base);
+ intel_update_watermarks(crtc);
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
@@ -5313,6 +5297,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5371,7 +5357,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(intel_crtc->config);
+ dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -5382,12 +5368,12 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5395,18 +5381,19 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
- return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
+ return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5461,7 +5448,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(intel_crtc);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_pfit_enable(intel_crtc);
else
ironlake_pfit_enable(intel_crtc);
@@ -5477,9 +5464,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
@@ -5488,7 +5476,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, true);
assert_vblank_disabled(crtc);
@@ -5497,8 +5485,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->has_pch_encoder) {
- intel_wait_for_vblank(dev, pipe);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
@@ -5507,9 +5495,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
- if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
- intel_wait_for_vblank(dev, hsw_workaround_pipe);
- intel_wait_for_vblank(dev, hsw_workaround_pipe);
+ if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+ intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
}
}
@@ -5564,7 +5552,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (intel_crtc->config->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
i915_reg_t reg;
u32 temp;
@@ -5593,8 +5581,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5611,13 +5598,13 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(intel_crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc, false);
@@ -5698,13 +5685,13 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
- WARN_ON_ONCE(!HAS_DDI(dev));
+ WARN_ON_ONCE(!HAS_DDI(dev_priv));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -5725,7 +5712,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
@@ -5738,7 +5725,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
* what's the status of the given connectors, play safe and
* run the DP detection too.
*/
- WARN_ON_ONCE(!HAS_DDI(dev));
+ WARN_ON_ONCE(!HAS_DDI(dev_priv));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5830,11 +5817,9 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
static int skl_calc_cdclk(int max_pixclk, int vco);
-static void intel_update_max_cdclk(struct drm_device *dev)
+static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -5856,9 +5841,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
max_cdclk = 308571;
dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->max_cdclk_freq = 624000;
- } else if (IS_BROADWELL(dev)) {
+ } else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
* 540 MHz for ULX and 675 Mhz for ULT.
@@ -5867,15 +5852,15 @@ static void intel_update_max_cdclk(struct drm_device *dev)
*/
if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULX(dev))
+ else if (IS_BDW_ULX(dev_priv))
dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULT(dev))
+ else if (IS_BDW_ULT(dev_priv))
dev_priv->max_cdclk_freq = 540000;
else
dev_priv->max_cdclk_freq = 675000;
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->max_cdclk_freq = 320000;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
@@ -5891,11 +5876,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
dev_priv->max_dotclk_freq);
}
-static void intel_update_cdclk(struct drm_device *dev)
+static void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
if (INTEL_GEN(dev_priv) >= 9)
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
@@ -6056,14 +6039,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
return;
}
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
if (dev_priv->cdclk_pll.vco == 0 ||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6196,7 +6179,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
dev_priv->skl_preferred_vco_freq = vco;
if (changed)
- intel_update_max_cdclk(&dev_priv->drm);
+ intel_update_max_cdclk(dev_priv);
}
static void
@@ -6282,7 +6265,6 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
- struct drm_device *dev = &dev_priv->drm;
u32 freq_select, pcu_ack;
WARN_ON((cdclk == 24000) != (vco == 0));
@@ -6333,7 +6315,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
@@ -6380,7 +6362,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk_pll.vco == 0 ||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6414,7 +6396,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
!= dev_priv->cdclk_freq);
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
@@ -6471,7 +6453,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->sb_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -6479,7 +6461,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
!= dev_priv->cdclk_freq);
switch (cdclk) {
@@ -6512,7 +6494,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -6675,7 +6657,7 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
@@ -6703,7 +6685,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_pipe_timings(intel_crtc);
intel_set_pipe_src_size(intel_crtc);
- if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
@@ -6718,7 +6700,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
} else {
@@ -6732,7 +6714,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6774,7 +6756,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6785,7 +6767,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6822,8 +6804,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
+ if (IS_GEN2(dev_priv))
+ intel_wait_for_vblank(dev_priv, pipe);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6837,9 +6819,9 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
chv_disable_pll(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_disable_pll(dev_priv, pipe);
else
i9xx_disable_pll(intel_crtc);
@@ -6847,7 +6829,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
@@ -6885,7 +6867,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc_state, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.id, crtc->name);
@@ -6901,7 +6883,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
encoder->base.crtc = NULL;
intel_fbc_disable(intel_crtc);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
domains = intel_crtc->enabled_power_domains;
@@ -7027,6 +7009,7 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = pipe_config->base.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
@@ -7039,7 +7022,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
@@ -7049,7 +7032,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev)->num_pipes == 2)
+ if (INTEL_INFO(dev_priv)->num_pipes == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7060,7 +7043,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
if (pipe_config->fdi_lanes <= 2)
return 0;
- other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -7079,7 +7062,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -7190,7 +7173,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
@@ -7224,11 +7207,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
return -EINVAL;
- if (HAS_IPS(dev))
+ if (HAS_IPS(dev_priv))
hsw_compute_ips_config(crtc, pipe_config);
if (pipe_config->has_pch_encoder)
@@ -7237,10 +7220,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return 0;
}
-static int skylake_get_display_clock_speed(struct drm_device *dev)
+static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t cdctl;
+ u32 cdctl;
skl_dpll0_update(dev_priv);
@@ -7299,9 +7281,8 @@ static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
dev_priv->cdclk_pll.ref;
}
-static int broxton_get_display_clock_speed(struct drm_device *dev)
+static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 divider;
int div, vco;
@@ -7334,9 +7315,8 @@ static int broxton_get_display_clock_speed(struct drm_device *dev)
return DIV_ROUND_CLOSEST(vco, div);
}
-static int broadwell_get_display_clock_speed(struct drm_device *dev)
+static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -7354,9 +7334,8 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
return 675000;
}
-static int haswell_get_display_clock_speed(struct drm_device *dev)
+static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -7366,41 +7345,41 @@ static int haswell_get_display_clock_speed(struct drm_device *dev)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
- else if (IS_HSW_ULT(dev))
+ else if (IS_HSW_ULT(dev_priv))
return 337500;
else
return 540000;
}
-static int valleyview_get_display_clock_speed(struct drm_device *dev)
+static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
+ return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL);
}
-static int ilk_get_display_clock_speed(struct drm_device *dev)
+static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 450000;
}
-static int i945_get_display_clock_speed(struct drm_device *dev)
+static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 400000;
}
-static int i915_get_display_clock_speed(struct drm_device *dev)
+static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 333333;
}
-static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 200000;
}
-static int pnv_get_display_clock_speed(struct drm_device *dev)
+static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7423,9 +7402,9 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i915gm_get_display_clock_speed(struct drm_device *dev)
+static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7443,14 +7422,14 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i865_get_display_clock_speed(struct drm_device *dev)
+static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 266667;
}
-static int i85x_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 hpllcc = 0;
/*
@@ -7486,14 +7465,13 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
return 0;
}
-static int i830_get_display_clock_speed(struct drm_device *dev)
+static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 133333;
}
-static unsigned int intel_hpll_vco(struct drm_device *dev)
+static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
static const unsigned int blb_vco[8] = {
[0] = 3200000,
[1] = 4000000,
@@ -7536,20 +7514,20 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
uint8_t tmp = 0;
/* FIXME other chipsets? */
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
vco_table = ctg_vco;
- else if (IS_G4X(dev))
+ else if (IS_G4X(dev_priv))
vco_table = elk_vco;
- else if (IS_CRESTLINE(dev))
+ else if (IS_CRESTLINE(dev_priv))
vco_table = cl_vco;
- else if (IS_PINEVIEW(dev))
+ else if (IS_PINEVIEW(dev_priv))
vco_table = pnv_vco;
- else if (IS_G33(dev))
+ else if (IS_G33(dev_priv))
vco_table = blb_vco;
else
return 0;
- tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -7560,10 +7538,10 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
return vco;
}
-static int gm45_get_display_clock_speed(struct drm_device *dev)
+static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7583,14 +7561,14 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i965gm_get_display_clock_speed(struct drm_device *dev)
+static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 16, 10, 8 };
static const uint8_t div_4000[] = { 20, 12, 10 };
static const uint8_t div_5333[] = { 24, 16, 14 };
const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7621,15 +7599,15 @@ fail:
return 200000;
}
-static int g33_get_display_clock_speed(struct drm_device *dev)
+static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7718,10 +7696,10 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 fp, fp2 = 0;
- if (IS_PINEVIEW(dev)) {
+ if (IS_PINEVIEW(dev_priv)) {
fp = pnv_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
@@ -7789,12 +7767,11 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config->cpu_transcoder;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
@@ -7803,8 +7780,8 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* for gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily accessed).
*/
- if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
- crtc->config->has_drrs) {
+ if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
+ INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -8091,11 +8068,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_crtc_state *pipe_config;
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
@@ -8106,7 +8082,7 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
chv_compute_dpll(crtc, pipe_config);
chv_prepare_pll(crtc, pipe_config);
chv_enable_pll(crtc, pipe_config);
@@ -8129,20 +8105,19 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
* Disable the PLL for @pipe. To be used in cases where we need
* the PLL enabled even when @pipe is not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev))
- chv_disable_pll(to_i915(dev), pipe);
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_disable_pll(dev_priv, pipe);
else
- vlv_disable_pll(to_i915(dev), pipe);
+ vlv_disable_pll(dev_priv, pipe);
}
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
struct dpll *clock = &crtc_state->dpll;
@@ -8155,7 +8130,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -8168,11 +8143,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && reduced_clock)
+ if (IS_G4X(dev_priv) && reduced_clock)
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
}
switch (clock->p2) {
@@ -8189,7 +8164,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -8203,7 +8178,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
crtc_state->dpll_hw_state.dpll = dpll;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc_state->dpll_hw_state.dpll_md = dpll_md;
@@ -8234,7 +8209,8 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev_priv) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
@@ -8249,8 +8225,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -8276,7 +8251,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
vsyncshift += adjusted_mode->crtc_htotal;
}
- if (INTEL_INFO(dev)->gen > 3)
+ if (INTEL_GEN(dev_priv) > 3)
I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
I915_WRITE(HTOTAL(cpu_transcoder),
@@ -8303,7 +8278,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
* documented on the DDI_FUNC_CTL register description, EDP Input Select
* bits. */
- if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
@@ -8399,8 +8374,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
uint32_t pipeconf;
pipeconf = 0;
@@ -8413,7 +8387,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
@@ -8435,7 +8410,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (HAS_PIPE_CXSR(dev)) {
+ if (HAS_PIPE_CXSR(dev_priv)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
@@ -8445,7 +8420,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (INTEL_INFO(dev)->gen < 4 ||
+ if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -8453,7 +8428,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
} else
pipeconf |= PIPECONF_PROGRESSIVE;
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
@@ -8653,11 +8628,11 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t tmp;
- if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ if (INTEL_GEN(dev_priv) <= 3 &&
+ (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
return;
tmp = I915_READ(PFIT_CONTROL);
@@ -8665,7 +8640,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
return;
/* Check whether the pfit is attached to our pipe. */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (crtc->pipe != PIPE_B)
return;
} else {
@@ -8729,10 +8704,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
}
}
@@ -8741,7 +8716,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
@@ -8761,7 +8736,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -8810,8 +8785,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -8829,7 +8803,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
goto out;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
pipe_config->pipe_bpp = 18;
@@ -8845,11 +8820,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_pipe_timings(crtc, pipe_config);
@@ -8857,9 +8832,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
else
tmp = I915_READ(DPLL_MD(crtc->pipe));
@@ -8867,7 +8842,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
- } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -8879,13 +8855,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
/*
* DPLL_DVO_2X_MODE must be enabled for both DPLLs
* on 830. Filter it out here so that we don't
* report errors due to that.
*/
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
@@ -8897,9 +8873,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
DPLL_PORTB_READY_MASK);
}
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
chv_crtc_clock_get(crtc, pipe_config);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_crtc_clock_get(crtc, pipe_config);
else
i9xx_crtc_clock_get(crtc, pipe_config);
@@ -8950,7 +8926,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
}
}
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
has_ck505 = dev_priv->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
@@ -9198,7 +9174,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
- if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
+ if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
@@ -9221,7 +9198,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
}
}
- reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9237,7 +9214,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
mutex_lock(&dev_priv->sb_lock);
- reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9345,9 +9322,11 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
*/
void intel_init_pch_refclk(struct drm_device *dev)
{
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
ironlake_init_pch_refclk(dev);
- else if (HAS_PCH_LPT(dev))
+ else if (HAS_PCH_LPT(dev_priv))
lpt_init_pch_refclk(dev);
}
@@ -9476,7 +9455,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+ (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (crtc_state->sdvo_tv_clock)
factor = 20;
@@ -9650,11 +9629,10 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
@@ -9666,7 +9644,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
* gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily read).
*/
- if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
@@ -9770,17 +9748,17 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
case PLANE_CTL_TILED_LINEAR:
- fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+ fb->modifier = DRM_FORMAT_MOD_NONE;
break;
case PLANE_CTL_TILED_X:
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+ fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF:
- fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
break;
default:
MISSING_CASE(tiling);
@@ -9797,13 +9775,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier,
fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -9836,7 +9814,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
@@ -9868,10 +9846,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
}
}
@@ -9881,7 +9859,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
offset = I915_READ(DSPOFFSET(pipe));
} else {
if (plane_config->tiling)
@@ -9900,7 +9878,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -10025,7 +10003,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
@@ -10045,9 +10023,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
-
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
return I915_READ(D_COMP_HSW);
else
return I915_READ(D_COMP_BDW);
@@ -10055,9 +10031,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
- struct drm_device *dev = &dev_priv->drm;
-
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
@@ -10172,7 +10146,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
}
/*
@@ -10205,7 +10179,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10225,7 +10199,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
hsw_restore_lcpll(dev_priv);
lpt_init_pch_refclk(dev);
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10242,6 +10216,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
bxt_set_cdclk(to_i915(dev), req_cdclk);
}
+static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+ int pixel_rate)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than
+ * 432 MHz, audio enabled, port width x4, and link rate
+ * HBR2 (5.4 GHz), or else there may be audio corruption or
+ * screen corruption."
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->has_audio &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4)
+ pixel_rate = max(432000, pixel_rate);
+
+ return pixel_rate;
+}
+
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
@@ -10267,9 +10264,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
pixel_rate = ilk_pipe_pixel_rate(crtc_state);
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+ if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+ pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+ pixel_rate);
intel_state->min_pixclk[i] = pixel_rate;
}
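The new bdw_adjust_min_pipe_pixel_rate() helper gathers both cdclk floors in one place; as a worked example of the arithmetic (illustrative only, not part of the patch; units are kHz as elsewhere in this code):
/*
 * 400000 kHz pipe with IPS enabled on BDW:
 *   DIV_ROUND_UP(400000 * 100, 95) = 421053  -> keeps the pixel rate at or
 *                                               below 95% of the chosen cdclk
 * 300000 kHz pipe driving DP x4 at HBR2 with audio:
 *   max(432000, 300000) = 432000             -> cdclk floor from the BSpec
 *                                               note quoted above
 */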
@@ -10352,7 +10349,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
WARN(cdclk != dev_priv->cdclk_freq,
"cdclk requested %d kHz but got %d kHz\n",
@@ -10639,8 +10636,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
@@ -10649,9 +10645,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10667,7 +10663,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if (INTEL_INFO(dev)->gen < 9 &&
+ if (INTEL_GEN(dev_priv) < 9 &&
(port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
@@ -10682,8 +10678,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
bool active;
@@ -10716,11 +10711,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_INFO(dev)->gen >= 9) {
- skl_init_scalers(dev, crtc, pipe_config);
- }
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_init_scalers(dev_priv, crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
@@ -10728,13 +10721,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT(power_domain);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
ironlake_get_pfit_config(crtc, pipe_config);
}
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
@@ -10822,13 +10815,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_cursor_wm(intel_crtc, wm);
-
if (plane_state && plane_state->base.visible) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (plane_state->base.crtc_w) {
@@ -10847,10 +10836,10 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
}
cntl |= pipe << 28; /* Connect to correct pipe */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- if (plane_state->base.rotation == DRM_ROTATE_180)
+ if (plane_state->base.rotation & DRM_ROTATE_180)
cntl |= CURSOR_ROTATE_180;
}
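This hunk and the next switch the cursor rotation check from an exact compare to a bit test; a minimal sketch of the difference (illustrative only -- the flag values are redefined locally for the example and merely assume the DRM_ROTATE_*/DRM_REFLECT_* bits are single-bit flags that may be combined):
/* Illustrative only: rotation is a bitmask, so an equality test would miss
 * combined states such as rotate-180 plus a reflection.
 */
#define EX_ROTATE_180	(1 << 2)
#define EX_REFLECT_X	(1 << 4)

static int ex_wants_180(unsigned int rotation)
{
	/* matches EX_ROTATE_180 alone and EX_ROTATE_180 | EX_REFLECT_X */
	return (rotation & EX_ROTATE_180) != 0;
}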
@@ -10895,8 +10884,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= y << CURSOR_Y_SHIFT;
/* ILK+ do this automagically */
- if (HAS_GMCH_DISPLAY(dev) &&
- plane_state->base.rotation == DRM_ROTATE_180) {
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ plane_state->base.rotation & DRM_ROTATE_180) {
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * 4;
}
@@ -10904,13 +10893,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
i845_update_cursor(crtc, base, plane_state);
else
i9xx_update_cursor(crtc, base, plane_state);
}
-static bool cursor_size_ok(struct drm_device *dev,
+static bool cursor_size_ok(struct drm_i915_private *dev_priv,
uint32_t width, uint32_t height)
{
if (width == 0 || height == 0)
@@ -10922,11 +10911,11 @@ static bool cursor_size_ok(struct drm_device *dev,
* the precision of the register. Everything else requires
* square cursors, limited to a few power-of-two sizes.
*/
- if (IS_845G(dev) || IS_I865G(dev)) {
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
if ((width & 63) != 0)
return false;
- if (width > (IS_845G(dev) ? 64 : 512))
+ if (width > (IS_845G(dev_priv) ? 64 : 512))
return false;
if (height > 1023)
@@ -10935,7 +10924,7 @@ static bool cursor_size_ok(struct drm_device *dev,
switch (width | height) {
case 256:
case 128:
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return false;
case 64:
break;
@@ -11029,7 +11018,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return fb;
}
@@ -11114,6 +11103,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
@@ -11266,13 +11256,18 @@ found:
old->restore_state = restore_state;
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return true;
fail:
- drm_atomic_state_free(state);
- drm_atomic_state_free(restore_state);
- restore_state = state = NULL;
+ if (state) {
+ drm_atomic_state_put(state);
+ state = NULL;
+ }
+ if (restore_state) {
+ drm_atomic_state_put(restore_state);
+ restore_state = NULL;
+ }
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@@ -11300,10 +11295,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
return;
ret = drm_atomic_commit(state);
- if (ret) {
+ if (ret)
DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
- drm_atomic_state_free(state);
- }
+ drm_atomic_state_put(state);
}
static int i9xx_pll_refclk(struct drm_device *dev,
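The drm_atomic_state_free() -> drm_atomic_state_put() changes in the hunks above follow the atomic state becoming reference counted; a minimal sketch of the resulting caller pattern (illustrative only, assuming, as the hunks do, that the caller always drops its own reference whether or not the commit succeeded):
/* Illustrative only, not part of the patch. */
static void example_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return;

	/* ... populate the state ... */

	ret = drm_atomic_commit(state);
	if (ret)
		DRM_DEBUG_KMS("Commit failed: %i\n", ret);

	/* drop the caller's reference unconditionally */
	drm_atomic_state_put(state);
}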
@@ -11314,9 +11308,9 @@ static int i9xx_pll_refclk(struct drm_device *dev,
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
return dev_priv->vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
- else if (!IS_GEN2(dev))
+ else if (!IS_GEN2(dev_priv))
return 96000;
else
return 48000;
@@ -11341,7 +11335,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
fp = pipe_config->dpll_hw_state.fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev)) {
+ if (IS_PINEVIEW(dev_priv)) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -11349,8 +11343,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (!IS_GEN2(dev)) {
- if (IS_PINEVIEW(dev))
+ if (!IS_GEN2(dev_priv)) {
+ if (IS_PINEVIEW(dev_priv))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -11372,12 +11366,12 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
return;
}
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+ u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
@@ -11578,7 +11572,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
* really needed there. But since ctg has the registers,
* include it in the check anyway.
*/
- if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
return true;
/*
@@ -11641,8 +11635,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
unsigned long flags;
@@ -11655,12 +11648,12 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL &&
!is_mmio_work(work) &&
- pageflip_finished(intel_crtc, work))
- page_flip_completed(intel_crtc);
+ pageflip_finished(crtc, work))
+ page_flip_completed(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -11668,8 +11661,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
unsigned long flags;
@@ -11682,12 +11674,12 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL &&
is_mmio_work(work) &&
- pageflip_finished(intel_crtc, work))
- page_flip_completed(intel_crtc);
+ pageflip_finished(crtc, work))
+ page_flip_completed(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -11792,7 +11784,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -11825,7 +11817,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
@@ -11848,6 +11840,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
@@ -11876,7 +11869,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* 48bits addresses, and we need a NOOP for the batch size to
* stay even.
*/
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
len += 2;
}
@@ -11913,7 +11906,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
@@ -11922,7 +11915,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring,
i915_ggtt_offset(req->engine->scratch) + 256);
- if (IS_GEN8(dev)) {
+ if (IS_GEN8(dev_priv)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
@@ -11930,7 +11923,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
@@ -11940,8 +11933,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
- struct reservation_object *resv;
-
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
@@ -11963,12 +11954,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
else if (i915.enable_execlists)
return true;
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv && !reservation_object_test_signaled_rcu(resv, false))
- return true;
-
- return engine != i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ return engine != i915_gem_object_last_write_engine(obj);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11983,7 +11969,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
- switch (fb->modifier[0]) {
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
break;
case I915_FORMAT_MOD_X_TILED:
@@ -11996,7 +11982,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
ctl |= PLANE_CTL_TILED_YF;
break;
default:
- MISSING_CASE(fb->modifier[0]);
+ MISSING_CASE(fb->modifier);
}
/*
@@ -12021,7 +12007,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
dspcntr = I915_READ(reg);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -12041,17 +12027,9 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- struct reservation_object *resv;
-
- if (work->flip_queued_req)
- WARN_ON(i915_wait_request(work->flip_queued_req,
- 0, NULL, NO_WAITBOOST));
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv)
- WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
- MAX_SCHEDULE_TIMEOUT) < 0);
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+ WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
intel_pipe_update_start(crtc);
@@ -12114,8 +12092,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
WARN_ON(!in_interrupt());
@@ -12124,19 +12101,19 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
return;
spin_lock(&dev->event_lock);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL && !is_mmio_work(work) &&
- __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+ __pageflip_stall_check_cs(dev_priv, crtc, work)) {
WARN_ONCE(1,
"Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
- page_flip_completed(intel_crtc);
+ work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
+ page_flip_completed(crtc);
work = NULL;
}
if (work != NULL && !is_mmio_work(work) &&
- intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+ intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
intel_queue_rps_boost_for_request(work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
@@ -12176,7 +12153,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* TILEOFF/LINOFF registers can't be changed via MI display flips.
* Note that pitch changes could also affect these registers.
*/
- if (INTEL_INFO(dev)->gen > 3 &&
+ if (INTEL_GEN(dev_priv) > 3 &&
(fb->offsets[0] != crtc->primary->fb->offsets[0] ||
fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
@@ -12236,28 +12213,27 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
ret = -EIO;
- goto cleanup;
+ goto unlock;
}
atomic_inc(&intel_crtc->unpin_work_count);
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- engine = &dev_priv->engine[BCS];
- if (fb->modifier[0] != old_fb->modifier[0])
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ engine = dev_priv->engine[BCS];
+ if (fb->modifier != old_fb->modifier)
/* vlv: DISPLAY_FLIP fails to change tiling */
engine = NULL;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- engine = &dev_priv->engine[BCS];
- } else if (INTEL_INFO(dev)->gen >= 7) {
- engine = i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+ engine = dev_priv->engine[BCS];
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
- engine = &dev_priv->engine[BCS];
+ engine = dev_priv->engine[BCS];
} else {
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
}
mmio_flip = use_mmio_flip(engine, obj);
@@ -12285,10 +12261,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
-
- work->flip_queued_req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
- schedule_work(&work->mmio_work);
+ queue_work(system_unbound_wq, &work->mmio_work);
} else {
request = i915_gem_request_alloc(engine, engine->last_context);
if (IS_ERR(request)) {
@@ -12328,12 +12301,13 @@ cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+unlock:
mutex_unlock(&dev->struct_mutex);
cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
@@ -12371,8 +12345,7 @@ retry:
goto retry;
}
- if (ret)
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
@@ -12407,7 +12380,7 @@ static bool intel_wm_need_update(struct drm_plane *plane,
if (!cur->base.fb || !new->base.fb)
return false;
- if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
+ if (cur->base.fb->modifier != new->base.fb->modifier ||
cur->base.rotation != new->base.rotation ||
drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
@@ -12446,7 +12419,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_framebuffer *fb = plane_state->fb;
int ret;
- if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
ret = skl_update_scaler_plane(
to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state));
@@ -12513,7 +12486,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
/* Pre-gen9 platforms need two-step watermark updates */
if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
- INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
+ INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
if (visible || was_visible)
@@ -12525,7 +12498,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
- if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state))
pipe_config->disable_lp_wm = true;
@@ -12618,7 +12591,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
+ ret = dev_priv->display.compute_intermediate_wm(dev,
intel_crtc,
pipe_config);
if (ret) {
@@ -12630,7 +12603,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
@@ -12701,15 +12674,16 @@ static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+ if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)))
bpp = 10*3;
- else if (INTEL_INFO(dev)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
bpp = 12*3;
else
bpp = 8*3;
@@ -12742,73 +12716,81 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
+static inline void
+intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
+ unsigned int lane_count, struct intel_link_m_n *m_n)
+{
+ DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->gmch_m, m_n->gmch_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
+}
+
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane *plane;
struct intel_plane *intel_plane;
struct intel_plane_state *state;
struct drm_framebuffer *fb;
- DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
- crtc->base.base.id, crtc->base.name,
- context, pipe_config, pipe_name(crtc->pipe));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
+ crtc->base.base.id, crtc->base.name, context);
- DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
- DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+ DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
- DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- pipe_config->has_pch_encoder,
- pipe_config->fdi_lanes,
- pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
- pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
- pipe_config->fdi_m_n.tu);
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
- pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
- pipe_config->dp_m_n.tu);
-
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m2_n2.gmch_m,
- pipe_config->dp_m2_n2.gmch_n,
- pipe_config->dp_m2_n2.link_m,
- pipe_config->dp_m2_n2.link_n,
- pipe_config->dp_m2_n2.tu);
+
+ if (pipe_config->has_pch_encoder)
+ intel_dump_m_n_config(pipe_config, "fdi",
+ pipe_config->fdi_lanes,
+ &pipe_config->fdi_m_n);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ intel_dump_m_n_config(pipe_config, "dp m_n",
+ pipe_config->lane_count, &pipe_config->dp_m_n);
+ if (pipe_config->has_drrs)
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
+ }
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
- pipe_config->has_audio,
- pipe_config->has_infoframe);
+ pipe_config->has_audio, pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
- DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+ DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
+ pipe_config->port_clock,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
- DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
- DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
- pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
- DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
- DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
-
- if (IS_BROXTON(dev)) {
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
+
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ else
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ enableddisabled(pipe_config->pch_pfit.enabled));
+
+ DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide);
+
+ if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
@@ -12823,13 +12805,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.pll9,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
pipe_config->dpll_hw_state.ctrl1,
pipe_config->dpll_hw_state.cfgcr1,
pipe_config->dpll_hw_state.cfgcr2);
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
pipe_config->dpll_hw_state.wrpll,
pipe_config->dpll_hw_state.spll);
@@ -12844,7 +12826,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("planes on this crtc\n");
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- char *format_name;
+ struct drm_format_name_buf format_name;
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe != crtc->pipe)
continue;
@@ -12857,23 +12839,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
continue;
}
- format_name = drm_get_format_name(fb->pixel_format);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
- plane->base.id, plane->name);
- DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
- fb->base.id, fb->width, fb->height, format_name);
- DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
- state->scaler_id,
- state->base.src.x1 >> 16,
- state->base.src.y1 >> 16,
- drm_rect_width(&state->base.src) >> 16,
- drm_rect_height(&state->base.src) >> 16,
- state->base.dst.x1, state->base.dst.y1,
- drm_rect_width(&state->base.dst),
- drm_rect_height(&state->base.dst));
-
- kfree(format_name);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
+ plane->base.id, plane->name,
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->pixel_format, &format_name));
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+ state->scaler_id,
+ state->base.src.x1 >> 16,
+ state->base.src.y1 >> 16,
+ drm_rect_width(&state->base.src) >> 16,
+ drm_rect_height(&state->base.src) >> 16,
+ state->base.dst.x1, state->base.dst.y1,
+ drm_rect_width(&state->base.dst),
+ drm_rect_height(&state->base.dst));
}
}
@@ -12907,7 +12886,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
switch (encoder->type) {
unsigned int port_mask;
case INTEL_OUTPUT_UNKNOWN:
- if (WARN_ON(!HAS_DDI(dev)))
+ if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
@@ -13188,7 +13167,7 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
}
static bool
-intel_pipe_config_compare(struct drm_device *dev,
+intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *current_config,
struct intel_crtc_state *pipe_config,
bool adjust)
@@ -13312,7 +13291,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_GEN(dev_priv) < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
if (current_config->has_drrs)
@@ -13338,8 +13317,8 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
- if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_I(has_infoframe);
@@ -13361,7 +13340,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -13379,7 +13358,7 @@ intel_pipe_config_compare(struct drm_device *dev,
}
/* BDW+ don't expose a synchronous way to read the state */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
PIPE_CONF_CHECK_I(ips_enabled);
PIPE_CONF_CHECK_I(double_wide);
@@ -13398,7 +13377,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+ if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
@@ -13436,33 +13415,67 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct skl_ddb_entry *hw_entry, *sw_entry;
+ struct skl_pipe_wm hw_wm, *sw_wm;
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+ struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
- int plane;
+ int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
+ skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+ sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
/* planes */
- for_each_plane(dev_priv, pipe, plane) {
- hw_entry = &hw_ddb.plane[pipe][plane];
- sw_entry = &sw_ddb->plane[pipe][plane];
+ for_each_universal_plane(dev_priv, pipe, plane) {
+ hw_plane_wm = &hw_wm.planes[plane];
+ sw_plane_wm = &sw_wm->planes[plane];
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->wm[level]))
+ continue;
- if (skl_ddb_entry_equal(hw_entry, sw_entry))
- continue;
+ DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1, level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
+ }
+
+ if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+ &sw_plane_wm->trans_wm)) {
+ DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1,
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw_ddb.plane[pipe][plane];
+ sw_ddb_entry = &sw_ddb->plane[pipe][plane];
- DRM_ERROR("mismatch in DDB state pipe %c plane %d "
- "(expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
}
/*
@@ -13472,25 +13485,60 @@ static void verify_wm_state(struct drm_crtc *crtc,
* once the plane becomes visible, we can skip this check
*/
if (intel_crtc->cursor_addr) {
- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+ sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->wm[level]))
+ continue;
+
+ DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
+ }
+
+ if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+ &sw_plane_wm->trans_wm)) {
+ DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe),
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+ sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
- if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor "
- "(expected (%u,%u), found (%u,%u))\n",
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
pipe_name(pipe),
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
}
static void
-verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
+verify_connector_state(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
- drm_for_each_connector(connector, dev) {
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
struct drm_connector_state *state = connector->state;
@@ -13605,7 +13653,7 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_pipe_config_sanity_check(dev_priv, pipe_config);
sw_config = to_intel_crtc_state(crtc->state);
- if (!intel_pipe_config_compare(dev, sw_config,
+ if (!intel_pipe_config_compare(dev_priv, sw_config,
pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(intel_crtc, pipe_config,
@@ -13698,15 +13746,16 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state,
- struct drm_crtc_state *new_state)
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *old_state,
+ struct drm_crtc_state *new_state)
{
if (!needs_modeset(new_state) &&
!to_intel_crtc_state(new_state)->update_pipe)
return;
verify_wm_state(crtc, new_state);
- verify_connector_state(crtc->dev, crtc);
+ verify_connector_state(crtc->dev, state, crtc);
verify_crtc_state(crtc, old_state, new_state);
verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}
@@ -13722,16 +13771,17 @@ verify_disabled_dpll_state(struct drm_device *dev)
}
static void
-intel_modeset_verify_disabled(struct drm_device *dev)
+intel_modeset_verify_disabled(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
verify_encoder_state(dev);
- verify_connector_state(dev, NULL);
+ verify_connector_state(dev, state, NULL);
verify_disabled_dpll_state(dev);
}
static void update_scanline_offset(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -13751,7 +13801,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
*/
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int vtotal;
@@ -13760,7 +13810,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
- } else if (HAS_DDI(dev) &&
+ } else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
@@ -14023,7 +14073,7 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (i915.fastboot &&
- intel_pipe_config_compare(dev,
+ intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@@ -14059,13 +14109,10 @@ static int intel_atomic_check(struct drm_device *dev,
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+ struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -14088,28 +14135,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = drm_atomic_helper_prepare_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (!ret && !nonblock) {
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- ret = i915_wait_request(intel_plane_state->wait_req,
- I915_WAIT_INTERRUPTIBLE,
- NULL, NULL);
- if (ret) {
- /* Any hang should be swallowed by the wait */
- WARN_ON(ret == -EIO);
- mutex_lock(&dev->struct_mutex);
- drm_atomic_helper_cleanup_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
- break;
- }
- }
- }
-
return ret;
}
@@ -14135,22 +14160,24 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
return;
for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
if (!((1 << pipe) & crtc_mask))
continue;
- ret = drm_crtc_vblank_get(crtc);
+ ret = drm_crtc_vblank_get(&crtc->base);
if (WARN_ON(ret != 0)) {
crtc_mask &= ~(1 << pipe);
continue;
}
- last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
+ last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
}
for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
long lret;
if (!((1 << pipe) & crtc_mask))
@@ -14158,12 +14185,12 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
lret = wait_event_timeout(dev->vblank[pipe].queue,
last_vblank_count[pipe] !=
- drm_crtc_vblank_count(crtc),
+ drm_crtc_vblank_count(&crtc->base),
msecs_to_jiffies(50));
WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
- drm_crtc_vblank_put(crtc);
+ drm_crtc_vblank_put(&crtc->base);
}
}
@@ -14237,16 +14264,23 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int *crtc_vblank_mask)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
struct drm_crtc_state *old_crtc_state;
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ struct intel_crtc_state *cstate;
unsigned int updated = 0;
bool progress;
enum pipe pipe;
+ int i;
+
+ const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i)
+ /* ignore allocations for crtc's that have been turned off. */
+ if (crtc->state->active)
+ entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/*
* Whenever the number of active pipes changes, we need to make sure we
@@ -14255,21 +14289,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* cause pipe underruns and other bad stuff.
*/
do {
- int i;
progress = false;
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
bool vbl_wait = false;
unsigned int cmask = drm_crtc_mask(crtc);
- pipe = to_intel_crtc(crtc)->pipe;
- if (updated & cmask || !crtc->state->active)
+ intel_crtc = to_intel_crtc(crtc);
+ cstate = to_intel_crtc_state(crtc->state);
+ pipe = intel_crtc->pipe;
+
+ if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
- pipe))
+
+ if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
continue;
updated |= cmask;
+ entries[i] = &cstate->wm.skl.ddb;
/*
* If this is an already active pipe, it's DDB changed,
@@ -14277,7 +14314,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* then we need to wait for a vblank to pass for the
* new ddb allocation to take effect.
*/
- if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+ if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
+ &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
!crtc->state->active_changed &&
intel_state->wm_results.dirty_pipes != updated)
vbl_wait = true;
@@ -14286,7 +14324,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
crtc_vblank_mask);
if (vbl_wait)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
progress = true;
}
@@ -14301,37 +14339,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
- int i, ret;
-
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- ret = i915_wait_request(intel_plane_state->wait_req,
- 0, NULL, NULL);
- /* EIO should be eaten, and we can't get interrupted in the
- * worker, and blocking commits have waited already. */
- WARN_ON(ret);
- }
+ int i;
drm_atomic_helper_wait_for_dependencies(state);
- if (intel_state->modeset) {
- memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
- sizeof(intel_state->min_pixclk));
- dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->atomic_cdclk_freq = intel_state->cdclk;
-
+ if (intel_state->modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- }
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -14364,8 +14380,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!crtc->state->active)
- intel_update_watermarks(crtc);
+ if (!crtc->state->active) {
+ /*
+ * Make sure we don't call initial_watermarks
+ * for ILK-style watermark updates.
+ */
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ to_intel_crtc_state(crtc->state));
+ else
+ intel_update_watermarks(intel_crtc);
+ }
}
}
@@ -14388,7 +14413,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
- intel_modeset_verify_disabled(dev);
+ intel_modeset_verify_disabled(dev, state);
}
/* Complete the events for pipes that have now been disabled */
@@ -14431,7 +14456,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_cstate = to_intel_crtc_state(crtc->state);
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(intel_cstate);
+ dev_priv->display.optimize_watermarks(intel_state,
+ intel_cstate);
}
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -14440,7 +14466,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
- intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
+ intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
}
if (intel_state->modeset && intel_can_enable_sagv(state))
@@ -14457,7 +14483,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
* of triggering bugs in unclaimed access. After we finish
@@ -14475,12 +14501,33 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
static void intel_atomic_commit_work(struct work_struct *work)
{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
+ struct drm_atomic_state *state =
+ container_of(work, struct drm_atomic_state, commit_work);
+
intel_atomic_commit_tail(state);
}
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify notify)
+{
+ struct intel_atomic_state *state =
+ container_of(fence, struct intel_atomic_state, commit_ready);
+
+ switch (notify) {
+ case FENCE_COMPLETE:
+ if (state->base.commit_work.func)
+ queue_work(system_unbound_wq, &state->base.commit_work);
+ break;
+
+ case FENCE_FREE:
+ drm_atomic_state_put(&state->base);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state;
@@ -14502,10 +14549,6 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * nonblocking commits are only safe for pure plane updates. Everything else
- * should work though.
- *
* RETURNS
* Zero for success or -errno.
*/
@@ -14517,33 +14560,42 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (intel_state->modeset && nonblock) {
- DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
- return -EINVAL;
- }
-
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
- INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+ drm_atomic_state_get(state);
+ i915_sw_fence_init(&intel_state->commit_ready,
+ intel_atomic_commit_ready);
- ret = intel_atomic_prepare_commit(dev, state, nonblock);
+ ret = intel_atomic_prepare_commit(dev, state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ i915_sw_fence_commit(&intel_state->commit_ready);
return ret;
}
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
- dev_priv->wm.skl_results = intel_state->wm_results;
intel_shared_dpll_commit(state);
intel_atomic_track_fbs(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
+ if (intel_state->modeset) {
+ memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+ sizeof(intel_state->min_pixclk));
+ dev_priv->active_crtcs = intel_state->active_crtcs;
+ dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+ }
+
+ drm_atomic_state_get(state);
+ INIT_WORK(&state->commit_work,
+ nonblock ? intel_atomic_commit_work : NULL);
+
+ i915_sw_fence_commit(&intel_state->commit_ready);
+ if (!nonblock) {
+ i915_sw_fence_wait(&intel_state->commit_ready);
intel_atomic_commit_tail(state);
+ }
return 0;
}
@@ -14581,9 +14633,8 @@ retry:
goto retry;
}
- if (ret)
out:
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
/*
@@ -14656,19 +14707,21 @@ int
intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct drm_device *dev = plane->dev;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(new_state->state);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
- struct reservation_object *resv;
- int ret = 0;
+ int ret;
if (!obj && !old_obj)
return 0;
if (old_obj) {
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
+ drm_atomic_get_existing_crtc_state(new_state->state,
+ plane->state->crtc);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14681,52 +14734,58 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (needs_modeset(crtc_state))
- ret = i915_gem_object_wait_rendering(old_obj, true);
- if (ret) {
- /* GPU hangs should have been swallowed by the wait */
- WARN_ON(ret == -EIO);
- return ret;
+ if (needs_modeset(crtc_state)) {
+ ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ old_obj->resv, NULL,
+ false, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
}
}
+ if (new_state->fence) { /* explicit fencing */
+ ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+ new_state->fence,
+ I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
if (!obj)
return 0;
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long lret;
-
- lret = reservation_object_wait_timeout_rcu(resv, false, true,
- MAX_SCHEDULE_TIMEOUT);
- if (lret == -ERESTARTSYS)
- return lret;
+ if (!new_state->fence) { /* implicit fencing */
+ ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ obj->resv, NULL,
+ false, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- WARN(lret < 0, "waiting returns %li\n", lret);
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev)->cursor_needs_physical) {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
- if (ret)
+ if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
+ return ret;
+ }
} else {
struct i915_vma *vma;
vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (IS_ERR(vma))
- ret = PTR_ERR(vma);
- }
-
- if (ret == 0) {
- to_intel_plane_state(new_state)->wait_req =
- i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_KMS("failed to pin object\n");
+ return PTR_ERR(vma);
+ }
}
- return ret;
+ return 0;
}
/**
@@ -14742,9 +14801,8 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct intel_plane_state *old_intel_state;
- struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14754,11 +14812,8 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
return;
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
- !INTEL_INFO(dev)->cursor_needs_physical))
+ !INTEL_INFO(dev_priv)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
-
- i915_gem_request_assign(&intel_state->wait_req, NULL);
- i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
int
@@ -14833,30 +14888,34 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *old_intel_state =
+ struct intel_crtc_state *intel_cstate =
+ to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_crtc_state->state);
bool modeset = needs_modeset(crtc->state);
- enum pipe pipe = intel_crtc->pipe;
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
if (modeset)
- return;
+ goto out;
if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
intel_color_set_csc(crtc->state);
intel_color_load_luts(crtc->state);
}
- if (to_intel_crtc_state(crtc->state)->update_pipe)
- intel_update_pipe_config(intel_crtc, old_intel_state);
- else if (INTEL_GEN(dev_priv) >= 9) {
+ if (intel_cstate->update_pipe)
+ intel_update_pipe_config(intel_crtc, old_intel_cstate);
+ else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_crtc);
- I915_WRITE(PIPE_WM_LINETIME(pipe),
- dev_priv->wm.skl_hw.wm_linetime[pipe]);
- }
+out:
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(old_intel_state,
+ intel_cstate);
}
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -14876,9 +14935,6 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
*/
void intel_plane_destroy(struct drm_plane *plane)
{
- if (!plane)
- return;
-
drm_plane_cleanup(plane);
kfree(to_intel_plane(plane));
}
@@ -14892,53 +14948,63 @@ const struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
-
};
-static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
- int pipe)
+static struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
const uint32_t *intel_primary_formats;
+ unsigned int supported_rotations;
unsigned int num_formats;
int ret;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
+ if (!primary) {
+ ret = -ENOMEM;
goto fail;
+ }
state = intel_create_plane_state(&primary->base);
- if (!state)
+ if (!state) {
+ ret = -ENOMEM;
goto fail;
+ }
+
primary->base.state = &state->base;
primary->can_scale = false;
primary->max_downscale = 1;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
primary->can_scale = true;
state->scaler_id = -1;
}
primary->pipe = pipe;
- primary->plane = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port is hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+ primary->plane = (enum plane) !pipe;
+ else
+ primary->plane = (enum plane) pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
- primary->plane = !pipe;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
primary->update_plane = ironlake_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -14952,57 +15018,56 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- if (INTEL_INFO(dev)->gen >= 9)
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ if (INTEL_GEN(dev_priv) >= 9)
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
- else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane %c", plane_name(primary->plane));
if (ret)
goto fail;
- if (INTEL_INFO(dev)->gen >= 4)
- intel_create_rotation_property(dev, primary);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180;
+ } else {
+ supported_rotations = DRM_ROTATE_0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&primary->base,
+ DRM_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
- return &primary->base;
+ return primary;
fail:
kfree(state);
kfree(primary);
- return NULL;
-}
-
-void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
-{
- if (!dev->mode_config.rotation_property) {
- unsigned long flags = DRM_ROTATE_0 |
- DRM_ROTATE_180;
-
- if (INTEL_INFO(dev)->gen >= 9)
- flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
-
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev, flags);
- }
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base.base,
- dev->mode_config.rotation_property,
- plane->base.state->rotation);
+ return ERR_PTR(ret);
}
static int
@@ -15029,7 +15094,8 @@ intel_check_cursor_plane(struct drm_plane *plane,
return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
+ if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
+ state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
@@ -15041,7 +15107,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
return -EINVAL;
}
@@ -15056,7 +15122,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
* display power well must be turned off and on again.
* Refuse the put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+ if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
state->base.visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
@@ -15082,13 +15148,13 @@ intel_update_cursor_plane(struct drm_plane *plane,
{
struct drm_crtc *crtc = crtc_state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
if (!obj)
addr = 0;
- else if (!INTEL_INFO(dev)->cursor_needs_physical)
+ else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
addr = i915_gem_object_ggtt_offset(obj, NULL);
else
addr = obj->phys_handle->busaddr;
@@ -15097,20 +15163,25 @@ intel_update_cursor_plane(struct drm_plane *plane,
intel_crtc_update_cursor(crtc, state);
}
-static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
- int pipe)
+static struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *cursor = NULL;
struct intel_plane_state *state = NULL;
int ret;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (!cursor)
+ if (!cursor) {
+ ret = -ENOMEM;
goto fail;
+ }
state = intel_create_plane_state(&cursor->base);
- if (!state)
+ if (!state) {
+ ret = -ENOMEM;
goto fail;
+ }
+
cursor->base.state = &state->base;
cursor->can_scale = false;
@@ -15122,8 +15193,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
- ret = drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR,
@@ -15131,102 +15202,106 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (ret)
goto fail;
- if (INTEL_INFO(dev)->gen >= 4) {
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 |
- DRM_ROTATE_180);
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&cursor->base.base,
- dev->mode_config.rotation_property,
- state->base.rotation);
- }
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180);
- if (INTEL_INFO(dev)->gen >=9)
+ if (INTEL_GEN(dev_priv) >= 9)
state->scaler_id = -1;
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
- return &cursor->base;
+ return cursor;
fail:
kfree(state);
kfree(cursor);
- return NULL;
+ return ERR_PTR(ret);
}
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
int i;
- struct intel_scaler *intel_scaler;
- struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
- for (i = 0; i < intel_crtc->num_scalers; i++) {
- intel_scaler = &scaler_state->scalers[i];
- intel_scaler->in_use = 0;
- intel_scaler->mode = PS_SCALER_MODE_DYN;
+ for (i = 0; i < crtc->num_scalers; i++) {
+ struct intel_scaler *scaler = &scaler_state->scalers[i];
+
+ scaler->in_use = 0;
+ scaler->mode = PS_SCALER_MODE_DYN;
}
scaler_state->scaler_id = -1;
}
-static void intel_crtc_init(struct drm_device *dev, int pipe)
+static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
- struct drm_plane *primary = NULL;
- struct drm_plane *cursor = NULL;
- int ret;
+ struct intel_plane *primary = NULL;
+ struct intel_plane *cursor = NULL;
+ int sprite, ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
- if (intel_crtc == NULL)
- return;
+ if (!intel_crtc)
+ return -ENOMEM;
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state)
+ if (!crtc_state) {
+ ret = -ENOMEM;
goto fail;
+ }
intel_crtc->config = crtc_state;
intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
/* initialize shared scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (pipe == PIPE_C)
intel_crtc->num_scalers = 1;
else
intel_crtc->num_scalers = SKL_NUM_SCALERS;
- skl_init_scalers(dev, intel_crtc, crtc_state);
+ skl_init_scalers(dev_priv, intel_crtc, crtc_state);
}
- primary = intel_primary_plane_create(dev, pipe);
- if (!primary)
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
goto fail;
+ }
- cursor = intel_cursor_plane_create(dev, pipe);
- if (!cursor)
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
+
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
goto fail;
+ }
- ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
- cursor, &intel_crtc_funcs,
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+ &primary->base, &cursor->base,
+ &intel_crtc_funcs,
"pipe %c", pipe_name(pipe));
if (ret)
goto fail;
- /*
- * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to pipe B. Hence we want plane A feeding pipe B.
- */
intel_crtc->pipe = pipe;
- intel_crtc->plane = pipe;
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
- DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = !pipe;
- }
+ intel_crtc->plane = primary->plane;
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
@@ -15236,21 +15311,26 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
- dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_color_init(&intel_crtc->base);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
- return;
+
+ return 0;
fail:
- intel_plane_destroy(primary);
- intel_plane_destroy(cursor);
+ /*
+ * drm_mode_config_cleanup() will free up any
+ * crtcs/planes already initialized.
+ */
kfree(crtc_state);
kfree(intel_crtc);
+
+ return ret;
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -15300,40 +15380,37 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
return index_mask;
}
-static bool has_edp_a(struct drm_device *dev)
+static bool has_edp_a(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!IS_MOBILE(dev))
+ if (!IS_MOBILE(dev_priv))
return false;
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
}
-static bool intel_crt_present(struct drm_device *dev)
+static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return false;
- if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
+ if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
return false;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return false;
- if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ if (HAS_PCH_LPT_H(dev_priv) &&
+ I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@@ -15393,10 +15470,10 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
intel_lvds_init(dev);
- if (intel_crt_present(dev))
+ if (intel_crt_present(dev_priv))
intel_crt_init(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -15407,7 +15484,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_C);
intel_dsi_init(dev);
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
int found;
/*
@@ -15417,7 +15494,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
- if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -15433,17 +15510,17 @@ static void intel_setup_outputs(struct drm_device *dev)
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
- if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
intel_ddi_init(dev, PORT_E);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
- dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
+ dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
- if (has_edp_a(dev))
+ if (has_edp_a(dev_priv))
intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
@@ -15466,7 +15543,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
/*
@@ -15484,21 +15561,21 @@ static void intel_setup_outputs(struct drm_device *dev)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
/*
* eDP not supported on port D,
* so no need to worry about it
@@ -15511,18 +15588,18 @@ static void intel_setup_outputs(struct drm_device *dev)
}
intel_dsi_init(dev);
- } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
+ } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
- if (!found && IS_G4X(dev)) {
+ if (!found && IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
- if (!found && IS_G4X(dev))
+ if (!found && IS_G4X(dev_priv))
intel_dp_init(dev, DP_B, PORT_B);
}
@@ -15535,21 +15612,20 @@ static void intel_setup_outputs(struct drm_device *dev)
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
intel_dp_init(dev, DP_C, PORT_C);
}
- if (IS_G4X(dev) &&
- (I915_READ(DP_D) & DP_DETECTED))
+ if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
- } else if (IS_GEN2(dev))
+ } else if (IS_GEN2(dev_priv))
intel_dvo_init(dev);
- if (SUPPORTS_TV(dev))
+ if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev);
intel_psr_init(dev);
@@ -15604,6 +15680,8 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
+ if (obj->pin_display && obj->cache_dirty)
+ i915_gem_clflush_object(obj, true);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
mutex_unlock(&dev->struct_mutex);
@@ -15617,10 +15695,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
};
static
-u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
- uint32_t pixel_format)
+u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, uint32_t pixel_format)
{
- u32 gen = INTEL_INFO(dev)->gen;
+ u32 gen = INTEL_INFO(dev_priv)->gen;
if (gen >= 9) {
int cpp = drm_format_plane_cpp(pixel_format, 0);
@@ -15629,7 +15707,8 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
* pixels and 32K bytes."
*/
return min(8192 * cpp, 32768);
- } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ } else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv)) {
return 32*1024;
} else if (gen >= 4) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
@@ -15656,7 +15735,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
unsigned int tiling = i915_gem_object_get_tiling(obj);
int ret;
u32 pitch_limit, stride_alignment;
- char *format_name;
+ struct drm_format_name_buf format_name;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -15683,7 +15762,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
switch (mode_cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
- if (INTEL_INFO(dev)->gen < 9) {
+ if (INTEL_GEN(dev_priv) < 9) {
DRM_DEBUG("Unsupported tiling 0x%llx!\n",
mode_cmd->modifier[0]);
return -EINVAL;
@@ -15716,7 +15795,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
- pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+ pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
@@ -15746,37 +15825,33 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB8888:
break;
case DRM_FORMAT_XRGB1555:
- if (INTEL_INFO(dev)->gen > 3) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) > 3) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- INTEL_INFO(dev)->gen < 9) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ INTEL_GEN(dev_priv) < 9) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- if (INTEL_INFO(dev)->gen < 4) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) < 4) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
@@ -15784,17 +15859,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_INFO(dev)->gen < 5) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) < 5) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
default:
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
@@ -15835,17 +15908,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return fb;
}
-#ifndef CONFIG_DRM_FBDEV_EMULATION
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-#endif
-
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
@@ -16221,12 +16288,11 @@ static void intel_init_quirks(struct drm_device *dev)
}
/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_device *dev)
+static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
u8 sr1;
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+ i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
@@ -16244,11 +16310,11 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
- intel_init_clock_gating(dev);
+ intel_init_clock_gating(dev_priv);
}
/*
@@ -16265,6 +16331,7 @@ static void sanitize_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct drm_modeset_acquire_ctx ctx;
@@ -16293,12 +16360,14 @@ retry:
if (WARN_ON(IS_ERR(state)))
goto fail;
+ intel_state = to_intel_atomic_state(state);
+
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
+ intel_state->skip_intermediate_wm = true;
ret = intel_atomic_check(dev, state);
if (ret) {
@@ -16314,7 +16383,7 @@ retry:
* BIOS-programmed watermarks untouched and hope for the best.
*/
WARN(true, "Could not determine valid watermarks for inherited state\n");
- goto fail;
+ goto put_state;
}
/* Write calculated watermark values back */
@@ -16322,20 +16391,20 @@ retry:
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
cs->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(cs);
+ dev_priv->display.optimize_watermarks(intel_state, cs);
}
- drm_atomic_state_free(state);
+put_state:
+ drm_atomic_state_put(state);
fail:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
-void intel_modeset_init(struct drm_device *dev)
+int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int sprite, ret;
enum pipe pipe;
struct intel_crtc *crtc;
@@ -16353,10 +16422,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_quirks(dev);
- intel_init_pm(dev);
+ intel_init_pm(dev_priv);
- if (INTEL_INFO(dev)->num_pipes == 0)
- return;
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ return 0;
/*
* There may be no VBT; and if the BIOS enabled SSC we can
@@ -16364,7 +16433,7 @@ void intel_modeset_init(struct drm_device *dev)
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE);
@@ -16376,10 +16445,10 @@ void intel_modeset_init(struct drm_device *dev)
}
}
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
- } else if (IS_GEN3(dev)) {
+ } else if (IS_GEN3(dev_priv)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
@@ -16387,10 +16456,10 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 8192;
}
- if (IS_845G(dev) || IS_I865G(dev)) {
- dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
+ dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
} else {
@@ -16401,29 +16470,29 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = ggtt->mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev)->num_pipes,
- INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+ INTEL_INFO(dev_priv)->num_pipes,
+ INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
for_each_pipe(dev_priv, pipe) {
- intel_crtc_init(dev, pipe);
- for_each_sprite(dev_priv, pipe, sprite) {
- ret = intel_plane_init(dev, pipe, sprite);
- if (ret)
- DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
- pipe_name(pipe), sprite_name(pipe, sprite), ret);
+ int ret;
+
+ ret = intel_crtc_init(dev_priv, pipe);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
}
}
intel_update_czclk(dev_priv);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
intel_shared_dpll_init(dev);
if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev);
+ intel_update_max_cdclk(dev_priv);
/* Just disable it once at startup */
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
intel_setup_outputs(dev);
drm_modeset_lock_all(dev);
@@ -16459,6 +16528,8 @@ void intel_modeset_init(struct drm_device *dev)
* since the watermark calculation done here will use pstate->fb.
*/
sanitize_watermarks(dev);
+
+ return 0;
}
static void intel_enable_pipe_a(struct drm_device *dev)
@@ -16488,11 +16559,10 @@ static void intel_enable_pipe_a(struct drm_device *dev)
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val;
- if (INTEL_INFO(dev)->num_pipes == 1)
+ if (INTEL_INFO(dev_priv)->num_pipes == 1)
return true;
val = I915_READ(DSPCNTR(!crtc->plane));
@@ -16566,7 +16636,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
- if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
bool plane;
DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
@@ -16596,7 +16666,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
- if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
+ if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -16668,21 +16738,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-void i915_redisable_vga_power_on(struct drm_device *dev)
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+ i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
}
}
-void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
@@ -16693,7 +16760,7 @@ void i915_redisable_vga(struct drm_device *dev)
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
- i915_redisable_vga_power_on(dev);
+ i915_redisable_vga_power_on(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
@@ -16765,7 +16832,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- crtc->active ? "enabled" : "disabled");
+ enableddisabled(crtc->active));
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
@@ -16788,7 +16855,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
encoder->base.crtc = &crtc->base;
crtc->config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc->config);
@@ -16797,9 +16865,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id,
- encoder->base.name,
- encoder->base.crtc ? "enabled" : "disabled",
+ encoder->base.base.id, encoder->base.name,
+ enableddisabled(encoder->base.crtc),
pipe_name(pipe));
}
@@ -16828,9 +16895,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.encoder = NULL;
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id,
- connector->base.name,
- connector->base.encoder ? "enabled" : "disabled");
+ connector->base.base.id, connector->base.name,
+ enableddisabled(connector->base.encoder));
}
for_each_intel_crtc(dev, crtc) {
@@ -16889,7 +16955,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
}
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
intel_sanitize_crtc(crtc);
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
@@ -16909,11 +16976,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
pll->on = false;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_wm_get_hw_state(dev);
- else if (IS_GEN9(dev))
+ else if (IS_GEN9(dev_priv))
skl_wm_get_hw_state(dev);
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
ilk_wm_get_hw_state(dev);
for_each_intel_crtc(dev, crtc) {
@@ -16963,10 +17030,9 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
- if (ret) {
+ if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_free(state);
- }
+ drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -17078,10 +17144,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
/*
* set vga decode state - true == enable VGA decode
*/
-int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
@@ -17105,6 +17170,8 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -17233,17 +17300,16 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(dev_priv, i) {
@@ -17256,13 +17322,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
@@ -17287,3 +17353,5 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 14a3cf0b7213..90283edcafba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -213,6 +213,81 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
return max_dotclk;
}
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+ if (intel_dp->num_sink_rates) {
+ *sink_rates = intel_dp->sink_rates;
+ return intel_dp->num_sink_rates;
+ }
+
+ *sink_rates = default_rates;
+
+ return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
+
+static int
+intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ int size;
+
+ if (IS_BROXTON(dev_priv)) {
+ *source_rates = bxt_rates;
+ size = ARRAY_SIZE(bxt_rates);
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ *source_rates = skl_rates;
+ size = ARRAY_SIZE(skl_rates);
+ } else {
+ *source_rates = default_rates;
+ size = ARRAY_SIZE(default_rates);
+ }
+
+ /* This depends on the fact that 5.4 is last value in the array */
+ if (!intel_dp_source_supports_hbr2(intel_dp))
+ size--;
+
+ return size;
+}
+
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
+{
+ int i = 0, j = 0, k = 0;
+
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
+
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+ int *common_rates)
+{
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len;
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ source_len = intel_dp_source_rates(intel_dp, &source_rates);
+
+ return intersect_rates(source_rates, source_len,
+ sink_rates, sink_len,
+ common_rates);
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -320,8 +395,7 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -344,7 +418,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
DP |= DP_PIPE_SELECT_CHV(pipe);
else if (pipe == PIPE_B)
DP |= DP_PIPEB_SELECT;
@@ -356,10 +430,10 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* So enable temporarily it if it's not already enabled.
*/
if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev) &&
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
- if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
@@ -383,7 +457,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev, pipe);
+ vlv_force_pll_off(dev_priv, pipe);
if (release_cl_override)
chv_phy_powergate_ch(dev_priv, phy, ch, false);
@@ -570,8 +644,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- !IS_BROXTON(dev)))
+ if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)))
return;
/*
@@ -591,7 +665,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
intel_dp->pps_reset = true;
else
intel_dp->pps_pipe = INVALID_PIPE;
@@ -664,7 +738,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
@@ -692,7 +766,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@@ -706,7 +780,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@@ -821,15 +895,16 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
uint32_t precharge, timeout;
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
precharge = 3;
else
precharge = 5;
- if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
+ if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -867,14 +942,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
- bool has_aux_irq = HAS_AUX_IRQ(dev);
+ bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
pps_lock(intel_dp);
@@ -1108,8 +1183,46 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
+static enum port intel_aux_port(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port aux_port;
+
+ if (!info->alternate_aux_channel) {
+ DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+ port_name(port), port_name(port));
+ return port;
+ }
+
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_A:
+ aux_port = PORT_A;
+ break;
+ case DP_AUX_B:
+ aux_port = PORT_B;
+ break;
+ case DP_AUX_C:
+ aux_port = PORT_C;
+ break;
+ case DP_AUX_D:
+ aux_port = PORT_D;
+ break;
+ default:
+ MISSING_CASE(info->alternate_aux_channel);
+ aux_port = PORT_A;
+ break;
+ }
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+ port_name(aux_port), port_name(port));
+
+ return aux_port;
+}
+
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_B:
@@ -1123,7 +1236,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_B:
@@ -1137,7 +1250,7 @@ static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_A:
@@ -1153,7 +1266,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_A:
@@ -1168,36 +1281,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
}
}
-/*
- * On SKL we don't have Aux for port E so we rely
- * on VBT to set a proper alternate aux channel.
- */
-static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
-{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[PORT_E];
-
- switch (info->alternate_aux_channel) {
- case DP_AUX_A:
- return PORT_A;
- case DP_AUX_B:
- return PORT_B;
- case DP_AUX_C:
- return PORT_C;
- case DP_AUX_D:
- return PORT_D;
- default:
- MISSING_CASE(info->alternate_aux_channel);
- return PORT_A;
- }
-}
-
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
- if (port == PORT_E)
- port = skl_porte_aux_port(dev_priv);
-
switch (port) {
case PORT_A:
case PORT_B:
@@ -1211,11 +1297,8 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
- if (port == PORT_E)
- port = skl_porte_aux_port(dev_priv);
-
switch (port) {
case PORT_A:
case PORT_B:
@@ -1229,7 +1312,7 @@ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_ctl_reg(dev_priv, port);
@@ -1240,7 +1323,7 @@ static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_data_reg(dev_priv, port, index);
@@ -1253,7 +1336,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = intel_aux_port(dev_priv,
+ dp_to_dig_port(intel_dp)->port);
int i;
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
@@ -1281,78 +1365,37 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
-static int
-intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
-{
- if (intel_dp->num_sink_rates) {
- *sink_rates = intel_dp->sink_rates;
- return intel_dp->num_sink_rates;
- }
-
- *sink_rates = default_rates;
-
- return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
-}
-
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
-
- /* WaDisableHBR2:skl */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
- return false;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
- (INTEL_INFO(dev)->gen >= 9))
+ if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
return true;
else
return false;
}
-static int
-intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- int size;
-
- if (IS_BROXTON(dev)) {
- *source_rates = bxt_rates;
- size = ARRAY_SIZE(bxt_rates);
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- *source_rates = skl_rates;
- size = ARRAY_SIZE(skl_rates);
- } else {
- *source_rates = default_rates;
- size = ARRAY_SIZE(default_rates);
- }
-
- /* This depends on the fact that 5.4 is last value in the array */
- if (!intel_dp_source_supports_hbr2(intel_dp))
- size--;
-
- return size;
-}
-
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}
@@ -1368,43 +1411,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
-static int intersect_rates(const int *source_rates, int source_len,
- const int *sink_rates, int sink_len,
- int *common_rates)
-{
- int i = 0, j = 0, k = 0;
-
- while (i < source_len && j < sink_len) {
- if (source_rates[i] == sink_rates[j]) {
- if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
- return k;
- common_rates[k] = source_rates[i];
- ++k;
- ++i;
- ++j;
- } else if (source_rates[i] < sink_rates[j]) {
- ++i;
- } else {
- ++j;
- }
- }
- return k;
-}
-
-static int intel_dp_common_rates(struct intel_dp *intel_dp,
- int *common_rates)
-{
- const int *source_rates, *sink_rates;
- int source_len, sink_len;
-
- sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
- source_len = intel_dp_source_rates(intel_dp, &source_rates);
-
- return intersect_rates(source_rates, source_len,
- sink_rates, sink_len,
- common_rates);
-}
-
static void snprintf_int_array(char *str, size_t len,
const int *array, int nelem)
{
@@ -1444,42 +1450,35 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("common rates: %s\n", str);
}
-static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+bool
+__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
{
- uint8_t rev;
- int len;
-
- if ((drm_debug & DRM_UT_KMS) == 0)
- return;
-
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
- return;
-
- len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
- if (len < 0)
- return;
+ u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
+ DP_SINK_OUI;
- DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+ return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
+ sizeof(*desc);
}
-static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
- uint8_t rev[2];
- int len;
+ struct intel_dp_desc *desc = &intel_dp->desc;
+ bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+ DP_OUI_SUPPORT;
+ int dev_id_len;
- if ((drm_debug & DRM_UT_KMS) == 0)
- return;
-
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
- return;
+ if (!__intel_dp_read_desc(intel_dp, desc))
+ return false;
- len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
- if (len < 0)
- return;
+ dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
+ DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
+ drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
+ (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
+ dev_id_len, desc->device_id,
+ desc->hw_rev >> 4, desc->hw_rev & 0xf,
+ desc->sw_major_rev, desc->sw_minor_rev);
- DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+ return true;
}
static int rate_to_index(int find, const int *rates)
@@ -1543,8 +1542,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1569,7 +1567,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
max_clock = common_len - 1;
- if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false;
@@ -1579,14 +1577,14 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
int ret;
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
}
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
@@ -1711,7 +1709,7 @@ found:
to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
}
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
return true;
@@ -1769,7 +1767,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_GEN7(dev) && port == PORT_A) {
+ if (IS_GEN7(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1780,7 +1778,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29;
- } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
@@ -1792,8 +1790,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
+ if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1805,7 +1802,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
else if (crtc->pipe == PIPE_B)
intel_dp->DP |= DP_PIPEB_SELECT;
@@ -2114,7 +2111,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2122,7 +2119,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
}
pp |= PANEL_POWER_ON;
- if (!IS_GEN5(dev))
+ if (!IS_GEN5(dev_priv))
pp |= PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2131,7 +2128,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -2363,7 +2360,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
- intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
+ intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -2444,9 +2441,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN))
goto out;
- if (IS_GEN7(dev) && port == PORT_A) {
+ if (IS_GEN7(dev_priv) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
enum pipe p;
for_each_pipe(dev_priv, p) {
@@ -2461,7 +2458,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
i915_mmio_reg_offset(intel_dp->output_reg));
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
*pipe = PORT_TO_PIPE(tmp);
@@ -2489,7 +2486,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
@@ -2515,8 +2512,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
+ if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
pipe_config->lane_count =
@@ -2636,7 +2632,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
dp_train_pat & DP_TRAINING_PATTERN_MASK);
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -2662,8 +2658,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_GEN7(dev) && port == PORT_A) ||
- (HAS_PCH_CPT(dev) && port != PORT_A)) {
+ } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -2683,7 +2679,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
} else {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
*DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
*DP &= ~DP_LINK_TRAIN_MASK;
@@ -2699,7 +2695,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
*DP |= DP_LINK_TRAIN_PAT_3_CHV;
} else {
DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
@@ -2735,7 +2731,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
}
static void intel_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2749,7 +2746,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_init_panel_power_sequencer(intel_dp);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -2760,10 +2757,10 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_unlock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
unsigned int lane_mask = 0x0;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
@@ -2777,7 +2774,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
if (pipe_config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
@@ -2787,7 +2784,7 @@ static void g4x_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(intel_dp);
}
@@ -2924,7 +2921,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
{
vlv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
@@ -2942,7 +2939,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
{
chv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -2979,21 +2976,20 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (INTEL_INFO(dev)->gen >= 9) {
+ else if (INTEL_GEN(dev_priv) >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_GEN7(dev) && port == PORT_A)
+ else if (IS_GEN7(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (HAS_PCH_CPT(dev) && port != PORT_A)
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -3002,10 +2998,10 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3018,7 +3014,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3030,7 +3026,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3042,7 +3038,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_GEN7(dev) && port == PORT_A) {
+ } else if (IS_GEN7(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3343,21 +3339,21 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_GEN7(dev) && port == PORT_A) {
+ } else if (IS_GEN7(dev_priv) && port == PORT_A) {
signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- } else if (IS_GEN6(dev) && port == PORT_A) {
+ } else if (IS_GEN6(dev_priv) && port == PORT_A) {
signal_levels = gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
@@ -3402,7 +3398,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
enum port port = intel_dig_port->port;
uint32_t val;
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return;
val = I915_READ(DP_TP_CTL(port));
@@ -3437,7 +3433,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t DP = intel_dp->DP;
- if (WARN_ON(HAS_DDI(dev)))
+ if (WARN_ON(HAS_DDI(dev_priv)))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -3445,12 +3441,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("\n");
- if ((IS_GEN7(dev) && port == PORT_A) ||
- (HAS_PCH_CPT(dev) && port != PORT_A)) {
+ if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -3468,7 +3464,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -3486,7 +3482,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -3496,7 +3492,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-static bool
+bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
@@ -3520,6 +3516,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
+ intel_dp_read_desc(intel_dp);
+
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
@@ -3551,8 +3549,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
/* Read the eDP Display control capabilities registers */
if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
- intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) ==
- sizeof(intel_dp->edp_dpcd)))
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd))
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
@@ -3607,8 +3605,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (!is_edp(intel_dp) && !intel_dp->sink_count)
return false;
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
@@ -3622,23 +3619,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return true;
}
-static void
-intel_dp_probe_oui(struct intel_dp *intel_dp)
-{
- u8 buf[3];
-
- if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
- return;
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
- DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
- DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
-}
-
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
@@ -3682,7 +3662,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret = 0;
@@ -3703,7 +3683,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
}
do {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3726,7 +3706,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret;
@@ -3754,14 +3734,14 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return -EIO;
}
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return 0;
}
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int count, ret;
@@ -3772,7 +3752,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
return ret;
do {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3989,6 +3969,31 @@ go_again:
}
static void
+intel_dp_retrain_link(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ /* Suppress underruns caused by re-training */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (crtc->config->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), false);
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+
+ /* Keep underrun reporting disabled until things are stable */
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (crtc->config->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), true);
+}
+
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
@@ -4008,13 +4013,18 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
+ /* FIXME: we need to synchronize this sort of stuff with hardware
+ * readout */
+ if (WARN_ON_ONCE(!intel_dp->lane_count))
+ return;
+
/* if link training is requested we should perform it always */
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
+
+ intel_dp_retrain_link(intel_dp);
}
}
@@ -4096,7 +4106,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* if there's no downstream port, we're done */
- if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
/* If we're HPD-aware, SINK_COUNT changes dynamically */
@@ -4387,10 +4397,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp_print_rates(intel_dp);
- intel_dp_probe_oui(intel_dp);
-
- intel_dp_print_hw_revision(intel_dp);
- intel_dp_print_sw_revision(intel_dp);
+ intel_dp_read_desc(intel_dp);
intel_dp_configure_mst(intel_dp);
@@ -4454,21 +4461,11 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum drm_connector_status status = connector->status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (intel_dp->is_mst) {
- /* MST devices are disconnected from a monitor POV */
- intel_dp_unset_edid(intel_dp);
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
- return connector_status_disconnected;
- }
-
/* If full detect is not performed yet, do a full detect */
if (!intel_dp->detect_done)
status = intel_dp_long_pulse(intel_dp->attached_connector);
@@ -4756,11 +4753,16 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
+ if (IS_GEN9(dev_priv) && lspcon->active)
+ lspcon_resume(lspcon);
+
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
@@ -4867,15 +4869,13 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_edp(struct drm_device *dev, enum port port)
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* eDP not supported on g4x. so bail out early just
* for a bit extra safety in case the VBT is bonkers.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return false;
if (port == PORT_A)
@@ -5074,7 +5074,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5087,9 +5087,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
if (port == PORT_A)
port_sel = PANEL_PORT_SELECT_DPA;
else
@@ -5100,7 +5100,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@@ -5108,7 +5108,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- IS_BROXTON(dev) ?
+ IS_BROXTON(dev_priv) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
@@ -5116,7 +5116,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
static void intel_dp_pps_init(struct drm_device *dev,
struct intel_dp *intel_dp)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
@@ -5475,7 +5477,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
mutex_init(&dev_priv->drrs.mutex);
- if (INTEL_INFO(dev)->gen <= 6) {
+ if (INTEL_GEN(dev_priv) <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
}
@@ -5586,7 +5588,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
@@ -5595,7 +5597,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
else
pipe = PORT_TO_PIPE(intel_dp->DP);
@@ -5649,28 +5651,28 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_edp(dev, port))
+ if (intel_dp_is_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -5684,7 +5686,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
is_edp(intel_dp) && port != PORT_B && port != PORT_C))
return false;
@@ -5705,7 +5707,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -5717,7 +5719,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
@@ -5734,7 +5736,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
+ if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -5751,7 +5753,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev) && !IS_GM45(dev)) {
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -5794,13 +5796,13 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
@@ -5808,7 +5810,7 @@ bool intel_dp_init(struct drm_device *dev,
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
intel_encoder->post_disable = ilk_post_disable_dp;
}
@@ -5817,7 +5819,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@@ -5826,6 +5828,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
intel_encoder->cloneable = 0;
+ intel_encoder->port = port;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index c438b02184cb..0048b520baf7 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -225,9 +225,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
- *
- * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
- * supported in source but still not enabled.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
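
The selection that follows from the comment above reduces to a small pure function: use TPS3 for channel equalization only when both source and sink advertise it, otherwise fall back to TPS2 even at HBR2 rates, since some sinks claim HBR2 without TPS3. This is a minimal sketch, not the driver's exact code; the DP_TRAINING_PATTERN_* values are defined locally here as stand-ins for the defines in include/drm/drm_dp_helper.h.

#include <stdbool.h>
#include <stdint.h>

#define DP_TRAINING_PATTERN_2   2   /* stand-ins for drm_dp_helper.h values */
#define DP_TRAINING_PATTERN_3   3

/*
 * Channel-equalization pattern choice: prefer TPS3 when both ends
 * support it; otherwise use TPS2, accepting that an HBR2 sink
 * without TPS3 is technically out of spec.
 */
static uint32_t pick_eq_training_pattern(bool source_tps3, bool sink_tps3)
{
	return (source_tps3 && sink_tps3) ? DP_TRAINING_PATTERN_3
					  : DP_TRAINING_PATTERN_2;
}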
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 54a9d7610d8f..b029d1026a28 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -43,7 +43,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
- pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
bpp = 24;
/*
@@ -523,6 +522,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->port = intel_dig_port->port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 047f48748944..7a8e82dabbf2 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -23,6 +23,565 @@
#include "intel_drv.h"
+/**
+ * DOC: DPIO
+ *
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to a specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. The CRI
+ * clock must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally, the PHY contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ * pipe A == CMN/PLL/REF CH0
+ *
+ * pipe B == CMN/PLL/REF CH1
+ *
+ * port B == PCS/TX CH0
+ *
+ * port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * i.e. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ * pipe C == CMN/PLL/REF CH0
+ *
+ * port D == PCS/TX CH0
+ *
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT). ::
+ *
+ *
+ * Dual channel PHY (VLV/CHV/BXT)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
+ *
+ * Single channel PHY (CHV/BXT)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
+ */
+
+/**
+ * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
+ */
+struct bxt_ddi_phy_info {
+ /**
+ * @dual_channel: true if this phy has a second channel.
+ */
+ bool dual_channel;
+
+ /**
+ * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
+ * Otherwise the GRC value will be copied from the phy indicated by
+ * this field.
+ */
+ enum dpio_phy rcomp_phy;
+
+ /**
+ * @channel: struct containing per channel information.
+ */
+ struct {
+ /**
+ * @port: which port maps to this channel.
+ */
+ enum port port;
+ } channel[2];
+};
+
+static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+ [DPIO_PHY0] = {
+ .dual_channel = true,
+ .rcomp_phy = DPIO_PHY1,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_B },
+ [DPIO_CH1] = { .port = PORT_C },
+ }
+ },
+ [DPIO_PHY1] = {
+ .dual_channel = false,
+ .rcomp_phy = -1,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_A },
+ }
+ },
+};
+
+static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
+{
+ return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
+ BIT(phy_info->channel[DPIO_CH0].port);
+}
+
+void bxt_port_to_phy_channel(enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch)
+{
+ const struct bxt_ddi_phy_info *phy_info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
+ phy_info = &bxt_ddi_phy_info[i];
+
+ if (port == phy_info->channel[DPIO_CH0].port) {
+ *phy = i;
+ *ch = DPIO_CH0;
+ return;
+ }
+
+ if (phy_info->dual_channel &&
+ port == phy_info->channel[DPIO_CH1].port) {
+ *phy = i;
+ *ch = DPIO_CH1;
+ return;
+ }
+ }
+
+ WARN(1, "PHY not found for PORT %c", port_name(port));
+ *phy = DPIO_PHY0;
+ *ch = DPIO_CH0;
+}
+
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis)
+{
+ u32 val;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ /*
+ * While we write to the group register to program all lanes at once we
+ * can read only lane registers and we pick lanes 0/1 for that.
+ */
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+ val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+ val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+ val &= ~SCALE_DCOMP_METHOD;
+ if (enable)
+ val |= SCALE_DCOMP_METHOD;
+
+ if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+ DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
+
+ I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+ val &= ~DE_EMPHASIS;
+ val |= deemphasis << DEEMPH_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+}
+
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ enum port port;
+
+ if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+ return false;
+
+ if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+ phy);
+
+ return false;
+ }
+
+ if (phy_info->rcomp_phy == -1 &&
+ !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
+ phy);
+
+ return false;
+ }
+
+ if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+ phy);
+
+ return false;
+ }
+
+ for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
+ u32 tmp = I915_READ(BXT_PHY_CTL(port));
+
+ if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
+ "for port %c powered down "
+ "(PHY_CTL %08x)\n",
+ phy, port_name(port), tmp);
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+
+ return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ if (intel_wait_for_register(dev_priv,
+ BXT_PORT_REF_DW3(phy),
+ GRC_DONE, GRC_DONE,
+ 10))
+ DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ u32 val;
+
+ if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+ /* Still read out the GRC value for state verification */
+ if (phy_info->rcomp_phy != -1)
+ dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+
+ if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
+
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
+ }
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val |= GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+
+ /*
+ * The PHY registers start out inaccessible and respond to reads with
+ * all 1s. Eventually they become accessible as they power up, then
+ * the reserved bit will give the default 0. Poll on the reserved bit
+ * becoming 0 to find when the PHY is accessible.
+ * HW team confirmed that the time to reach phypowergood status is
+ * anywhere between 50 us and 100 us.
+ */
+ if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
+ DRM_ERROR("timeout during PHY%d power on\n", phy);
+ }
+
+ /* Program PLL Rcomp code offset */
+ val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+ val &= ~IREF0RC_OFFSET_MASK;
+ val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+
+ val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+ val &= ~IREF1RC_OFFSET_MASK;
+ val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+
+ /* Program power gating */
+ val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+ val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+ SUS_CLK_CONFIG;
+ I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+
+ if (phy_info->dual_channel) {
+ val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+ val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+ I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+ }
+
+ if (phy_info->rcomp_phy != -1) {
+ uint32_t grc_code;
+ /*
+ * PHY0 isn't connected to an RCOMP resistor so copy over
+ * the corresponding calibrated value from PHY1, and disable
+ * the automatic calibration on PHY0.
+ */
+ val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
+ phy_info->rcomp_phy);
+ grc_code = val << GRC_CODE_FAST_SHIFT |
+ val << GRC_CODE_SLOW_SHIFT |
+ val;
+ I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+
+ val = I915_READ(BXT_PORT_REF_DW8(phy));
+ val |= GRC_DIS | GRC_RDY_OVRD;
+ I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+ }
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val |= COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ if (phy_info->rcomp_phy == -1)
+ bxt_phy_wait_grc_done(dev_priv, phy);
+
+}
+
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ uint32_t val;
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val &= ~COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val &= ~GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+}
+
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
+ bool was_enabled;
+
+ lockdep_assert_held(&dev_priv->power_domains.lock);
+
+ if (rcomp_phy != -1) {
+ was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+
+ /*
+ * We need to copy the GRC calibration value from rcomp_phy,
+ * so make sure it's powered up.
+ */
+ if (!was_enabled)
+ _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+ }
+
+ _bxt_ddi_phy_init(dev_priv, phy);
+
+ if (rcomp_phy != -1 && !was_enabled)
+ bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
+}
+
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ i915_reg_t reg, u32 mask, u32 expected,
+ const char *reg_fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ u32 val;
+
+ val = I915_READ(reg);
+ if ((val & mask) == expected)
+ return true;
+
+ va_start(args, reg_fmt);
+ vaf.fmt = reg_fmt;
+ vaf.va = &args;
+
+ DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ "current %08x, expected %08x (mask %08x)\n",
+ phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+ mask);
+
+ va_end(args);
+
+ return false;
+}
+
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ uint32_t mask;
+ bool ok;
+
+#define _CHK(reg, mask, exp, fmt, ...) \
+ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+ ## __VA_ARGS__)
+
+ if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+ return false;
+
+ ok = true;
+
+ /* PLL Rcomp code offset */
+ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
+ ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+ /* Power gating */
+ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+ ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+ if (phy_info->dual_channel)
+ ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
+ DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+ "BXT_PORT_CL2CM_DW6(%d)", phy);
+
+ if (phy_info->rcomp_phy != -1) {
+ u32 grc_code = dev_priv->bxt_phy_grc;
+
+ grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+ grc_code << GRC_CODE_SLOW_SHIFT |
+ grc_code;
+ mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+ GRC_CODE_NOM_MASK;
+ ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
+ "BXT_PORT_REF_DW6(%d)", phy);
+
+ mask = GRC_DIS | GRC_RDY_OVRD;
+ ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
+ "BXT_PORT_REF_DW8(%d)", phy);
+ }
+
+ return ok;
+#undef _CHK
+}
+
+uint8_t
+bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_count)
+{
+ switch (lane_count) {
+ case 1:
+ return 0;
+ case 2:
+ return BIT(2) | BIT(0);
+ case 4:
+ return BIT(3) | BIT(2) | BIT(0);
+ default:
+ MISSING_CASE(lane_count);
+
+ return 0;
+ }
+}
+
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_lat_optim_mask)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ /*
+ * Note that on CHV this flag is called UPAR, but has
+ * the same function.
+ */
+ val &= ~LATENCY_OPTIM;
+ if (lane_lat_optim_mask & BIT(lane))
+ val |= LATENCY_OPTIM;
+
+ I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+ }
+}
+
+uint8_t
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+ uint8_t mask;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ mask = 0;
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ if (val & LATENCY_OPTIM)
+ mask |= BIT(lane);
+ }
+
+ return mask;
+}
+
+
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
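
The DOC comment and the table-driven lookup added above boil down to a fixed mapping on BXT: ports B and C live on the dual-channel PHY0 (channels 0 and 1), and port A on the single-channel PHY1. The standalone sketch below restates that mapping and the lane-latency-optimization mask as pure functions; the enum values are local stand-ins for the driver's enum port/dpio_phy/dpio_channel, so this is illustrative rather than the in-tree implementation.

#include <stdint.h>

enum port { PORT_A, PORT_B, PORT_C };          /* stand-ins for i915 enums */
enum dpio_phy { DPIO_PHY0, DPIO_PHY1 };
enum dpio_channel { DPIO_CH0, DPIO_CH1 };

/* BXT: port B/C -> PHY0 CH0/CH1, port A -> PHY1 CH0 (see DOC: DPIO above). */
static void port_to_phy_channel(enum port port,
				enum dpio_phy *phy, enum dpio_channel *ch)
{
	if (port == PORT_A) {
		*phy = DPIO_PHY1;
		*ch = DPIO_CH0;
	} else {
		*phy = DPIO_PHY0;
		*ch = (port == PORT_C) ? DPIO_CH1 : DPIO_CH0;
	}
}

/* Lanes that get the latency-optimization bit for a given lane count. */
static uint8_t lane_lat_optim_mask(uint8_t lane_count)
{
	switch (lane_count) {
	case 1:  return 0;
	case 2:  return (1 << 2) | (1 << 0);
	case 4:  return (1 << 3) | (1 << 2) | (1 << 0);
	default: return 0;
	}
}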
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 1c59ca50c430..58a756f2f224 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -188,13 +188,12 @@ out:
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
/* PCH only available on ILK+ */
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return;
if (pll == NULL)
@@ -1371,6 +1370,10 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
{
uint32_t temp;
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1378,72 +1381,72 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->config.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(port, 0));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->config.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
- temp = I915_READ(BXT_PORT_PLL(port, 1));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->config.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(port, 2));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->config.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(port, 3));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->config.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(port, 6));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->config.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(port, 8));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->config.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
- temp = I915_READ(BXT_PORT_PLL(port, 9));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->config.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
- temp = I915_READ(BXT_PORT_PLL(port, 10));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->config.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->config.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1459,11 +1462,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->config.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+ I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1485,6 +1488,10 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
bool ret;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
@@ -1495,36 +1502,36 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PORT_PLL_ENABLE))
goto out;
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+ hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@@ -1533,11 +1540,11 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+ hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+ if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+ I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;
@@ -1851,13 +1858,13 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dpll_mgr = &skl_pll_mgr;
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
dpll_mgr = &bxt_pll_mgr;
- else if (HAS_DDI(dev))
+ else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
- else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
@@ -1883,7 +1890,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
/* FIXME: Move this to a more suitable place */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_ddi_pll_init(dev);
}
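
The mechanical change in bxt_ddi_pll_enable() above is that every PLL/PCS register is now addressed by (phy, channel) rather than by port, so callers resolve the pair once up front and then reuse it. Below is a hedged sketch of the resulting read-modify-write pattern; it uses only helpers that appear in this patch (bxt_port_to_phy_channel, I915_READ/I915_WRITE, BXT_PORT_PLL_EBB_4, PORT_PLL_10BIT_CLK_ENABLE), the helper name itself is illustrative, and the snippet is not compilable outside the driver.

/* Sketch only: resolve the PHY/channel once, then program the register. */
static void bxt_program_pll_ebb4(struct drm_i915_private *dev_priv,
				 enum port port, u32 ebb4_bits)
{
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 temp;

	bxt_port_to_phy_channel(port, &phy, &ch);	/* 1:1 port->PLL mapping */

	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;		/* clear, then apply state */
	temp |= ebb4_bits;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
}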
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a19ec06f9e42..cd132c216a67 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -206,6 +206,7 @@ struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
+ enum port port;
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
@@ -247,6 +248,8 @@ struct intel_encoder {
void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
+ /* for communication with audio component; protected by av_mutex */
+ const struct drm_connector *audio_connector;
};
struct intel_panel {
@@ -291,6 +294,9 @@ struct intel_connector {
*/
struct intel_encoder *encoder;
+ /* ACPI device id for ACPI and driver cooperation */
+ u32 acpi_device_id;
+
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
@@ -362,6 +368,8 @@ struct intel_atomic_state {
/* Gen9+ only */
struct skl_wm_values wm_results;
+
+ struct i915_sw_fence commit_ready;
};
struct intel_plane_state {
@@ -398,9 +406,6 @@ struct intel_plane_state {
int scaler_id;
struct drm_intel_sprite_colorkey ckey;
-
- /* async flip related structures */
- struct drm_i915_gem_request *wait_req;
};
struct intel_initial_plane_config {
@@ -465,9 +470,13 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
-struct skl_pipe_wm {
+struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
+};
+
+struct skl_pipe_wm {
+ struct skl_plane_wm planes[I915_MAX_PLANES];
uint32_t linetime;
};
@@ -493,14 +502,7 @@ struct intel_crtc_wm_state {
struct {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
-
- /* cached plane data rate */
- unsigned plane_data_rate[I915_MAX_PLANES];
- unsigned plane_y_data_rate[I915_MAX_PLANES];
-
- /* minimum block allocation */
- uint16_t minimum_blocks[I915_MAX_PLANES];
- uint16_t minimum_y_blocks[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb;
} skl;
};
@@ -653,7 +655,6 @@ struct intel_crtc_state {
bool double_wide;
- bool dp_encoder_is_mst;
int pbn;
struct intel_crtc_scaler_state scaler_state;
@@ -723,7 +724,6 @@ struct intel_crtc {
/* watermarks currently being used */
union {
struct intel_pipe_wm ilk;
- struct skl_pipe_wm skl;
} active;
/* allow CxSR on this pipe */
@@ -796,22 +796,22 @@ struct intel_plane {
};
struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
+ u16 fifo_size;
+ u16 max_wm;
+ u8 default_wm;
+ u8 guard_size;
+ u8 cacheline_size;
};
struct cxsr_latency {
- int is_desktop;
- int is_ddr3;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
};
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@@ -872,6 +872,14 @@ enum link_m_n_set {
M2_N2
};
+struct intel_dp_desc {
+ u8 oui[3];
+ u8 device_id[6];
+ u8 hw_rev;
+ u8 sw_major_rev;
+ u8 sw_minor_rev;
+} __packed;
+
struct intel_dp {
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
@@ -894,6 +902,8 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
+ /* sink or branch descriptor */
+ struct intel_dp_desc desc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -950,17 +960,22 @@ struct intel_dp {
bool compliance_test_active;
};
+struct intel_lspcon {
+ bool active;
+ enum drm_lspcon_mode mode;
+ bool desc_valid;
+};
+
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
+ struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
- /* for communication with audio component; protected by av_mutex */
- const struct drm_connector *audio_connector;
};
struct intel_dp_mst_encoder {
@@ -1012,17 +1027,15 @@ vlv_pipe_to_channel(enum pipe pipe)
}
}
-static inline struct drm_crtc *
-intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+static inline struct intel_crtc *
+intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->pipe_to_crtc_mapping[pipe];
}
-static inline struct drm_crtc *
-intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+static inline struct intel_crtc *
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->plane_to_crtc_mapping[plane];
}
@@ -1082,15 +1095,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
-/*
- * Returns the number of planes for this pipe, ie the number of sprites + 1
- * (primary plane). This doesn't count the cursor plane then.
- */
-static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
-{
- return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
-}
-
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
@@ -1107,6 +1111,9 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1129,6 +1136,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -1176,12 +1186,15 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
@@ -1230,18 +1243,17 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
static inline void
-intel_wait_for_vblank(struct drm_device *dev, int pipe)
+intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(dev, pipe);
+ drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
-intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
{
- const struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->active)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
@@ -1285,21 +1297,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
-static inline bool
-intel_rotation_90_or_270(unsigned int rotation)
-{
- return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
-}
-
-void intel_create_rotation_property(struct drm_device *dev,
- struct intel_plane *plane);
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
/* modesetting asserts */
@@ -1327,12 +1330,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
@@ -1350,7 +1347,7 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-bool intel_crtc_active(struct drm_crtc *crtc);
+bool intel_crtc_active(struct intel_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
@@ -1396,7 +1393,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
@@ -1443,6 +1440,11 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+bool __intel_dp_read_desc(struct intel_dp *intel_dp,
+ struct intel_dp_desc *desc);
+bool intel_dp_read_desc(struct intel_dp *intel_dp);
+
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1487,6 +1489,10 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
{
}
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
@@ -1513,6 +1519,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
@@ -1642,23 +1649,6 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
}
-static inline int
-assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
-{
- int seq = atomic_read(&dev_priv->pm.atomic_seq);
-
- assert_rpm_wakelock_held(dev_priv);
-
- return seq;
-}
-
-static inline void
-assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
-{
- WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
- "HW access outside of RPM atomic section\n");
-}
-
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @dev_priv: i915 device instance
@@ -1714,11 +1704,11 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
/* intel_pm.c */
-void intel_init_clock_gating(struct drm_device *dev);
-void intel_suspend_hw(struct drm_device *dev);
-int ilk_wm_max_level(const struct drm_device *dev);
-void intel_update_watermarks(struct drm_crtc *crtc);
-void intel_init_pm(struct drm_device *dev);
+void intel_init_clock_gating(struct drm_i915_private *dev_priv);
+void intel_suspend_hw(struct drm_i915_private *dev_priv);
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
+void intel_update_watermarks(struct intel_crtc *crtc);
+void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1742,21 +1732,16 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+ struct skl_pipe_wm *out);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
-void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm);
-void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm,
- int plane);
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
@@ -1773,7 +1758,8 @@ bool intel_sdvo_init(struct drm_device *dev,
/* intel_sprite.c */
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
-int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
@@ -1835,4 +1821,7 @@ int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void intel_color_set_csc(struct drm_crtc_state *crtc_state);
void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+/* intel_lspcon.c */
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
#endif /* __INTEL_DRV_H__ */
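
The new struct intel_dp_desc mirrors the 12-byte identification block DP devices expose in DPCD: OUI, device ID string, hardware revision and software revision. A minimal sketch of how such a descriptor might be fetched over AUX is shown below, assuming the standard drm_dp_dpcd_read() helper and the DP_SINK_OUI/DP_BRANCH_OUI offsets from drm_dp_helper.h; the real intel_dp_read_desc() may differ in details such as branch-vs-sink selection and logging.

/* Sketch: read the 12-byte OUI/device-ID/revision block into *desc. */
static bool read_dp_desc(struct drm_dp_aux *aux, bool is_branch,
			 struct intel_dp_desc *desc)
{
	unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;

	memset(desc, 0, sizeof(*desc));

	/* drm_dp_dpcd_read() returns the number of bytes transferred. */
	return drm_dp_dpcd_read(aux, offset, desc, sizeof(*desc)) ==
	       sizeof(*desc);
}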
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b2e3d3a334f7..4e0d025490a3 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -437,11 +437,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
}
@@ -464,7 +464,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -494,7 +494,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -656,7 +656,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -664,7 +663,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
@@ -741,7 +740,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
bool active = false;
@@ -762,7 +760,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
+ i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
@@ -771,7 +769,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ port == PORT_C)
enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
@@ -970,11 +969,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
DRM_DEBUG_KMS("\n");
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@ -1066,7 +1065,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1138,7 +1137,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1153,7 +1152,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
@@ -1242,7 +1241,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
+ if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
@@ -1346,7 +1345,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (HAS_GMCH_DISPLAY(dev) &&
+ if (HAS_GMCH_DISPLAY(to_i915(dev)) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
@@ -1450,9 +1449,9 @@ void intel_dsi_init(struct drm_device *dev)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1488,6 +1487,7 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_encoder->port = port;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
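
The comment that closes this hunk describes a fixed pipe-to-DSI-port mapping on BYT/CHV (port A is reachable only from pipe A, port C only from pipe B), while BXT allows any pipe. A hedged sketch of the crtc_mask assignment implied by that comment follows; the helper name is illustrative, not the driver's, and the exact masks should be taken from the surrounding code rather than from this snippet.

/* Sketch: which pipes can drive a given DSI port. */
static unsigned int dsi_crtc_mask(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* pipe A <-> port A, pipe B <-> port C, nothing else */
		return port == PORT_A ? (1 << 0) : (1 << 1);

	/* BXT: any pipe can drive either DSI port */
	return (1 << 0) | (1 << 1) | (1 << 2);
}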
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index cd154ce6b6c1..0d8ff0034b88 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -126,6 +126,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u16 len;
enum port port;
+ DRM_DEBUG_KMS("\n");
+
flags = *data++;
type = *data++;
@@ -199,6 +201,8 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
u32 delay = *((const u32 *) data);
+ DRM_DEBUG_KMS("\n");
+
usleep_range(delay, delay + 10);
data += 4;
@@ -307,6 +311,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
u8 gpio_source, gpio_index;
bool value;
+ DRM_DEBUG_KMS("\n");
+
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
@@ -331,18 +337,36 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
+ DRM_DEBUG_KMS("Skipping I2C element execution\n");
+
return data + *(data + 6) + 7;
}
+static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ DRM_DEBUG_KMS("Skipping SPI element execution\n");
+
+ return data + *(data + 5) + 6;
+}
+
+static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+
+ return data + 15;
+}
+
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
- [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
+ [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c,
+ [MIPI_SEQ_ELEM_SPI] = mipi_exec_spi,
+ [MIPI_SEQ_ELEM_PMIC] = mipi_exec_pmic,
};
/*
@@ -385,11 +409,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
return;
data = dev_priv->vbt.dsi.sequence[seq_id];
- if (!data) {
- DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
- seq_id, sequence_name(seq_id));
+ if (!data)
return;
- }
WARN_ON(*data != seq_id);
@@ -420,7 +441,15 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
operation_size = *data++;
if (mipi_elem_exec) {
+ const u8 *next = data + operation_size;
+
data = mipi_elem_exec(intel_dsi, data);
+
+ /* Consistency check if we have size. */
+ if (operation_size && data != next) {
+ DRM_ERROR("Inconsistent operation size\n");
+ return;
+ }
} else if (operation_size) {
/* We have size, skip. */
DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
@@ -438,6 +467,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
static int vbt_panel_prepare(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_POWER_ON);
+ generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
@@ -445,7 +476,8 @@ static int vbt_panel_prepare(struct drm_panel *panel)
static int vbt_panel_unprepare(struct drm_panel *panel)
{
- generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_POWER_OFF);
return 0;
}
@@ -453,12 +485,14 @@ static int vbt_panel_unprepare(struct drm_panel *panel)
static int vbt_panel_enable(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
+ generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
+ generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_OFF);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;
@@ -740,9 +774,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
- DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
- DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
- "disabled" : "enabled");
+ DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
+ DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
@@ -761,8 +794,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
DRM_DEBUG_KMS("BTA %s\n",
- intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
- "disabled" : "enabled");
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
/* delays in VBT are in unit of 100us, so need to convert
* here in ms
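
The consistency check added to generic_exec_sequence() above guards against handlers that consume a different number of bytes than the element's declared size. The standalone sketch below shows the same walk over a VBT-style element stream: each element is an operation byte, a size byte (sequence v3+) and a payload; known operations are executed, unknown ones are skipped by size, and a size mismatch aborts the walk. Names and the exact layout are simplified for illustration and do not match the in-tree parser byte for byte.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef const uint8_t *(*elem_exec_fn)(const uint8_t *payload);

/* Walk elements of the form [op][size][payload...], terminated by op == 0. */
static void exec_sequence(const uint8_t *data, const elem_exec_fn *handlers,
			  size_t nr_handlers)
{
	while (*data != 0) {
		uint8_t op = *data++;
		uint8_t size = *data++;
		const uint8_t *next = data + size;
		elem_exec_fn fn = op < nr_handlers ? handlers[op] : NULL;

		if (fn) {
			data = fn(data);
			if (size && data != next) {	/* consistency check */
				fprintf(stderr, "inconsistent element size\n");
				return;
			}
		} else if (size) {
			data = next;			/* unknown op: skip by size */
		} else {
			fprintf(stderr, "unknown element without size\n");
			return;
		}
	}
}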
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 6ab58a01b18e..56eff6004bc0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -351,7 +351,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
- if (IS_BROXTON(encoder->base.dev))
+ if (IS_BROXTON(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@@ -515,11 +515,11 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
@@ -528,21 +528,21 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_disable_dsi_pll(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_disable_dsi_pll(encoder);
}
@@ -564,10 +564,10 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 2e452c505e7e..708645443046 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,12 +393,12 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
* its timings to get how the BIOS set up the panel.
*/
if (dvo_val & DVO_ENABLE) {
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc) {
- mode = intel_crtc_mode_get(dev, crtc);
+ mode = intel_crtc_mode_get(dev, &crtc->base);
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -412,16 +412,14 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
return mode;
}
-static char intel_dvo_port_name(i915_reg_t dvo_reg)
+static enum port intel_dvo_port(i915_reg_t dvo_reg)
{
if (i915_mmio_reg_equal(dvo_reg, DVOA))
- return 'A';
+ return PORT_A;
else if (i915_mmio_reg_equal(dvo_reg, DVOB))
- return 'B';
- else if (i915_mmio_reg_equal(dvo_reg, DVOC))
- return 'C';
+ return PORT_B;
else
- return '?';
+ return PORT_C;
}
void intel_dvo_init(struct drm_device *dev)
@@ -464,6 +462,7 @@ void intel_dvo_init(struct drm_device *dev)
bool dvoinit;
enum pipe pipe;
uint32_t dpll[I915_MAX_PIPES];
+ enum port port;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -511,12 +510,15 @@ void intel_dvo_init(struct drm_device *dev)
if (!dvoinit)
continue;
+ port = intel_dvo_port(dvo->dvo_reg);
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type,
- "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+ "DVO %c", port_name(port));
intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 025e232a4205..3da4d466e332 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -82,12 +82,17 @@ static const struct engine_info {
},
};
-static struct intel_engine_cs *
+static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
- struct intel_engine_cs *engine = &dev_priv->engine[id];
+ struct intel_engine_cs *engine;
+
+ GEM_BUG_ON(dev_priv->engine[id]);
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
engine->id = id;
engine->i915 = dev_priv;
@@ -97,7 +102,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
- return engine;
+ /* Nothing to do here, execute in order of dependencies */
+ engine->schedule = NULL;
+
+ dev_priv->engine[id] = engine;
+ return 0;
}
/**
@@ -110,13 +119,16 @@ int intel_engines_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+ unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
int (*init)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int i;
int ret;
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+ WARN_ON(ring_mask == 0);
+ WARN_ON(ring_mask &
GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
@@ -131,7 +143,11 @@ int intel_engines_init(struct drm_device *dev)
if (!init)
continue;
- ret = init(intel_engine_setup(dev_priv, i));
+ ret = intel_engine_setup(dev_priv, i);
+ if (ret)
+ goto cleanup;
+
+ ret = init(dev_priv->engine[i]);
if (ret)
goto cleanup;
@@ -143,7 +159,7 @@ int intel_engines_init(struct drm_device *dev)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+ if (WARN_ON(mask != ring_mask))
device_info->ring_mask = mask;
device_info->num_rings = hweight32(mask);
@@ -151,17 +167,17 @@ int intel_engines_init(struct drm_device *dev)
return 0;
cleanup:
- for (i = 0; i < I915_NUM_ENGINES; i++) {
+ for_each_engine(engine, dev_priv, id) {
if (i915.enable_execlists)
- intel_logical_ring_cleanup(&dev_priv->engine[i]);
+ intel_logical_ring_cleanup(engine);
else
- intel_engine_cleanup(&dev_priv->engine[i]);
+ intel_engine_cleanup(engine);
}
return ret;
}
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -191,13 +207,13 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
kunmap(page);
}
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
- engine->last_submitted_seqno = seqno;
+
+ GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+ engine->timeline->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
@@ -207,15 +223,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
intel_engine_wakeup(engine);
}
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-}
-
-static void intel_engine_init_requests(struct intel_engine_cs *engine)
+static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
- init_request_active(&engine->last_request, NULL);
- INIT_LIST_HEAD(&engine->request_list);
+ engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
/**
@@ -229,12 +239,10 @@ static void intel_engine_init_requests(struct intel_engine_cs *engine)
*/
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
- INIT_LIST_HEAD(&engine->execlist_queue);
- spin_lock_init(&engine->execlist_lock);
-
- engine->fence_context = fence_context_alloc(1);
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
- intel_engine_init_requests(engine);
+ intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
i915_gem_batch_pool_init(engine, &engine->batch_pool);
@@ -251,7 +259,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
if (!obj)
- obj = i915_gem_object_create(&engine->i915->drm, size);
+ obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
return PTR_ERR(obj);
@@ -301,6 +309,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
return ret;
+ ret = i915_gem_render_state_init(engine);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -315,7 +327,142 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
intel_engine_cleanup_scratch(engine);
+ i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
}
+
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u64 acthd;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+ RING_ACTHD_UDW(engine->mmio_base));
+ else if (INTEL_GEN(dev_priv) >= 4)
+ acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+ else
+ acthd = I915_READ(ACTHD);
+
+ return acthd;
+}
+
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u64 bbaddr;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
+ RING_BBADDR_UDW(engine->mmio_base));
+ else
+ bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+
+ return bbaddr;
+}
+
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
+ default: return "";
+ }
+}
+
+static inline uint32_t
+read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
+ int subslice, i915_reg_t reg)
+{
+ uint32_t mcr;
+ uint32_t ret;
+ enum forcewake_domains fw_domains;
+
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ);
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ GEN8_MCR_SELECTOR,
+ FW_REG_READ | FW_REG_WRITE);
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
+ mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+ /*
+ * The HW expects the slice and subslice selectors to be reset to 0
+ * after reading out the registers.
+ */
+ WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
+ mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+ ret = I915_READ_FW(reg);
+
+ mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return ret;
+}
+
+/* NB: note the memset below; *instdone is fully cleared before being filled */
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+ struct intel_instdone *instdone)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u32 mmio_base = engine->mmio_base;
+ int slice;
+ int subslice;
+
+ memset(instdone, 0, sizeof(*instdone));
+
+ switch (INTEL_GEN(dev_priv)) {
+ default:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS)
+ break;
+
+ instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ read_subslice_reg(dev_priv, slice, subslice,
+ GEN7_SAMPLER_INSTDONE);
+ instdone->row[slice][subslice] =
+ read_subslice_reg(dev_priv, slice, subslice,
+ GEN7_ROW_INSTDONE);
+ }
+ break;
+ case 7:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS)
+ break;
+
+ instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+
+ break;
+ case 6:
+ case 5:
+ case 4:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id == RCS)
+ /* HACK: Using the wrong struct member */
+ instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+ break;
+ case 3:
+ case 2:
+ instdone->instdone = I915_READ(GEN2_INSTDONE);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index faa67624e1ed..62f215b12eb5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -48,17 +48,17 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
- return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
+ return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
}
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen < 4;
+ return INTEL_GEN(dev_priv) < 4;
}
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen <= 3;
+ return INTEL_GEN(dev_priv) <= 3;
}
/*
@@ -84,7 +84,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
{
int w, h;
- if (intel_rotation_90_or_270(cache->plane.rotation)) {
+ if (drm_rotation_90_or_270(cache->plane.rotation)) {
w = cache->plane.src_h;
h = cache->plane.src_w;
} else {
@@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) == 7)
lines = min(lines, 2048);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ lines = min(lines, 2560);
/* Hardware needs the full buffer stride, not just the active area. */
return lines * cache->fb.stride;
@@ -349,7 +351,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
return ilk_fbc_is_active(dev_priv);
else if (IS_GM45(dev_priv))
return g4x_fbc_is_active(dev_priv);
@@ -363,9 +365,9 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
fbc->active = true;
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
- else if (INTEL_INFO(dev_priv)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_activate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_activate(dev_priv);
@@ -379,7 +381,7 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
fbc->active = false;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_deactivate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_deactivate(dev_priv);
@@ -559,7 +561,7 @@ again:
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
- if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+ if (ret && INTEL_GEN(dev_priv) <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
@@ -592,7 +594,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
fbc->threshold = ret;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
@@ -706,10 +708,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
- } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
max_w = 4096;
max_h = 2048;
} else {
@@ -774,6 +776,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ /* We don't need to use a state cache here since this information is
+ * global for all CRTCs.
+ */
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -802,7 +812,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
cache->plane.rotation != DRM_ROTATE_0) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
@@ -844,9 +854,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (intel_vgpu_active(dev_priv)) {
@@ -859,13 +868,8 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
return false;
}
- if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
- fbc->no_fbc_reason = "no enabled pipes can have FBC";
- return false;
- }
-
- if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
- fbc->no_fbc_reason = "no enabled planes can have FBC";
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
return false;
}
@@ -1051,23 +1055,19 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
- bool fbc_crtc_present = false;
- int i, j;
+ bool crtc_chosen = false;
+ int i;
mutex_lock(&fbc->lock);
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (fbc->crtc == to_intel_crtc(crtc)) {
- fbc_crtc_present = true;
- break;
- }
- }
- /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
- if (!fbc_crtc_present && fbc->crtc != NULL)
+ /* Does this atomic commit involve the CRTC currently tied to FBC? */
+ if (fbc->crtc &&
+ !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
+ goto out;
+
+ if (!intel_fbc_can_enable(dev_priv))
goto out;
/* Simply choose the first CRTC that is compatible and has a visible
@@ -1077,25 +1077,29 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
+ struct intel_crtc_state *intel_crtc_state;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);
if (!intel_plane_state->base.visible)
continue;
- for_each_crtc_in_state(state, crtc, crtc_state, j) {
- struct intel_crtc_state *intel_crtc_state =
- to_intel_crtc_state(crtc_state);
+ if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+ continue;
- if (plane_state->crtc != crtc)
- continue;
+ if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+ continue;
- if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
- break;
+ intel_crtc_state = to_intel_crtc_state(
+ drm_atomic_get_existing_crtc_state(state, &crtc->base));
- intel_crtc_state->enable_fbc = true;
- goto out;
- }
+ intel_crtc_state->enable_fbc = true;
+ crtc_chosen = true;
+ break;
}
+ if (!crtc_chosen)
+ fbc->no_fbc_reason = "no suitable CRTC for FBC";
+
out:
mutex_unlock(&fbc->lock);
}
@@ -1221,6 +1225,59 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
cancel_work_sync(&fbc->work.work);
}
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, fbc.underrun_work);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ mutex_lock(&fbc->lock);
+
+ /* Maybe we were scheduled twice. */
+ if (fbc->underrun_detected)
+ goto out;
+
+ DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+ fbc->underrun_detected = true;
+
+ intel_fbc_deactivate(dev_priv);
+out:
+ mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @dev_priv: i915 device instance
+ *
+ * Without FBC, most underruns are harmless and don't really cause too many
+ * problems, except for an annoying message on dmesg. With FBC, underruns can
+ * become black screens or even worse, especially when paired with bad
+ * watermarks. So in order for us to be on the safe side, completely disable FBC
+ * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
+ * already suggests that watermarks may be bad, so try to be as safe as
+ * possible.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (!fbc_supported(dev_priv))
+ return;
+
+ /* There's no guarantee that underrun_detected won't be set to true
+ * right after this check and before the work is scheduled, but that's
+ * not a problem since we'll check it again in the work function,
+ * with the FBC lock held. This check here just avoids scheduling the
+ * work unnecessarily, and it relies on the fact that we never switch
+ * underrun_detected back to false once it is true. */
+ if (READ_ONCE(fbc->underrun_detected))
+ return;
+
+ schedule_work(&fbc->underrun_work);
+}
+
/**
* intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
* @dev_priv: i915 device instance
@@ -1238,7 +1295,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
return;
for_each_intel_crtc(&dev_priv->drm, crtc)
- if (intel_crtc_active(&crtc->base) &&
+ if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->base.primary->state)->base.visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
@@ -1292,6 +1349,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
enum pipe pipe;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
+ INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
@@ -1317,7 +1375,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}
/* This value was pulled out of someone's hat */
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b7098f98bb67..beb08982dc0b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -102,16 +102,13 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_set_par = intel_fbdev_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int intelfb_alloc(struct drm_fb_helper *helper,
@@ -359,7 +356,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
- struct drm_device *dev = fb_helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
@@ -512,7 +509,7 @@ retry:
* fbdev helper library.
*/
if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+ num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
@@ -636,7 +633,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
fb->base.pixel_format,
- fb->base.modifier[0]);
+ fb->base.modifier);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
@@ -700,11 +697,11 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+ if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -717,7 +714,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->preferred_bpp = 32;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
- INTEL_INFO(dev)->num_pipes, 4);
+ INTEL_INFO(dev_priv)->num_pipes, 4);
if (ret) {
kfree(ifbdev);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 2aa744081f09..e660d8b4bbc3 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -57,7 +57,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->cpu_fifo_underrun_disabled)
return false;
@@ -75,7 +75,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->pch_fifo_underrun_disabled)
return false;
@@ -245,22 +245,21 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
bool old;
assert_spin_locked(&dev_priv->irq_lock);
- old = !intel_crtc->cpu_fifo_underrun_disabled;
- intel_crtc->cpu_fifo_underrun_disabled = !enable;
+ old = !crtc->cpu_fifo_underrun_disabled;
+ crtc->cpu_fifo_underrun_disabled = !enable;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN8(dev) || IS_GEN9(dev))
+ else if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
@@ -314,8 +313,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv, (enum pipe) pch_transcoder);
unsigned long flags;
bool old;
@@ -330,8 +329,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- old = !intel_crtc->pch_fifo_underrun_disabled;
- intel_crtc->pch_fifo_underrun_disabled = !enable;
+ old = !crtc->pch_fifo_underrun_disabled;
+ crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(&dev_priv->drm,
@@ -358,7 +357,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
@@ -366,12 +365,14 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv) &&
- to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
+ crtc->cpu_fifo_underrun_disabled)
return;
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
DRM_ERROR("CPU pipe %c FIFO underrun\n",
pipe_name(pipe));
+
+ intel_fbc_handle_fifo_underrun_irq(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 76ceb539f9f0..7bab41218cf7 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -53,16 +53,17 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (!frontbuffer_bits)
- return;
+ return false;
__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ return true;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 5cdf7aa75be5..0053258e03d3 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -64,7 +64,7 @@ struct drm_i915_gem_request;
*/
struct i915_guc_client {
struct i915_vma *vma;
- void *client_base; /* first page (only) of above */
+ void *vaddr;
struct i915_gem_context *owner;
struct intel_guc *guc;
@@ -123,10 +123,28 @@ struct intel_guc_fw {
uint32_t ucode_offset;
};
+struct intel_guc_log {
+ uint32_t flags;
+ struct i915_vma *vma;
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+
+ /* logging related stats */
+ u32 capture_miss_count;
+ u32 flush_interrupt_count;
+ u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 flush_count[GUC_MAX_LOG_BUFFER];
+};
+
struct intel_guc {
struct intel_guc_fw guc_fw;
- uint32_t log_flags;
- struct i915_vma *log_vma;
+ struct intel_guc_log log;
+
+ /* GuC2Host interrupt related state */
+ bool interrupts_enabled;
struct i915_vma *ads_vma;
struct i915_vma *ctx_pool_vma;
@@ -146,6 +164,9 @@ struct intel_guc {
uint64_t submissions[I915_NUM_ENGINES];
uint32_t last_seqno[I915_NUM_ENGINES];
+
+ /* To serialize the Host2GuC actions */
+ struct mutex action_lock;
};
/* intel_guc_loader.c */
@@ -163,5 +184,10 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
+void i915_guc_register(struct drm_i915_private *dev_priv);
+void i915_guc_unregister(struct drm_i915_private *dev_priv);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index e40db2d2ae99..324ea902558b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -104,9 +104,9 @@
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_DPC_PAGES 3
+#define GUC_LOG_DPC_PAGES 7
#define GUC_LOG_DPC_SHIFT 6
-#define GUC_LOG_ISR_PAGES 3
+#define GUC_LOG_ISR_PAGES 7
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
@@ -419,15 +419,87 @@ struct guc_ads {
u32 reserved2[4];
} __packed;
+/* GuC logging structures */
+
+enum guc_log_buffer_type {
+ GUC_ISR_LOG_BUFFER,
+ GUC_DPC_LOG_BUFFER,
+ GUC_CRASH_DUMP_LOG_BUFFER,
+ GUC_MAX_LOG_BUFFER
+};
+
+/**
+ * DOC: GuC Log buffer Layout
+ *
+ * Page0 +-------------------------------+
+ * | ISR state header (32 bytes) |
+ * | DPC state header |
+ * | Crash dump state header |
+ * Page1 +-------------------------------+
+ * | ISR logs |
+ * Page9 +-------------------------------+
+ * | DPC logs |
+ * Page17 +-------------------------------+
+ * | Crash Dump logs |
+ * +-------------------------------+
+ *
+ * The state structure below coordinates retrieval of the GuC firmware logs;
+ * separate state is maintained for each log buffer type.
+ * read_ptr points to the location last read by i915 in the log buffer and is
+ * read-only for the GuC firmware. write_ptr is advanced by GuC by the number
+ * of bytes written for each log entry and is read-only for i915.
+ * When any type of log buffer becomes half full, GuC sends a flush interrupt.
+ * While the firmware writes to the second half of the buffer, it expects the
+ * Host to consume the first half and then send a flush-complete
+ * acknowledgment, so that it never overwrites unread data and no logs are
+ * lost. So when a buffer gets half filled and i915 has requested the
+ * interrupt, GuC sets the flush_to_file field, copies write_ptr into
+ * sampled_write_ptr and raises the interrupt.
+ * On receiving the interrupt i915 should read the buffer, clear the
+ * flush_to_file field and update read_ptr to the value of sampled_write_ptr
+ * before sending an acknowledgment to GuC. The marker and version fields are
+ * for internal use by GuC and opaque to i915. The buffer_full_cnt field is
+ * incremented every time GuC detects a log buffer overflow.
+struct guc_log_buffer_state {
+ u32 marker[2];
+ u32 read_ptr;
+ u32 write_ptr;
+ u32 size;
+ u32 sampled_write_ptr;
+ union {
+ struct {
+ u32 flush_to_file:1;
+ u32 buffer_full_cnt:4;
+ u32 reserved:27;
+ };
+ u32 flags;
+ };
+ u32 version;
+} __packed;
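
The handshake documented above amounts to: copy everything between read_ptr and sampled_write_ptr out of the buffer, advance read_ptr, clear flush_to_file, then acknowledge. Below is a minimal host-side sketch of that sequence; the struct mirrors only the guc_log_buffer_state fields used here, and copy_to_relay() and send_flush_complete() are placeholder names for illustration, not real i915 functions.

/*
 * Sketch of the flush handshake described above, not driver code.
 */
#include <stdint.h>

struct log_state {			/* subset of guc_log_buffer_state */
	uint32_t read_ptr;
	uint32_t size;
	uint32_t sampled_write_ptr;
	uint32_t flush_to_file;
};

static void copy_to_relay(const uint8_t *data, uint32_t len) { (void)data; (void)len; }
static void send_flush_complete(void) { /* e.g. HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE */ }

static void consume_log_buffer(struct log_state *state, const uint8_t *log_data)
{
	uint32_t write = state->sampled_write_ptr;	/* frozen by GuC at flush time */
	uint32_t read = state->read_ptr;

	if (read < write) {
		copy_to_relay(log_data + read, write - read);
	} else if (read > write) {
		/* The write pointer wrapped: copy the tail, then the head. */
		copy_to_relay(log_data + read, state->size - read);
		copy_to_relay(log_data, write);
	}

	/* Record how far we read and tell GuC the flush is handled. */
	state->read_ptr = write;
	state->flush_to_file = 0;
	send_flush_complete();
}

Because flush interrupts arrive when a buffer is only half full, the consumer has a full half-buffer's worth of slack before the firmware would have to overwrite unread data.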
+
+union guc_log_control {
+ struct {
+ u32 logging_enabled:1;
+ u32 reserved1:3;
+ u32 verbosity:4;
+ u32 reserved2:24;
+ };
+ u32 value;
+} __packed;
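
As a usage note, the bitfield/value pairing above lets a caller build the control word directly and hand it to i915_guc_log_control() declared in intel_guc.h. A hedged sketch (the verbosity value is arbitrary here, and dev_priv and ret are assumed to exist at the call site):

union guc_log_control control = {
	.logging_enabled = 1,
	.verbosity = 2,		/* example only; valid range is set by the firmware interface */
};

ret = i915_guc_log_control(dev_priv, control.value);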
+
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+ HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
HOST2GUC_ACTION_LIMIT
};
@@ -449,4 +521,10 @@ enum guc2host_status {
GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
};
+/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
+enum guc2host_message {
+ GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
+ GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
+};
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 6fd39efb7894..34d6ad2cf7c1 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -100,12 +100,13 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
/* tell all command streamers NOT to forward interrupts or vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
@@ -117,12 +118,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
u32 tmp;
/* tell all command streamers to forward interrupts (but not vblank) to GuC */
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -209,11 +211,13 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
+ params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
if (i915.guc_log_level >= 0) {
- params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
params[GUC_CTL_DEBUG] =
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
- }
+ } else
+ params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
if (guc->ads_vma) {
u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
@@ -347,7 +351,6 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- struct drm_device *dev = &dev_priv->drm;
struct i915_vma *vma;
int ret;
@@ -375,24 +378,22 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
/* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
- /* WaDisableMinuteIaClockGating:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ /* WaDisableMinuteIaClockGating:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
- /* WaC6DisallowByGfxPause*/
- if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaC6DisallowByGfxPause:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
@@ -484,6 +485,7 @@ int intel_guc_setup(struct drm_device *dev)
}
guc_interrupts_release(dev_priv);
+ gen9_reset_guc_interrupts(dev_priv);
guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
@@ -528,6 +530,9 @@ int intel_guc_setup(struct drm_device *dev)
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
if (i915.enable_guc_submission) {
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
err = i915_guc_submission_enable(dev_priv);
if (err)
goto fail;
@@ -561,7 +566,7 @@ fail:
ret = 0;
}
- if (err == 0 && !HAS_GUC_UCODE(dev))
+ if (err == 0 && !HAS_GUC_UCODE(dev_priv))
; /* Don't mention the GuC! */
else if (err == 0)
DRM_INFO("GuC firmware load skipped\n");
@@ -720,23 +725,28 @@ void intel_guc_init(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- /* A negative value means "use platform default" */
- if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev);
- if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+ if (!HAS_GUC(dev_priv)) {
+ i915.enable_guc_loading = 0;
+ i915.enable_guc_submission = 0;
+ } else {
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_loading < 0)
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+ if (i915.enable_guc_submission < 0)
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+ }
- if (!HAS_GUC_UCODE(dev)) {
+ if (!HAS_GUC_UCODE(dev_priv)) {
fw_path = NULL;
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
- } else if (IS_KABYLAKE(dev)) {
+ } else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 434f4d5c553d..290384e86c63 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -31,14 +31,20 @@
* GPU among multiple virtual machines on a time-sharing basis. Each
* virtual machine is presented a virtual GPU (vGPU), which has equivalent
* features as the underlying physical GPU (pGPU), so i915 driver can run
- * seamlessly in a virtual machine. This file provides the englightments
- * of GVT and the necessary components used by GVT in i915 driver.
+ * seamlessly in a virtual machine.
+ *
+ * To virtualize GPU resources, the GVT-g driver depends on hypervisor
+ * technology (e.g. KVM/VFIO/mdev, Xen) to provide resource access trapping,
+ * with the resources themselves virtualized within the GVT-g device module.
+ * More architectural design documentation is available at
+ * https://01.org/group/2230/documentation-list.
*/
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
return true;
+ if (IS_SKYLAKE(dev_priv))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 960211df74db..25df2d65b985 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,7 +24,7 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
-#include "gvt/gvt.h"
+struct intel_gvt;
#ifdef CONFIG_DRM_I915_GVT
int intel_gvt_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
new file mode 100644
index 000000000000..53df5b11bff4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static bool
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
+{
+ if (INTEL_GEN(engine->i915) >= 8) {
+ return (ipehr >> 23) == 0x1c;
+ } else {
+ ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+ return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER);
+ }
+}
+
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+ u64 offset)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_cs *signaller;
+ enum intel_engine_id id;
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
+
+ if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
+ return signaller;
+ }
+ } else {
+ u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
+
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
+ return signaller;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+ engine->name, ipehr, offset);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ void __iomem *vaddr;
+ u32 cmd, ipehr, head;
+ u64 offset = 0;
+ int i, backwards;
+
+ /*
+ * This function does not support execlist mode - any attempt to
+ * proceed further into this function will result in a kernel panic
+ * when dereferencing ring->buffer, which is not set up in execlist
+ * mode.
+ *
+ * The correct way of doing it would be to derive the currently
+ * executing ring buffer from the current context, which is derived
+ * from the currently running request. Unfortunately, to get the
+ * current request we would have to grab the struct_mutex before doing
+ * anything else, which would be ill-advised since some other thread
+ * might have grabbed it already and managed to hang itself, causing
+ * the hang checker to deadlock.
+ *
+ * Therefore, this function does not support execlist mode in its
+ * current form. Just return NULL and move on.
+ */
+ if (engine->buffer == NULL)
+ return NULL;
+
+ ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ if (!ipehr_is_semaphore_wait(engine, ipehr))
+ return NULL;
+
+ /*
+ * HEAD is likely pointing to the dword after the actual command,
+ * so scan backwards until we find the MBOX. But limit it to just 3
+ * or 4 dwords depending on the semaphore wait command size.
+ * Note that we don't care about ACTHD here since that might
+ * point at a batch, and semaphores are always emitted into the
+ * ringbuffer itself.
+ */
+ head = I915_READ_HEAD(engine) & HEAD_ADDR;
+ backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
+ vaddr = (void __iomem *)engine->buffer->vaddr;
+
+ for (i = backwards; i; --i) {
+ /*
+ * Be paranoid and presume the hw has gone off into the wild -
+ * our ring is smaller than what the hardware (and hence
+ * HEAD_ADDR) allows. Also handles wrap-around.
+ */
+ head &= engine->buffer->size - 1;
+
+ /* This here seems to blow up */
+ cmd = ioread32(vaddr + head);
+ if (cmd == ipehr)
+ break;
+
+ head -= 4;
+ }
+
+ if (!i)
+ return NULL;
+
+ *seqno = ioread32(vaddr + head + 4) + 1;
+ if (INTEL_GEN(dev_priv) >= 8) {
+ offset = ioread32(vaddr + head + 12);
+ offset <<= 32;
+ offset |= ioread32(vaddr + head + 8);
+ }
+ return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
+}
+
+static int semaphore_passed(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_cs *signaller;
+ u32 seqno;
+
+ engine->hangcheck.deadlock++;
+
+ signaller = semaphore_waits_for(engine, &seqno);
+ if (signaller == NULL)
+ return -1;
+
+ if (IS_ERR(signaller))
+ return 0;
+
+ /* Prevent pathological recursion due to driver bugs */
+ if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
+ return -1;
+
+ if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+ return 1;
+
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id)
+ engine->hangcheck.deadlock = 0;
+}
+
+static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
+{
+ u32 tmp = current_instdone | *old_instdone;
+ bool unchanged;
+
+ unchanged = tmp == *old_instdone;
+ *old_instdone |= tmp;
+
+ return unchanged;
+}
+
+static bool subunits_stuck(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_instdone instdone;
+ struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
+ bool stuck;
+ int slice;
+ int subslice;
+
+ if (engine->id != RCS)
+ return true;
+
+ intel_engine_get_instdone(engine, &instdone);
+
+ /* There might be unstable subunit states even when
+ * actual head is not moving. Filter out the unstable ones by
+ * accumulating the undone -> done transitions and only
+ * consider those as progress.
+ */
+ stuck = instdone_unchanged(instdone.instdone,
+ &accu_instdone->instdone);
+ stuck &= instdone_unchanged(instdone.slice_common,
+ &accu_instdone->slice_common);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
+ &accu_instdone->sampler[slice][subslice]);
+ stuck &= instdone_unchanged(instdone.row[slice][subslice],
+ &accu_instdone->row[slice][subslice]);
+ }
+
+ return stuck;
+}
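
The accumulation trick in instdone_unchanged() above is easiest to see with concrete numbers. The following toy walk-through (not driver code, arbitrary bit values) shows how a newly-set bit counts as progress while repeats of already-seen bits do not:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool instdone_unchanged(uint32_t current, uint32_t *old)
{
	uint32_t tmp = current | *old;
	bool unchanged = (tmp == *old);

	*old |= tmp;
	return unchanged;
}

int main(void)
{
	uint32_t old = 0x3;

	/* 0x5 sets a bit not seen before: the subunit made progress. */
	printf("%d\n", instdone_unchanged(0x5, &old));	/* prints 0 */
	/* 0x1 repeats already-seen bits only: counted as unchanged. */
	printf("%d\n", instdone_unchanged(0x1, &old));	/* prints 1 */
	return 0;
}

subunits_stuck() ANDs these results over every slice/subslice, so an engine is only reported stuck when no subunit showed a new transition during the whole check.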
+
+static enum intel_engine_hangcheck_action
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+ if (acthd != engine->hangcheck.acthd) {
+
+ /* Clear subunit states on head movement */
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+
+ return HANGCHECK_ACTIVE;
+ }
+
+ if (!subunits_stuck(engine))
+ return HANGCHECK_ACTIVE;
+
+ return HANGCHECK_HUNG;
+}
+
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ enum intel_engine_hangcheck_action ha;
+ u32 tmp;
+
+ ha = head_stuck(engine, acthd);
+ if (ha != HANGCHECK_HUNG)
+ return ha;
+
+ if (IS_GEN2(dev_priv))
+ return HANGCHECK_HUNG;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ tmp = I915_READ_CTL(engine);
+ if (tmp & RING_WAIT) {
+ i915_handle_error(dev_priv, 0,
+ "Kicking stuck wait on %s",
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
+ return HANGCHECK_KICK;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ switch (semaphore_passed(engine)) {
+ default:
+ return HANGCHECK_HUNG;
+ case 1:
+ i915_handle_error(dev_priv, 0,
+ "Kicking stuck semaphore on %s",
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
+ return HANGCHECK_KICK;
+ case 0:
+ return HANGCHECK_WAIT;
+ }
+ }
+
+ return HANGCHECK_HUNG;
+}
+
+/*
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. We keep track of per-ring seqno progress and
+ * if there is no progress, the hangcheck score for that ring is increased.
+ * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
+ * we kick the ring. If we see no progress on three subsequent calls
+ * we assume the chip is wedged and try to fix it by resetting it.
+ */
+static void i915_hangcheck_elapsed(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ gpu_error.hangcheck_work.work);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int hung = 0, stuck = 0;
+ int busy_count = 0;
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+#define ACTIVE_DECAY 15
+
+ if (!i915.enable_hangcheck)
+ return;
+
+ if (!READ_ONCE(dev_priv->gt.awake))
+ return;
+
+ /* As enabling the GPU requires fairly extensive mmio access,
+ * periodically arm the mmio checker to see if we are triggering
+ * any invalid access.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+ for_each_engine(engine, dev_priv, id) {
+ bool busy = intel_engine_has_waiter(engine);
+ u64 acthd;
+ u32 seqno;
+ u32 submit;
+
+ semaphore_clear_deadlocks(dev_priv);
+
+ /* We don't strictly need an irq-barrier here, as we are not
+ * serving an interrupt request, be paranoid in case the
+ * barrier has side-effects (such as preventing a broken
+ * cacheline snoop) and so be sure that we can see the seqno
+ * advance. If the seqno should stick, due to a stale
+ * cacheline, we would erroneously declare the GPU hung.
+ */
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ acthd = intel_engine_get_active_head(engine);
+ seqno = intel_engine_get_seqno(engine);
+ submit = intel_engine_last_submit(engine);
+
+ if (engine->hangcheck.seqno == seqno) {
+ if (i915_seqno_passed(seqno, submit)) {
+ engine->hangcheck.action = HANGCHECK_IDLE;
+ } else {
+ /* We always increment the hangcheck score
+ * if the engine is busy and still processing
+ * the same request, so that no single request
+ * can run indefinitely (such as a chain of
+ * batches). The only time we do not increment
+ * the hangcheck score on this ring is when the
+ * engine is in a legitimate wait for another
+ * engine. In that case the waiting engine is a
+ * victim and we want to be sure we catch the
+ * right culprit. Then every time we do kick
+ * the ring, add a small increment to the
+ * score so that we can catch a batch that is
+ * being repeatedly kicked and so responsible
+ * for stalling the machine.
+ */
+ engine->hangcheck.action =
+ engine_stuck(engine, acthd);
+
+ switch (engine->hangcheck.action) {
+ case HANGCHECK_IDLE:
+ case HANGCHECK_WAIT:
+ break;
+ case HANGCHECK_ACTIVE:
+ engine->hangcheck.score += BUSY;
+ break;
+ case HANGCHECK_KICK:
+ engine->hangcheck.score += KICK;
+ break;
+ case HANGCHECK_HUNG:
+ engine->hangcheck.score += HUNG;
+ break;
+ }
+ }
+
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ hung |= intel_engine_flag(engine);
+ if (engine->hangcheck.action != HANGCHECK_HUNG)
+ stuck |= intel_engine_flag(engine);
+ }
+ } else {
+ engine->hangcheck.action = HANGCHECK_ACTIVE;
+
+ /* Gradually reduce the count so that we catch DoS
+ * attempts across multiple batches.
+ */
+ if (engine->hangcheck.score > 0)
+ engine->hangcheck.score -= ACTIVE_DECAY;
+ if (engine->hangcheck.score < 0)
+ engine->hangcheck.score = 0;
+
+ /* Clear head and subunit states on seqno movement */
+ acthd = 0;
+
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+ }
+
+ engine->hangcheck.seqno = seqno;
+ engine->hangcheck.acthd = acthd;
+ busy_count += busy;
+ }
+
+ if (hung) {
+ char msg[80];
+ unsigned int tmp;
+ int len;
+
+ /* If some rings hung but others were still busy, only
+ * blame the hanging rings in the synopsis.
+ */
+ if (stuck != hung)
+ hung &= ~stuck;
+ len = scnprintf(msg, sizeof(msg),
+ "%s on ", stuck == hung ? "No progress" : "Hang");
+ for_each_engine_masked(engine, dev_priv, hung, tmp)
+ len += scnprintf(msg + len, sizeof(msg) - len,
+ "%s, ", engine->name);
+ msg[len-2] = '\0';
+
+ return i915_handle_error(dev_priv, hung, msg);
+ }
+
+ /* Reset timer in case GPU hangs without another request being added */
+ if (busy_count)
+ i915_queue_hangcheck(dev_priv);
+}
+
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+{
+ memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+void intel_hangcheck_init(struct drm_i915_private *i915)
+{
+ INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
+ i915_hangcheck_elapsed);
+}
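
To make the scoring above concrete: with BUSY = 1, KICK = 5, HUNG = 20 and ACTIVE_DECAY = 15, an engine that keeps getting kicked on the same seqno accumulates 5 points per hangcheck pass, while a single pass with seqno progress knocks 15 points back off. A rough stand-alone simulation of that bookkeeping follows; the 31-point threshold is an assumption standing in for HANGCHECK_SCORE_RING_HUNG, which is defined elsewhere in the driver.

#include <stdio.h>

#define BUSY		1
#define KICK		5
#define HUNG		20
#define ACTIVE_DECAY	15
#define SCORE_RING_HUNG	31	/* assumed threshold, for illustration only */

int main(void)
{
	int score = 0, pass = 0;

	/* Same seqno every check, ring repeatedly kicked. */
	while (score < SCORE_RING_HUNG) {
		score += KICK;
		pass++;
		printf("check %d: score %d\n", pass, score);
	}
	printf("declared hung after %d checks\n", pass);

	/* One check where the seqno moved: the score decays instead. */
	score -= ACTIVE_DECAY;
	if (score < 0)
		score = 0;
	printf("after progress: score %d\n", score);
	return 0;
}

Under these assumptions a repeatedly kicked batch crosses the threshold after roughly seven hangcheck passes, while any real progress quickly drains the score back toward zero.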
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f40a35f2913a..fb88e32e25a3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t enabled_bits;
- enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
@@ -864,7 +864,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -879,9 +879,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
@@ -911,9 +911,9 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -956,7 +956,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev) &&
+ if (!HAS_PCH_SPLIT(dev_priv) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -975,14 +975,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
}
-static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
WARN_ON(!crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void g4x_enable_hdmi(struct intel_encoder *encoder,
@@ -991,21 +993,20 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_encoder *encoder,
@@ -1040,8 +1041,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
* FIXME: BSpec says this should be done at the end of
* of the modeset sequence, so not sure if this isn't too soon.
*/
- if (crtc->config->pipe_bpp > 24 &&
- crtc->config->pixel_multiplier > 1) {
+ if (pipe_config->pipe_bpp > 24 &&
+ pipe_config->pixel_multiplier > 1) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -1055,8 +1056,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_encoder *encoder,
@@ -1073,7 +1074,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
/*
@@ -1086,7 +1087,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
* 4. enable HDMI clock gating
*/
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
I915_WRITE(TRANS_CHICKEN1(pipe),
I915_READ(TRANS_CHICKEN1(pipe)) |
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
@@ -1098,7 +1099,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
@@ -1110,8 +1111,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_encoder *encoder,
@@ -1141,7 +1142,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -1164,7 +1165,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1178,9 +1179,7 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
@@ -1190,9 +1189,7 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
}
@@ -1241,7 +1238,7 @@ static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1249,11 +1246,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
- if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+ if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+ if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
return MODE_OK;
@@ -1265,6 +1262,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
{
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1287,7 +1285,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock, true);
/* if we can't do 8bpc we may still be able to do 12bpc */
- if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+ if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK)
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
return status;
@@ -1297,7 +1295,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(to_i915(dev)))
return false;
/*
@@ -1312,7 +1310,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
@@ -1339,7 +1337,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
clock_12bpc *= 2;
}
- if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
@@ -1644,13 +1642,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
intel_hdmi_prepare(encoder);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
}
@@ -1662,9 +1659,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
vlv_phy_pre_encoder_enable(encoder);
@@ -1673,7 +1668,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1799,6 +1794,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ u8 ddc_pin;
+
+ if (info->alternate_ddc_pin) {
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+ info->alternate_ddc_pin, port_name(port));
+ return info->alternate_ddc_pin;
+ }
+
+ switch (port) {
+ case PORT_B:
+ if (IS_BROXTON(dev_priv))
+ ddc_pin = GMBUS_PIN_1_BXT;
+ else
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ case PORT_C:
+ if (IS_BROXTON(dev_priv))
+ ddc_pin = GMBUS_PIN_2_BXT;
+ else
+ ddc_pin = GMBUS_PIN_DPC;
+ break;
+ case PORT_D:
+ if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = GMBUS_PIN_DPD_CHV;
+ else
+ ddc_pin = GMBUS_PIN_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ }
+
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+ ddc_pin, port_name(port));
+
+ return ddc_pin;
+}
+
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
@@ -1808,7 +1847,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
- uint8_t alternate_ddc_pin;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
@@ -1826,12 +1864,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+
switch (port) {
case PORT_B:
- if (IS_BROXTON(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
@@ -1842,61 +1878,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
- if (IS_BROXTON(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
- if (WARN_ON(IS_BROXTON(dev_priv)))
- intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
- else if (IS_CHERRYVIEW(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
- /* On SKL PORT E doesn't have seperate GMBUS pin
- * We rely on VBT to set a proper alternate GMBUS pin. */
- alternate_ddc_pin =
- dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
- switch (alternate_ddc_pin) {
- case DDC_PIN_B:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
- break;
- case DDC_PIN_C:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
- break;
- case DDC_PIN_D:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
- break;
- default:
- MISSING_CASE(alternate_ddc_pin);
- }
intel_encoder->hpd_pin = HPD_PORT_E;
break;
- case PORT_A:
- intel_encoder->hpd_pin = HPD_PORT_A;
- /* Internal port only for eDP. */
default:
- BUG();
+ MISSING_CASE(port);
+ return;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
- } else if (IS_G4X(dev)) {
+ } else if (IS_G4X(dev_priv)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
- } else if (HAS_PCH_IBX(dev)) {
+ } else if (HAS_PCH_IBX(dev_priv)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
@@ -1906,7 +1913,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1920,7 +1927,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev) && !IS_GM45(dev)) {
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -1929,6 +1936,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@@ -1949,7 +1957,7 @@ void intel_hdmi_init(struct drm_device *dev,
DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -1957,29 +1965,30 @@ void intel_hdmi_init(struct drm_device *dev,
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev))
+ else if (HAS_PCH_IBX(dev_priv))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
- if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->port = port;
+ if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@@ -1993,7 +2002,7 @@ void intel_hdmi_init(struct drm_device *dev,
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 334d47b5811a..3d546c019de0 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -501,7 +501,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (intel_connector->mst_port)
continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+ if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
intel_connector->encoder->hpd_pin > HPD_NONE) {
connector->polled = enabled ?
DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 79aab9ad6faa..83f260bb4eef 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -138,11 +138,10 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- struct drm_device *dev = &dev_priv->drm;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
+ if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -468,13 +467,9 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- const unsigned int fw =
- intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
- FW_REG_READ | FW_REG_WRITE);
int i = 0, inc, try = 0;
int ret = 0;
- intel_uncore_forcewake_get(dev_priv, fw);
retry:
I915_WRITE_FW(GMBUS0, bus->reg0);
@@ -576,7 +571,6 @@ timeout:
ret = -EAGAIN;
out:
- intel_uncore_forcewake_put(dev_priv, fw);
return ret;
}
@@ -633,10 +627,10 @@ int intel_setup_gmbus(struct drm_device *dev)
unsigned int pin;
int ret;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return 0;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH_DISPLAY(dev_priv))
dev_priv->gpio_mmio_base =
@@ -674,7 +668,7 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
bus->force_bit = 1;
intel_gpio_setup(bus, pin);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0adb879833ff..0a09024d6ca3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -275,8 +275,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
engine->disable_lite_restore_wa =
- (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
@@ -366,7 +365,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
+ reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@@ -433,15 +432,17 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
static void execlists_dequeue(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *cursor, *last;
+ struct drm_i915_gem_request *last;
struct execlist_port *port = engine->execlist_port;
+ unsigned long flags;
+ struct rb_node *rb;
bool submit = false;
last = port->request;
if (last)
/* WaIdleLiteRestore:bdw,skl
* Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
- * as we resubmit the request. See gen8_emit_request()
+ * as we resubmit the request. See gen8_emit_breadcrumb()
* for where we prepare the padding after the end of the
* request.
*/
@@ -470,8 +471,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock(&engine->execlist_lock);
- list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ rb = engine->execlist_first;
+ while (rb) {
+ struct drm_i915_gem_request *cursor =
+ rb_entry(rb, typeof(*cursor), priotree.node);
+
/* Can we combine this request with the current port? It has to
* be the same context/ringbuffer and not have any exceptions
* (e.g. GVT saying never to combine contexts).
@@ -494,7 +499,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* context (even though a different request) to
* the second port.
*/
- if (ctx_single_port_submission(cursor->ctx))
+ if (ctx_single_port_submission(last->ctx) ||
+ ctx_single_port_submission(cursor->ctx))
break;
GEM_BUG_ON(last->ctx == cursor->ctx);
@@ -502,17 +508,30 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
i915_gem_request_assign(&port->request, last);
port++;
}
+
+ rb = rb_next(rb);
+ rb_erase(&cursor->priotree.node, &engine->execlist_queue);
+ RB_CLEAR_NODE(&cursor->priotree.node);
+ cursor->priotree.priority = INT_MAX;
+
+ /* We keep the previous context alive until we retire the
+ * following request. This ensures that the context object
+ * is still pinned for any residual writes the HW makes into it
+ * on the context switch into the next object following the
+ * breadcrumb. Otherwise, we may retire the context too early.
+ */
+ cursor->previous_context = engine->last_context;
+ engine->last_context = cursor->ctx;
+
+ __i915_gem_request_submit(cursor);
last = cursor;
submit = true;
}
if (submit) {
- /* Decouple all the requests submitted from the queue */
- engine->execlist_queue.next = &cursor->execlist_link;
- cursor->execlist_link.prev = &engine->execlist_queue;
-
i915_gem_request_assign(&port->request, last);
+ engine->execlist_first = rb;
}
- spin_unlock(&engine->execlist_lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
if (submit)
execlists_submit_ports(engine);
@@ -523,6 +542,28 @@ static bool execlists_elsp_idle(struct intel_engine_cs *engine)
return !engine->execlist_port[0].request;
}
+/**
+ * intel_execlists_idle() - Determine if all engine submission ports are idle
+ * @dev_priv: i915 device private
+ *
+ * Return true if there are no requests pending on any of the submission ports
+ * of any engines.
+ */
+bool intel_execlists_idle(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!i915.enable_execlists)
+ return true;
+
+ for_each_engine(engine, dev_priv, id)
+ if (!execlists_elsp_idle(engine))
+ return false;
+
+ return true;
+}
+
static bool execlists_elsp_ready(struct intel_engine_cs *engine)
{
int port;
@@ -593,18 +634,147 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
}
+static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
+{
+ struct rb_node **p, *rb;
+ bool first = true;
+
+ /* most positive priority is scheduled first, equal priorities fifo */
+ rb = NULL;
+ p = &root->rb_node;
+ while (*p) {
+ struct i915_priotree *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), node);
+ if (pt->priority > pos->priority) {
+ p = &rb->rb_left;
+ } else {
+ p = &rb->rb_right;
+ first = false;
+ }
+ }
+ rb_link_node(&pt->node, rb, p);
+ rb_insert_color(&pt->node, root);
+
+ return first;
+}
+
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
- spin_lock_irqsave(&engine->execlist_lock, flags);
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
- list_add_tail(&request->execlist_link, &engine->execlist_queue);
+ if (insert_request(&request->priotree, &engine->execlist_queue))
+ engine->execlist_first = &request->priotree.node;
if (execlists_elsp_idle(engine))
tasklet_hi_schedule(&engine->irq_tasklet);
- spin_unlock_irqrestore(&engine->execlist_lock, flags);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
+static struct intel_engine_cs *
+pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
+{
+ struct intel_engine_cs *engine;
+
+ engine = container_of(pt,
+ struct drm_i915_gem_request,
+ priotree)->engine;
+ if (engine != locked) {
+ if (locked)
+ spin_unlock_irq(&locked->timeline->lock);
+ spin_lock_irq(&engine->timeline->lock);
+ }
+
+ return engine;
+}
+
+static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+{
+ static DEFINE_MUTEX(lock);
+ struct intel_engine_cs *engine = NULL;
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ LIST_HEAD(dfs);
+
+ if (prio <= READ_ONCE(request->priotree.priority))
+ return;
+
+ /* Need global lock to use the temporary link inside i915_dependency */
+ mutex_lock(&lock);
+
+ stack.signaler = &request->priotree;
+ list_add(&stack.dfs_link, &dfs);
+
+ /* Recursively bump all dependent priorities to match the new request.
+ *
+ * A naive approach would be to use recursion:
+ * static void update_priorities(struct i915_priotree *pt, prio) {
+ * list_for_each_entry(dep, &pt->signalers_list, signal_link)
+ * update_priorities(dep->signal, prio)
+ * insert_request(pt);
+ * }
+ * but that may have unlimited recursion depth and so runs a very
+ * real risk of overrunning the kernel stack. Instead, we build
+ * a flat list of all dependencies starting with the current request.
+ * As we walk the list of dependencies, we add all of its dependencies
+ * to the end of the list (this may include an already visited
+ * request) and continue to walk onwards onto the new dependencies. The
+ * end result is a topological list of requests in reverse order, the
+ * last element in the list is the request we must execute first.
+ */
+ list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ list_for_each_entry(p, &pt->signalers_list, signal_link)
+ if (prio > READ_ONCE(p->signaler->priority))
+ list_move_tail(&p->dfs_link, &dfs);
+
+ p = list_next_entry(dep, dfs_link);
+ if (!RB_EMPTY_NODE(&pt->node))
+ continue;
+
+ engine = pt_lock_engine(pt, engine);
+
+ /* If it is not already in the rbtree, we can update the
+ * priority inplace and skip over it (and its dependencies)
+ * if it is referenced *again* as we descend the dfs.
+ */
+ if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
+ pt->priority = prio;
+ list_del_init(&dep->dfs_link);
+ }
+ }
+
+ /* Fifo and depth-first replacement ensure our deps execute before us */
+ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ engine = pt_lock_engine(pt, engine);
+
+ if (prio <= pt->priority)
+ continue;
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
+
+ pt->priority = prio;
+ rb_erase(&pt->node, &engine->execlist_queue);
+ if (insert_request(pt, &engine->execlist_queue))
+ engine->execlist_first = &pt->node;
+ }
+
+ if (engine)
+ spin_unlock_irq(&engine->timeline->lock);
+
+ mutex_unlock(&lock);
+
+ /* XXX Do we need to preempt to make room for us and our deps? */
}
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -672,46 +842,6 @@ err_unpin:
return ret;
}
-/*
- * intel_logical_ring_advance() - advance the tail and prepare for submission
- * @request: Request to advance the logical ringbuffer of.
- *
- * The tail is updated in our logical ringbuffer struct, not in the actual context. What
- * really happens during submission is that the context and current tail will be placed
- * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
- * point, the tail *inside* the context is updated and the ELSP written to.
- */
-static int
-intel_logical_ring_advance(struct drm_i915_gem_request *request)
-{
- struct intel_ring *ring = request->ring;
- struct intel_engine_cs *engine = request->engine;
-
- intel_ring_advance(ring);
- request->tail = ring->tail;
-
- /*
- * Here we add two extra NOOPs as padding to avoid
- * lite restore of a context with HEAD==TAIL.
- *
- * Caller must reserve WA_TAIL_DWORDS for us!
- */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- request->wa_tail = ring->tail;
-
- /* We keep the previous context alive until we retire the following
- * request. This ensures that any the context object is still pinned
- * for any residual writes the HW makes into it on the context switch
- * into the next object following the breadcrumb. Otherwise, we may
- * retire the context too early.
- */
- request->previous_context = engine->last_context;
- engine->last_context = request->ctx;
- return 0;
-}
-
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -745,7 +875,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
- ce->state->obj->dirty = true;
+ ce->state->obj->mm.dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission) {
@@ -853,13 +983,12 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl,kbl
+ * WaDisableLSQCROPERFforOCL:kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
- IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1002,9 +1131,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableCtxRestoreArbitration:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1075,9 +1203,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
{
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@@ -1104,9 +1231,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, MI_NOOP);
}
- /* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
+ /* WaDisableCtxRestoreArbitration:bxt */
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1250,8 +1376,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine);
- if (!execlists_elsp_idle(engine))
+ /* After a GPU reset, we may have requests to replay */
+ if (!execlists_elsp_idle(engine)) {
+ engine->execlist_port[0].count = 0;
+ engine->execlist_port[1].count = 0;
execlists_submit_ports(engine);
+ }
return 0;
}
@@ -1326,10 +1456,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
memset(&port[1], 0, sizeof(port[1]));
}
- /* CS is stopped, and we will resubmit both ports on resume */
GEM_BUG_ON(request->ctx != port[0].request->ctx);
- port[0].count = 0;
- port[1].count = 0;
/* Reset WaIdleLiteRestore:bdw,skl as well */
request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
@@ -1570,39 +1697,35 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-
-static int gen8_emit_request(struct drm_i915_gem_request *request)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
{
- struct intel_ring *ring = request->ring;
- int ret;
-
- ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
- if (ret)
- return ret;
+ *out++ = MI_NOOP;
+ *out++ = MI_NOOP;
+ request->wa_tail = intel_ring_offset(request->ring, out);
+}
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
+ u32 *out)
+{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(ring,
- intel_hws_seqno_address(request->engine) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, request->fence.seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance(request);
-}
+ *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+ *out++ = 0;
+ *out++ = request->global_seqno;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
+ request->tail = intel_ring_offset(request->ring, out);
-static int gen8_emit_request_render(struct drm_i915_gem_request *request)
-{
- struct intel_ring *ring = request->ring;
- int ret;
+ gen8_emit_wa_tail(request, out);
+}
- ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
- if (ret)
- return ret;
+static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
+static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
+ u32 *out)
+{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1610,21 +1733,24 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring,
- (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, i915_gem_request_get_seqno(request));
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE);
+ *out++ = intel_hws_seqno_address(request->engine);
+ *out++ = 0;
+ *out++ = request->global_seqno;
/* We're thrashing one dword of HWS. */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance(request);
+ *out++ = 0;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
+ request->tail = intel_ring_offset(request->ring, out);
+
+ gen8_emit_wa_tail(request, out);
}
+static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
+
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
@@ -1641,7 +1767,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
- return i915_gem_render_state_init(req);
+ return i915_gem_render_state_emit(req);
}
/**
@@ -1652,9 +1778,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
/*
* Tasklet cannot be active at this point due intel_mark_active/idle
* so this is just for documentation.
@@ -1681,14 +1804,19 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
+ }
}
static void
@@ -1698,8 +1826,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_common_ring;
engine->reset_hw = reset_common_ring;
engine->emit_flush = gen8_emit_flush;
- engine->emit_request = gen8_emit_request;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
@@ -1820,7 +1950,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
- engine->emit_request = gen8_emit_request_render;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
ret = intel_engine_create_scratch(engine, 4096);
if (ret)
@@ -1945,7 +2076,7 @@ static void execlists_init_reg_state(u32 *reg_state,
RING_START(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
RING_CTL(engine->mmio_base),
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+ RING_CTL_SIZE(ring->size) | RING_VALID);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
RING_BBADDR_UDW(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2046,7 +2177,7 @@ populate_lr_context(struct i915_gem_context *ctx,
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
- ctx_obj->dirty = true;
+ ctx_obj->mm.dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
@@ -2153,30 +2284,43 @@ error_deref_obj:
void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
- struct i915_gem_context *ctx = dev_priv->kernel_context;
struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+
+ /* Because we emit WA_TAIL_DWORDS there may be a disparity
+ * between our bookkeeping in ce->ring->head and ce->ring->tail and
+ * that stored in context. As we only write new commands from
+ * ce->ring->tail onwards, everything before that is junk. If the GPU
+ * starts reading from its RING_HEAD from the context, it may try to
+ * execute that junk and die.
+ *
+ * So to avoid that we reset the context images upon resume. For
+ * simplicity, we just zero everything out.
+ */
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u32 *reg;
- for_each_engine(engine, dev_priv) {
- struct intel_context *ce = &ctx->engine[engine->id];
- void *vaddr;
- uint32_t *reg_state;
-
- if (!ce->state)
- continue;
-
- vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
- if (WARN_ON(IS_ERR(vaddr)))
- continue;
+ if (!ce->state)
+ continue;
- reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ reg = i915_gem_object_pin_map(ce->state->obj,
+ I915_MAP_WB);
+ if (WARN_ON(IS_ERR(reg)))
+ continue;
- reg_state[CTX_RING_HEAD+1] = 0;
- reg_state[CTX_RING_TAIL+1] = 0;
+ reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
+ reg[CTX_RING_HEAD+1] = 0;
+ reg[CTX_RING_TAIL+1] = 0;
- ce->state->obj->dirty = true;
- i915_gem_object_unpin_map(ce->state->obj);
+ ce->state->obj->mm.dirty = true;
+ i915_gem_object_unpin_map(ce->state->obj);
- ce->ring->head = 0;
- ce->ring->tail = 0;
+ ce->ring->head = ce->ring->tail = 0;
+ ce->ring->last_retired_head = -1;
+ intel_ring_update_space(ce->ring);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4fed8165f98a..c1f546180ba2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -95,5 +95,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+bool intel_execlists_idle(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
new file mode 100644
index 000000000000..daa523410953
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <drm/drm_edid.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_dual_mode_helper.h>
+#include "intel_drv.h"
+
+static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+
+ return &dig_port->dp;
+}
+
+static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
+{
+ enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+ if (drm_lspcon_get_mode(adapter, &current_mode))
+ DRM_ERROR("Error reading LSPCON mode\n");
+ else
+ DRM_DEBUG_KMS("Current LSPCON mode %s\n",
+ current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
+ return current_mode;
+}
+
+static int lspcon_change_mode(struct intel_lspcon *lspcon,
+ enum drm_lspcon_mode mode, bool force)
+{
+ int err;
+ enum drm_lspcon_mode current_mode;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+ err = drm_lspcon_get_mode(adapter, &current_mode);
+ if (err) {
+ DRM_ERROR("Error reading LSPCON mode\n");
+ return err;
+ }
+
+ if (current_mode == mode) {
+ DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n");
+ return 0;
+ }
+
+ err = drm_lspcon_set_mode(adapter, mode);
+ if (err < 0) {
+ DRM_ERROR("LSPCON mode change failed\n");
+ return err;
+ }
+
+ lspcon->mode = mode;
+ DRM_DEBUG_KMS("LSPCON mode changed done\n");
+ return 0;
+}
+
+static bool lspcon_probe(struct intel_lspcon *lspcon)
+{
+ enum drm_dp_dual_mode_type adaptor_type;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+ /* Let's probe the adaptor and check its type */
+ adaptor_type = drm_dp_dual_mode_detect(adapter);
+ if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
+ DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
+ drm_dp_get_dual_mode_type_name(adaptor_type));
+ return false;
+ }
+
+ /* Yay ... got a LSPCON device */
+ DRM_DEBUG_KMS("LSPCON detected\n");
+ lspcon->mode = lspcon_get_current_mode(lspcon);
+ lspcon->active = true;
+ return true;
+}
+
+static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ unsigned long start = jiffies;
+
+ if (!lspcon->desc_valid)
+ return;
+
+ while (1) {
+ struct intel_dp_desc desc;
+
+ /*
+ * The w/a only applies in PCON mode and we don't expect any
+ * AUX errors.
+ */
+ if (!__intel_dp_read_desc(intel_dp, &desc))
+ return;
+
+ if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+ DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
+ jiffies_to_msecs(jiffies - start));
+ return;
+ }
+
+ if (time_after(jiffies, start + msecs_to_jiffies(1000)))
+ break;
+
+ usleep_range(10000, 15000);
+ }
+
+ DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
+}
+
+void lspcon_resume(struct intel_lspcon *lspcon)
+{
+ lspcon_resume_in_pcon_wa(lspcon);
+
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
+ DRM_ERROR("LSPCON resume failed\n");
+ else
+ DRM_DEBUG_KMS("LSPCON resume success\n");
+}
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (!IS_GEN9(dev_priv)) {
+ DRM_ERROR("LSPCON is supported on GEN9 only\n");
+ return false;
+ }
+
+ lspcon->active = false;
+ lspcon->mode = DRM_LSPCON_MODE_INVALID;
+
+ if (!lspcon_probe(lspcon)) {
+ DRM_ERROR("Failed to probe lspcon\n");
+ return false;
+ }
+
+ /*
+ * In the SW state machine, let's put LSPCON in PCON mode only.
+ * In this way, it will work with both HDMI 1.4 sinks as well as HDMI
+ * 2.0 sinks.
+ */
+ if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
+ true) < 0) {
+ DRM_ERROR("LSPCON mode change to PCON failed\n");
+ return false;
+ }
+ }
+
+ if (!intel_dp_read_dpcd(dp)) {
+ DRM_ERROR("LSPCON DPCD read failed\n");
+ return false;
+ }
+
+ lspcon->desc_valid = intel_dp_read_desc(dp);
+
+ DRM_DEBUG_KMS("Success: LSPCON init\n");
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d51ea47..d12ef0047d49 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & LVDS_PORT_EN))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -122,8 +122,7 @@ out:
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
@@ -139,12 +138,12 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
tmp = I915_READ(PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
@@ -396,7 +395,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@@ -406,7 +405,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
unsigned int lvds_bpp;
/* Should never happen!! */
- if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
@@ -431,7 +430,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(intel_crtc, pipe_config,
@@ -566,7 +565,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev_priv))
intel_display_resume(dev);
dev_priv->modeset_restore = MODESET_DONE;
@@ -949,16 +948,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
-static bool intel_lvds_supported(struct drm_device *dev)
+static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
return true;
/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
- if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ if (INTEL_GEN(dev_priv) <= 4 &&
+ IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
return true;
return false;
@@ -984,27 +984,27 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
i915_reg_t lvds_reg;
u32 lvds;
int pipe;
u8 pin;
- if (!intel_lvds_supported(dev))
+ if (!intel_lvds_supported(dev_priv))
return;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
lvds = I915_READ(lvds_reg);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
if (dev_priv->vbt.edp.support) {
@@ -1064,12 +1064,13 @@ void intel_lvds_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
- intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN4(dev))
+ else if (IS_GEN4(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
@@ -1157,14 +1158,14 @@ void intel_lvds_init(struct drm_device *dev)
*/
/* Ironlake: FIXME if still fail, not try pipe mode now */
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
goto failed;
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- fixed_mode = intel_crtc_mode_get(dev, crtc);
+ fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
if (fixed_mode) {
DRM_DEBUG_KMS("using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7acbbbf97833..f4429f67a4e3 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -642,24 +642,6 @@ static struct notifier_block intel_opregion_notifier = {
* (version 3)
*/
-static u32 get_did(struct intel_opregion *opregion, int i)
-{
- u32 did;
-
- if (i < ARRAY_SIZE(opregion->acpi->didl)) {
- did = opregion->acpi->didl[i];
- } else {
- i -= ARRAY_SIZE(opregion->acpi->didl);
-
- if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
- return 0;
-
- did = opregion->acpi->did2[i];
- }
-
- return did;
-}
-
static void set_did(struct intel_opregion *opregion, int i, u32 val)
{
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
@@ -674,11 +656,11 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static u32 acpi_display_type(struct drm_connector *connector)
+static u32 acpi_display_type(struct intel_connector *connector)
{
u32 display_type;
- switch (connector->connector_type) {
+ switch (connector->base.connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
display_type = ACPI_DISPLAY_TYPE_VGA;
@@ -707,7 +689,7 @@ static u32 acpi_display_type(struct drm_connector *connector)
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
default:
- MISSING_CASE(connector->connector_type);
+ MISSING_CASE(connector->base.connector_type);
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
}
@@ -718,34 +700,9 @@ static u32 acpi_display_type(struct drm_connector *connector)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_connector *connector;
- acpi_handle handle;
- struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
- unsigned long long device_id;
- acpi_status status;
- u32 temp, max_outputs;
- int i = 0;
-
- handle = ACPI_HANDLE(&pdev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev))
- return;
-
- if (acpi_is_video_device(handle))
- acpi_video_bus = acpi_dev;
- else {
- list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
- if (acpi_is_video_device(acpi_cdev->handle)) {
- acpi_video_bus = acpi_cdev;
- break;
- }
- }
- }
-
- if (!acpi_video_bus) {
- DRM_DEBUG_KMS("No ACPI video bus found\n");
- return;
- }
+ struct intel_connector *connector;
+ int i = 0, max_outputs;
+ int display_index[16] = {};
/*
* In theory, did2, the extended didl, gets added at opregion version
@@ -757,64 +714,58 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
- list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
- max_outputs);
- return;
- }
- status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
- NULL, &device_id);
- if (ACPI_SUCCESS(status)) {
- if (!device_id)
- goto blind_set;
- set_did(opregion, i++, (u32)(device_id & 0x0f0f));
- }
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ if (i < max_outputs)
+ set_did(opregion, i, device_id);
+ i++;
}
-end:
DRM_DEBUG_KMS("%d outputs detected\n", i);
+ if (i > max_outputs)
+ DRM_ERROR("More than %d outputs in connector list\n",
+ max_outputs);
+
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
set_did(opregion, i, 0);
- return;
-
-blind_set:
- i = 0;
- list_for_each_entry(connector,
- &dev_priv->drm.mode_config.connector_list, head) {
- int display_type = acpi_display_type(connector);
-
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs in connector list\n",
- max_outputs);
- return;
- }
-
- temp = get_did(opregion, i);
- set_did(opregion, i, temp | (1 << 31) | display_type | i);
- i++;
- }
- goto end;
}
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_connector *connector;
int i = 0;
- u32 disp_id;
-
- /* Initialize the CADL field by duplicating the DIDL values.
- * Technically, this is not always correct as display outputs may exist,
- * but not active. This initialization is necessary for some Clevo
- * laptops that check this field before processing the brightness and
- * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
- * there are less than eight devices. */
- do {
- disp_id = get_did(opregion, i);
- opregion->acpi->cadl[i] = disp_id;
- } while (++i < 8 && disp_id != 0);
+
+ /*
+ * Initialize the CADL field from the connector device ids. This is
+ * essentially the same as copying from the DIDL. Technically, this is
+ * not always correct as display outputs may exist but not be active. This
+ * initialization is necessary for some Clevo laptops that check this
+ * field before processing the brightness and display switching hotkeys.
+ *
+ * Note that internal panels should be at the front of the connector
+ * list already, ensuring they're not left out.
+ */
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ if (i >= ARRAY_SIZE(opregion->acpi->cadl))
+ break;
+ opregion->acpi->cadl[i++] = connector->acpi_device_id;
+ }
+
+ /* If fewer than 8 active devices, the list must be null terminated */
+ if (i < ARRAY_SIZE(opregion->acpi->cadl))
+ opregion->acpi->cadl[i] = 0;
}
void intel_opregion_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a24bc8c7889f..fd0e4dac7cc1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -233,7 +233,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
return i915_gem_request_alloc(engine, dev_priv->kernel_context);
}
@@ -1222,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
- i915_gem_object_put_unlocked(new_bo);
+ i915_gem_object_put(new_bo);
out_free:
kfree(params);
@@ -1466,10 +1466,12 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
* hardware should be off already */
WARN_ON(dev_priv->overlay->active);
- i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
+ i915_gem_object_put(dev_priv->overlay->reg_bo);
kfree(dev_priv->overlay);
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1587,3 +1589,5 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
P(UVSCALEV);
#undef P
}
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index be4b4d546fd9..08ab6d762ca4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -304,7 +304,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -325,7 +325,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
i965_scale_aspect(pipe_config, &pfit_control);
else
i9xx_scale_aspect(pipe_config, &pfit_control,
@@ -339,7 +339,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
@@ -355,7 +355,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
@@ -366,7 +366,7 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
@@ -1722,7 +1722,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->name,
- panel->backlight.enabled ? "enabled" : "disabled",
+ enableddisabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
return 0;
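
The logging tweak above swaps the open-coded "enabled"/"disabled" ternary for the i915 enableddisabled() helper. A small userspace sketch of the same idea, with printf() standing in for the DRM_DEBUG_KMS() macro and the brightness numbers made up for the example:

#include <stdbool.h>
#include <stdio.h>

/* Same shape as the i915 helper: map a bool onto a stable log string. */
static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

int main(void)
{
	bool backlight_enabled = true;

	/* Mirrors the backlight-initialized debug message in the hunk above. */
	printf("Connector backlight initialized, %s, brightness %u/%u\n",
	       enableddisabled(backlight_enabled), 60u, 100u);

	return 0;
}
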
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a2f751cd187a..bbb1eaf1e6db 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#include <drm/drm_atomic_helper.h>
/**
* DOC: RC6
@@ -55,10 +56,8 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-static void gen9_init_clock_gating(struct drm_device *dev)
+static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
@@ -81,11 +80,9 @@ static void gen9_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DISABLE_DUMMY0);
}
-static void bxt_init_clock_gating(struct drm_device *dev)
+static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
@@ -107,9 +104,8 @@ static void bxt_init_clock_gating(struct drm_device *dev)
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
tmp = I915_READ(CLKCFG);
@@ -146,9 +142,8 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u16 ddrpll, csipll;
ddrpll = I915_READ16(DDRMPLL1);
@@ -252,8 +247,8 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
+static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
+ bool is_ddr3,
int fsb,
int mem)
{
@@ -319,27 +314,26 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_device *dev = &dev_priv->drm;
u32 val;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
dev_priv->wm.vlv.cxsr = enable;
- } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+ } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
POSTING_READ(FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev)) {
+ } else if (IS_PINEVIEW(dev_priv)) {
val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
POSTING_READ(DSPFW3);
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
POSTING_READ(FW_BLC_SELF);
- } else if (IS_I915GM(dev)) {
+ } else if (IS_I915GM(dev_priv)) {
/*
* FIXME can't find a bit like this for 915G, and
* and yet it does have the related watermark in
@@ -353,8 +347,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
return;
}
- DRM_DEBUG_KMS("memory self-refresh is %s\n",
- enable ? "enabled" : "disabled");
+ DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
}
@@ -377,10 +370,9 @@ static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-static int vlv_get_fifo_size(struct drm_device *dev,
+static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int sprite0_start, sprite1_start, size;
switch (pipe) {
@@ -429,9 +421,8 @@ static int vlv_get_fifo_size(struct drm_device *dev,
return size;
}
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -445,9 +436,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -462,9 +452,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -624,11 +613,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
return wm_size;
}
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc, *enabled = NULL;
+ struct intel_crtc *crtc, *enabled = NULL;
- for_each_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -639,27 +628,31 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
return enabled;
}
-static void pineview_update_wm(struct drm_crtc *unused_crtc)
+static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
- dev_priv->fsb_freq, dev_priv->mem_freq);
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
intel_set_memory_cxsr(dev_priv, false);
return;
}
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc) {
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -706,7 +699,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
}
}
-static bool g4x_compute_wm0(struct drm_device *dev,
+static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
int plane,
const struct intel_watermark_params *display,
int display_latency_ns,
@@ -715,24 +708,26 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int *plane_wm,
int *cursor_wm)
{
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
+ const struct drm_framebuffer *fb;
int htotal, hdisplay, clock, cpp;
int line_time_us, line_count;
int entries, tlb_miss;
- crtc = intel_get_crtc_for_plane(dev, plane);
+ crtc = intel_get_crtc_for_plane(dev_priv, plane);
if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
}
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
+ fb = crtc->base.primary->state->fb;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ hdisplay = crtc->config->pipe_src_w;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
@@ -747,7 +742,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * crtc->cursor->state->crtc_w * cpp;
+ entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -766,7 +761,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
-static bool g4x_check_srwm(struct drm_device *dev,
+static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
@@ -775,13 +770,13 @@ static bool g4x_check_srwm(struct drm_device *dev,
display_wm, cursor_wm);
if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
display_wm, display->max_wm);
return false;
}
if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
cursor_wm, cursor->max_wm);
return false;
}
@@ -794,15 +789,16 @@ static bool g4x_check_srwm(struct drm_device *dev,
return true;
}
-static bool g4x_compute_srwm(struct drm_device *dev,
+static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *display_wm, int *cursor_wm)
{
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
+ const struct drm_framebuffer *fb;
int hdisplay, htotal, cpp, clock;
unsigned long line_time_us;
int line_count, line_size;
@@ -814,12 +810,13 @@ static bool g4x_compute_srwm(struct drm_device *dev,
return false;
}
- crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ crtc = intel_get_crtc_for_plane(dev_priv, plane);
+ adjusted_mode = &crtc->config->base.adjusted_mode;
+ fb = crtc->base.primary->state->fb;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ hdisplay = crtc->config->pipe_src_w;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -833,11 +830,11 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * cpp * crtc->cursor->state->crtc_w;
+ entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
- return g4x_check_srwm(dev,
+ return g4x_check_srwm(dev_priv,
*display_wm, *cursor_wm,
display, cursor);
}
@@ -937,10 +934,8 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_device *dev)
+static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* all latencies in usec */
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1065,7 +1060,8 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
for (level = 0; level < wm_state->num_levels; level++) {
struct drm_device *dev = crtc->base.dev;
- const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ const int sr_fifo_size =
+ INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
struct intel_plane *plane;
wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
@@ -1095,15 +1091,16 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
static void vlv_compute_wm(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
- int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
int level;
memset(wm_state, 0, sizeof(*wm_state));
wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
- wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
+ wm_state->num_levels = dev_priv->wm.max_level + 1;
wm_state->num_active_planes = 0;
@@ -1183,7 +1180,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
}
/* clear any (partially) filled invalid levels */
- for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
+ for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
}
@@ -1327,20 +1324,19 @@ static void vlv_merge_wm(struct drm_device *dev,
}
}
-static void vlv_update_wm(struct drm_crtc *crtc)
+static void vlv_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
struct vlv_wm_values wm = {};
- vlv_compute_wm(intel_crtc);
+ vlv_compute_wm(crtc);
vlv_merge_wm(dev, &wm);
if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
/* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(intel_crtc);
+ vlv_pipe_set_fifo_size(crtc);
return;
}
@@ -1356,9 +1352,9 @@ static void vlv_update_wm(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, false);
/* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(intel_crtc);
+ vlv_pipe_set_fifo_size(crtc);
- vlv_write_wm_values(intel_crtc, &wm);
+ vlv_write_wm_values(crtc, &wm);
DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
"sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
@@ -1382,30 +1378,29 @@ static void vlv_update_wm(struct drm_crtc *crtc)
#define single_plane_enabled(mask) is_power_of_2(mask)
-static void g4x_update_wm(struct drm_crtc *crtc)
+static void g4x_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = to_i915(dev);
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
- if (g4x_compute_wm0(dev, PIPE_A,
+ if (g4x_compute_wm0(dev_priv, PIPE_A,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
- if (g4x_compute_wm0(dev, PIPE_B,
+ if (g4x_compute_wm0(dev_priv, PIPE_B,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
+ g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
@@ -1440,25 +1435,27 @@ static void g4x_update_wm(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i965_update_wm(struct drm_crtc *unused_crtc)
+static void i965_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ int hdisplay = crtc->config->pipe_src_w;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
unsigned long line_time_us;
int entries;
@@ -1476,7 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- cpp * crtc->cursor->state->crtc_w;
+ cpp * crtc->base.cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1514,34 +1511,38 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
#undef FW_WM
-static void i9xx_update_wm(struct drm_crtc *unused_crtc)
+static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
- struct drm_crtc *crtc, *enabled = NULL;
+ struct intel_crtc *crtc, *enabled = NULL;
- if (IS_I945GM(dev))
+ if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev))
+ else if (!IS_GEN2(dev_priv))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- crtc = intel_get_crtc_for_plane(dev, 0);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
+ crtc = intel_get_crtc_for_plane(dev_priv, 0);
if (intel_crtc_active(crtc)) {
- const struct drm_display_mode *adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
- if (IS_GEN2(dev))
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (IS_GEN2(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1552,18 +1553,23 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
wm_info = &i830_bc_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev, 1);
- crtc = intel_get_crtc_for_plane(dev, 1);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
+ crtc = intel_get_crtc_for_plane(dev_priv, 1);
if (intel_crtc_active(crtc)) {
- const struct drm_display_mode *adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
- if (IS_GEN2(dev))
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (IS_GEN2(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1579,10 +1585,10 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- if (IS_I915GM(dev) && enabled) {
+ if (IS_I915GM(dev_priv) && enabled) {
struct drm_i915_gem_object *obj;
- obj = intel_fb_obj(enabled->primary->state->fb);
+ obj = intel_fb_obj(enabled->base.primary->state->fb);
/* self-refresh seems busted with untiled */
if (!i915_gem_object_is_tiled(obj))
@@ -1598,19 +1604,24 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && enabled) {
+ if (HAS_FW_BLC(dev_priv) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &enabled->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ enabled->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
+ int hdisplay = enabled->config->pipe_src_w;
+ int cpp;
unsigned long line_time_us;
int entries;
- if (IS_I915GM(dev) || IS_I945GM(dev))
+ if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1623,7 +1634,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
@@ -1647,23 +1658,22 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i845_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc == NULL)
return;
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
- dev_priv->display.get_fifo_size(dev, 0),
+ dev_priv->display.get_fifo_size(dev_priv, 0),
4, pessimal_latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -1852,23 +1862,25 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
-static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
+static unsigned int
+ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 3072;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
return 768;
else
return 512;
}
-static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
- int level, bool is_sprite)
+static unsigned int
+ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ int level, bool is_sprite)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -1879,18 +1891,18 @@ static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
return level == 0 ? 63 : 255;
}
-static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
- int level)
+static unsigned int
+ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 31;
else
return 15;
@@ -1903,7 +1915,8 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -1911,14 +1924,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev)->num_pipes;
+ fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (INTEL_INFO(dev)->gen <= 6)
+ if (INTEL_GEN(dev_priv) <= 6)
fifo_size /= 2;
}
@@ -1934,7 +1947,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
@@ -1947,7 +1960,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev, level);
+ return ilk_cursor_wm_reg_max(to_i915(dev), level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
@@ -1959,17 +1972,17 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}
-static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev, level, false);
- max->spr = ilk_plane_wm_reg_max(dev, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev, level);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
@@ -2076,14 +2089,13 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
PIPE_WM_LINETIME_TIME(linetime);
}
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
+static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[8])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
uint32_t val;
int ret, i;
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
@@ -2155,7 +2167,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
}
}
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
@@ -2165,14 +2177,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
wm[2] = (sskpd >> 12) & 0xFF;
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
uint32_t sskpd = I915_READ(MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
uint32_t mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
@@ -2182,42 +2194,44 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
}
}
-static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
/* WaDoubleCursorLP3Latency:ivb */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
wm[3] *= 2;
}
-int ilk_wm_max_level(const struct drm_device *dev)
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
/* how many WM levels are we expecting */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 7;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 4;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
return 3;
else
return 2;
}
-static void intel_print_wm_latency(struct drm_device *dev,
+static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
const uint16_t wm[8])
{
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
unsigned int latency = wm[level];
@@ -2232,7 +2246,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -2246,7 +2260,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5], uint16_t min)
{
- int level, max_level = ilk_wm_max_level(&dev_priv->drm);
+ int level, max_level = ilk_wm_max_level(dev_priv);
if (wm[0] >= min)
return false;
@@ -2258,9 +2272,8 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return true;
}
-static void snb_wm_latency_quirk(struct drm_device *dev)
+static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool changed;
/*
@@ -2275,39 +2288,35 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
return;
DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_device *dev)
+static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+ intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
- intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
- intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev))
- snb_wm_latency_quirk(dev);
+ if (IS_GEN6(dev_priv))
+ snb_wm_latency_quirk(dev_priv);
}
-static void skl_setup_wm_latency(struct drm_device *dev)
+static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
- intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+ intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
+ intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static bool ilk_validate_pipe_wm(struct drm_device *dev,
@@ -2345,7 +2354,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev), usable_level;
+ int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
@@ -2377,7 +2386,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
+ if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2390,13 +2399,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
pipe_wm->wm[0] = pipe_wm->raw_wm[0];
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev, 1, &max);
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
for (level = 1; level <= max_level; level++) {
struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
@@ -2432,7 +2441,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
{
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(to_i915(dev));
/*
* Start with the final, target watermarks, then combine with the
@@ -2516,16 +2525,16 @@ static void ilk_wm_merge(struct drm_device *dev,
struct intel_pipe_wm *merged)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
int last_enabled_level = max_level;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+ if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
+ merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
@@ -2556,7 +2565,7 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+ if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -2577,7 +2586,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
return dev_priv->wm.pri_latency[level];
@@ -2588,6 +2597,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -2614,7 +2624,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
else
@@ -2625,7 +2635,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
* Always set WM1S_LP_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+ if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
WARN_ON(wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
@@ -2656,7 +2666,7 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(to_i915(dev));
int level1 = 0, level2 = 0;
for (level = 1; level <= max_level; level++) {
@@ -2775,7 +2785,6 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct drm_device *dev = &dev_priv->drm;
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
@@ -2801,7 +2810,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = I915_READ(WM_MISC);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
@@ -2831,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
@@ -2879,6 +2888,21 @@ skl_wm_plane_id(const struct intel_plane *plane)
}
}
+/*
+ * FIXME: We still don't have the proper code to detect if we need to apply the WA,
+ * so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ IS_KABYLAKE(dev_priv))
+ return true;
+
+ return false;
+}
+
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
@@ -2999,9 +3023,12 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
+ struct intel_plane *plane;
+ struct intel_crtc_state *cstate;
+ struct skl_plane_wm *wm;
enum pipe pipe;
- int level, plane;
+ int level, latency;
if (!intel_has_sagv(dev_priv))
return false;
@@ -3019,27 +3046,37 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/* Since we're now guaranteed to only have one active CRTC... */
pipe = ffs(intel_state->active_crtcs) - 1;
- crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ cstate = to_intel_crtc_state(crtc->base.state);
- if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];
+
/* Skip this plane if it's not enabled */
- if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+ if (!wm->wm[0].plane_en)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(dev);
- intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+ for (level = ilk_wm_max_level(dev_priv);
+ !wm->wm[level].plane_en; --level)
{ }
+ latency = dev_priv->wm.skl_latency[level];
+
+ if (skl_needs_memory_bw_wa(intel_state) &&
+ plane->base.state->fb->modifier ==
+ I915_FORMAT_MOD_X_TILED)
+ latency += 15;
+
/*
* If any of the planes on this pipe don't enable wm levels
* that incur memory latencies higher then 30µs we can't enable
* the SAGV
*/
- if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+ if (latency < SKL_SAGV_BLOCK_TIME)
return false;
}
@@ -3058,7 +3095,6 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct drm_crtc *for_crtc = cstate->base.crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
- int pipe = to_intel_crtc(for_crtc)->pipe;
if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
@@ -3086,7 +3122,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
* we currently hold.
*/
if (!intel_state->active_pipe_changes) {
- *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+ /*
+ * alloc may be cleared by clear_intel_crtc_state,
+ * copy from old state to be sure
+ */
+ *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
return;
}
@@ -3129,7 +3169,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
@@ -3173,7 +3213,7 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate)
src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
- if (intel_rotation_90_or_270(pstate->base.rotation))
+ if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@@ -3204,7 +3244,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
/* for planar format */
@@ -3231,49 +3271,39 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+ unsigned *plane_data_rate,
+ unsigned *plane_y_data_rate)
{
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
- struct drm_crtc *crtc = cstate->crtc;
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_plane *plane;
+ struct drm_plane *plane;
const struct intel_plane *intel_plane;
- struct drm_plane_state *pstate;
+ const struct drm_plane_state *pstate;
unsigned int rate, total_data_rate = 0;
int id;
- int i;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- for_each_plane_in_state(state, plane, pstate, i) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
id = skl_wm_plane_id(to_intel_plane(plane));
intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe != intel_crtc->pipe)
- continue;
-
/* packed/uv */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 0);
- intel_cstate->wm.skl.plane_data_rate[id] = rate;
+ plane_data_rate[id] = rate;
+
+ total_data_rate += rate;
/* y-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 1);
- intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
- }
-
- /* Calculate CRTC's total data rate from cached values */
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- int id = skl_wm_plane_id(intel_plane);
+ plane_y_data_rate[id] = rate;
- /* packed/uv */
- total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
- total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
+ total_data_rate += rate;
}
return total_data_rate;
@@ -3297,14 +3327,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return 0;
/* For Non Y-tile return 8-blocks */
- if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 8;
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */
@@ -3318,7 +3348,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
else
plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
- if (intel_rotation_90_or_270(pstate->rotation)) {
+ if (drm_rotation_90_or_270(pstate->rotation)) {
switch (plane_bpp) {
case 1:
min_scanlines = 32;
@@ -3342,6 +3372,30 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}
+static void
+skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
+ uint16_t *minimum, uint16_t *y_minimum)
+{
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int id = skl_wm_plane_id(intel_plane);
+
+ if (id == PLANE_CURSOR)
+ continue;
+
+ if (!pstate->visible)
+ continue;
+
+ minimum[id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ }
+
+ minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+}
+
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
@@ -3350,25 +3404,26 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
- struct drm_plane *plane;
- struct drm_plane_state *pstate;
enum pipe pipe = intel_crtc->pipe;
- struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
- uint16_t alloc_size, start, cursor_blocks;
- uint16_t *minimum = cstate->wm.skl.minimum_blocks;
- uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
+ struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
+ uint16_t alloc_size, start;
+ uint16_t minimum[I915_MAX_PLANES] = {};
+ uint16_t y_minimum[I915_MAX_PLANES] = {};
unsigned int total_data_rate;
int num_active;
int id, i;
+ unsigned plane_data_rate[I915_MAX_PLANES] = {};
+ unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
+
+ /* Clear the partitioning for disabled planes. */
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
if (WARN_ON(!state))
return 0;
if (!cstate->base.active) {
- ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+ alloc->start = alloc->end = 0;
return 0;
}
@@ -3379,57 +3434,43 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- cursor_blocks = skl_cursor_allocation(num_active);
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
-
- alloc_size -= cursor_blocks;
-
- /* 1. Allocate the mininum required blocks for each active plane */
- for_each_plane_in_state(state, plane, pstate, i) {
- intel_plane = to_intel_plane(plane);
- id = skl_wm_plane_id(intel_plane);
-
- if (intel_plane->pipe != pipe)
- continue;
-
- if (!to_intel_plane_state(pstate)->base.visible) {
- minimum[id] = 0;
- y_minimum[id] = 0;
- continue;
- }
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- minimum[id] = 0;
- y_minimum[id] = 0;
- continue;
- }
+ skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
- minimum[id] = skl_ddb_min_alloc(pstate, 0);
- y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
- }
+ /*
+ * 1. Allocate the minimum required blocks for each active plane
+ * and allocate the cursor, it doesn't require extra allocation
+ * proportional to the data rate.
+ */
- for (i = 0; i < PLANE_CURSOR; i++) {
+ for (i = 0; i < I915_MAX_PLANES; i++) {
alloc_size -= minimum[i];
alloc_size -= y_minimum[i];
}
+ ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+
/*
* 2. Distribute the remaining space in proportion to the amount of
* data each plane needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
- total_data_rate = skl_get_total_relative_data_rate(cstate);
+ total_data_rate = skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ plane_y_data_rate);
if (total_data_rate == 0)
return 0;
start = alloc->start;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ for (id = 0; id < I915_MAX_PLANES; id++) {
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
- int id = skl_wm_plane_id(intel_plane);
- data_rate = cstate->wm.skl.plane_data_rate[id];
+ if (id == PLANE_CURSOR)
+ continue;
+
+ data_rate = plane_data_rate[id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
@@ -3451,7 +3492,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/*
* allocation for y_plane part of planar format:
*/
- y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
+ y_data_rate = plane_y_data_rate[id];
y_plane_blocks = y_minimum[id];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
@@ -3468,12 +3509,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
-{
- /* TODO: Take into account the scalers once we support them */
- return config->base.adjusted_mode.crtc_clock;
-}
-
/*
* The max latency should be 257 (max the punit can code is 255 and we add 2us
* for the read latency) and cpp should always be <= 8, so that
@@ -3524,7 +3559,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
* Adjusted plane pixel rate is just the pipe's adjusted pixel rate
* with additional adjustments for plane-specific scaling.
*/
- adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+ adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
downscale_amount = skl_plane_downscale_amount(pstate);
pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
@@ -3553,22 +3588,28 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t width = 0, height = 0;
uint32_t plane_pixel_rate;
uint32_t y_tile_minimum, y_min_scanlines;
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(cstate->base.state);
+ bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
*enabled = false;
return 0;
}
+ if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED)
+ latency += 15;
+
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
- if (intel_rotation_90_or_270(pstate->rotation)) {
+ if (drm_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3580,23 +3621,27 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
case 2:
y_min_scanlines = 8;
break;
- default:
- WARN(1, "Unsupported pixel depth for rotation");
case 4:
y_min_scanlines = 4;
break;
+ default:
+ MISSING_CASE(cpp);
+ return -EINVAL;
}
} else {
y_min_scanlines = 4;
}
+ if (apply_memory_bw_wa)
+ y_min_scanlines *= 2;
+
plane_bytes_per_line = width * cpp;
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
plane_blocks_per_line =
DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
plane_blocks_per_line /= y_min_scanlines;
- } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
+ } else if (fb->modifier == DRM_FORMAT_MOD_NONE) {
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
+ 1;
} else {
@@ -3611,11 +3656,14 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
selected_result = max(method2, y_tile_minimum);
} else {
- if ((ddb_allocation / plane_blocks_per_line) >= 1)
+ if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
+ (plane_bytes_per_line / 512 < 1))
+ selected_result = method2;
+ else if ((ddb_allocation / plane_blocks_per_line) >= 1)
selected_result = min(method1, method2);
else
selected_result = method1;
@@ -3625,8 +3673,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
if (level >= 1 && level <= 7) {
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
res_blocks += y_tile_minimum;
res_lines += y_min_scanlines;
} else {
@@ -3665,67 +3713,52 @@ static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct intel_crtc_state *cstate,
+ struct intel_plane *intel_plane,
int level,
struct skl_wm_level *result)
{
struct drm_atomic_state *state = cstate->base.state;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- struct intel_plane_state *intel_pstate;
+ struct drm_plane *plane = &intel_plane->base;
+ struct intel_plane_state *intel_pstate = NULL;
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
int ret;
+ int i = skl_wm_plane_id(intel_plane);
+
+ if (state)
+ intel_pstate =
+ intel_atomic_get_existing_plane_state(state,
+ intel_plane);
/*
- * We'll only calculate watermarks for planes that are actually
- * enabled, so make sure all other planes are set as disabled.
+ * Note: If we start supporting multiple pending atomic commits against
+ * the same planes/CRTC's in the future, plane->state will no longer be
+ * the correct pre-state to use for the calculations here and we'll
+ * need to change where we get the 'unchanged' plane data from.
+ *
+ * For now this is fine because we only allow one queued commit against
+ * a CRTC. Even if the plane isn't modified by this transaction and we
+ * don't have a plane lock, we still have the CRTC's lock, so we know
+ * that no other transactions are racing with us to update it.
*/
- memset(result, 0, sizeof(*result));
+ if (!intel_pstate)
+ intel_pstate = to_intel_plane_state(plane->state);
- for_each_intel_plane_mask(&dev_priv->drm,
- intel_plane,
- cstate->base.plane_mask) {
- int i = skl_wm_plane_id(intel_plane);
-
- plane = &intel_plane->base;
- intel_pstate = NULL;
- if (state)
- intel_pstate =
- intel_atomic_get_existing_plane_state(state,
- intel_plane);
+ WARN_ON(!intel_pstate->base.fb);
- /*
- * Note: If we start supporting multiple pending atomic commits
- * against the same planes/CRTC's in the future, plane->state
- * will no longer be the correct pre-state to use for the
- * calculations here and we'll need to change where we get the
- * 'unchanged' plane data from.
- *
- * For now this is fine because we only allow one queued commit
- * against a CRTC. Even if the plane isn't modified by this
- * transaction and we don't have a plane lock, we still have
- * the CRTC's lock, so we know that no other transactions are
- * racing with us to update it.
- */
- if (!intel_pstate)
- intel_pstate = to_intel_plane_state(plane->state);
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- WARN_ON(!intel_pstate->base.fb);
-
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
-
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
- intel_pstate,
- ddb_blocks,
- level,
- &result->plane_res_b[i],
- &result->plane_res_l[i],
- &result->plane_en[i]);
- if (ret)
- return ret;
- }
+ ret = skl_compute_plane_wm(dev_priv,
+ cstate,
+ intel_pstate,
+ ddb_blocks,
+ level,
+ &result->plane_res_b,
+ &result->plane_res_l,
+ &result->plane_en);
+ if (ret)
+ return ret;
return 0;
}
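
To make the per-plane restructuring concrete: skl_compute_wm_level() now fills a single skl_wm_level for one plane at one level, and the caller keeps one skl_plane_wm per plane, instead of indexing per-plane arrays inside a shared result. A simplified sketch of that layout, with illustrative field names and sizes rather than the driver's real structs:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_WM_LEVELS 8
    #define MAX_PLANES    4   /* e.g. primary, two sprites, cursor */

    struct wm_level {                 /* one plane at one latency level */
            bool     enabled;
            uint16_t blocks;          /* plane_res_b */
            uint8_t  lines;           /* plane_res_l */
    };

    struct plane_wm {                 /* all levels for one plane */
            struct wm_level wm[MAX_WM_LEVELS];
            struct wm_level trans_wm;
    };

    struct pipe_wm {                  /* per-pipe result: one entry per plane */
            struct plane_wm planes[MAX_PLANES];
            uint32_t linetime;
    };

    int main(void)
    {
            struct pipe_wm result = {0};

            /* the new loop shape: outer per plane, inner per level */
            for (int plane = 0; plane < MAX_PLANES; plane++)
                    for (int level = 0; level < MAX_WM_LEVELS; level++)
                            result.planes[plane].wm[level].enabled = false;

            printf("planes=%d, levels each=%d\n", MAX_PLANES, MAX_WM_LEVELS);
            return 0;
    }
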
@@ -3733,32 +3766,28 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
+ uint32_t pixel_rate;
+
if (!cstate->base.active)
return 0;
- if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
+ pixel_rate = ilk_pipe_pixel_rate(cstate);
+
+ if (WARN_ON(pixel_rate == 0))
return 0;
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
- skl_pipe_pixel_rate(cstate));
+ pixel_rate);
}
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
struct skl_wm_level *trans_wm /* out */)
{
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
-
if (!cstate->base.active)
return;
/* Until we know more, just disable transition WMs */
- for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
- int i = skl_wm_plane_id(intel_plane);
-
- trans_wm->plane_en[i] = false;
- }
+ trans_wm->plane_en = false;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
@@ -3767,77 +3796,34 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ struct intel_plane *intel_plane;
+ struct skl_plane_wm *wm;
+ int level, max_level = ilk_wm_max_level(dev_priv);
int ret;
- for (level = 0; level <= max_level; level++) {
- ret = skl_compute_wm_level(dev_priv, ddb, cstate,
- level, &pipe_wm->wm[level]);
- if (ret)
- return ret;
- }
- pipe_wm->linetime = skl_compute_linetime_wm(cstate);
-
- skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
-
- return 0;
-}
-
-static void skl_compute_wm_results(struct drm_device *dev,
- struct skl_pipe_wm *p_wm,
- struct skl_wm_values *r,
- struct intel_crtc *intel_crtc)
-{
- int level, max_level = ilk_wm_max_level(dev);
- enum pipe pipe = intel_crtc->pipe;
- uint32_t temp;
- int i;
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = 0;
-
- temp |= p_wm->wm[level].plane_res_l[i] <<
- PLANE_WM_LINES_SHIFT;
- temp |= p_wm->wm[level].plane_res_b[i];
- if (p_wm->wm[level].plane_en[i])
- temp |= PLANE_WM_EN;
+ /*
+ * We'll only calculate watermarks for planes that are actually
+ * enabled, so make sure all other planes are set as disabled.
+ */
+ memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- r->plane[pipe][i][level] = temp;
+ for_each_intel_plane_mask(&dev_priv->drm,
+ intel_plane,
+ cstate->base.plane_mask) {
+ wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];
+
+ for (level = 0; level <= max_level; level++) {
+ ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+ intel_plane, level,
+ &wm->wm[level]);
+ if (ret)
+ return ret;
}
-
- temp = 0;
-
- temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
-
- if (p_wm->wm[level].plane_en[PLANE_CURSOR])
- temp |= PLANE_WM_EN;
-
- r->plane[pipe][PLANE_CURSOR][level] = temp;
-
+ skl_compute_transition_wm(cstate, &wm->trans_wm);
}
+ pipe_wm->linetime = skl_compute_linetime_wm(cstate);
- /* transition WMs */
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = 0;
- temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->trans_wm.plane_res_b[i];
- if (p_wm->trans_wm.plane_en[i])
- temp |= PLANE_WM_EN;
-
- r->plane_trans[pipe][i] = temp;
- }
-
- temp = 0;
- temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
- if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
- temp |= PLANE_WM_EN;
-
- r->plane_trans[pipe][PLANE_CURSOR] = temp;
-
- r->wm_linetime[pipe] = p_wm->linetime;
+ return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
@@ -3850,53 +3836,77 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
I915_WRITE(reg, 0);
}
+static void skl_write_wm_level(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const struct skl_wm_level *level)
+{
+ uint32_t val = 0;
+
+ if (level->plane_en) {
+ val |= PLANE_WM_EN;
+ val |= level->plane_res_b;
+ val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
+ }
+
+ I915_WRITE(reg, val);
+}
+
void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm,
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb,
int plane)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- I915_WRITE(PLANE_WM(pipe, plane, level),
- wm->plane[pipe][plane][level]);
+ skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
+ &wm->wm[level]);
}
- I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
+ skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
+ &wm->trans_wm);
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
- &wm->ddb.plane[pipe][plane]);
+ &ddb->plane[pipe][plane]);
skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
- &wm->ddb.y_plane[pipe][plane]);
+ &ddb->y_plane[pipe][plane]);
}
void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm)
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- I915_WRITE(CUR_WM(pipe, level),
- wm->plane[pipe][PLANE_CURSOR][level]);
+ skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
+ &wm->wm[level]);
}
- I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+ skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &wm->ddb.plane[pipe][PLANE_CURSOR]);
+ &ddb->plane[pipe][PLANE_CURSOR]);
}
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
{
- return new->pipe[pipe].start == old->pipe[pipe].start &&
- new->pipe[pipe].end == old->pipe[pipe].end;
+ if (l1->plane_en != l2->plane_en)
+ return false;
+
+ /* If both planes aren't enabled, the rest shouldn't matter */
+ if (!l1->plane_en)
+ return true;
+
+ return (l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -3905,35 +3915,26 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore)
{
- struct drm_device *dev = state->dev;
- struct intel_crtc *intel_crtc;
- enum pipe otherp;
-
- for_each_intel_crtc(dev, intel_crtc) {
- otherp = intel_crtc->pipe;
-
- if (otherp == pipe)
- continue;
+ int i;
- if (skl_ddb_entries_overlap(&new->pipe[pipe],
- &old->pipe[otherp]))
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (i != ignore && entries[i] &&
+ skl_ddb_entries_overlap(ddb, entries[i]))
return true;
- }
return false;
}
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
- struct skl_ddb_allocation *ddb, /* out */
+ const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
+ struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
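
The skl_ddb_allocation_overlaps() rewrite above now takes an array of per-pipe DDB entries plus the index of the pipe being checked, rather than walking every CRTC against an old allocation. A minimal sketch of that calling pattern with plain half-open ranges; the names here are illustrative only:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct range { int start, end; };   /* half-open: [start, end) */

    static bool ranges_overlap(const struct range *a, const struct range *b)
    {
            return a->start < b->end && b->start < a->end;
    }

    /* candidate vs. every non-NULL entry except its own slot */
    static bool overlaps_any(const struct range *entries[], size_t n,
                             const struct range *candidate, size_t ignore)
    {
            for (size_t i = 0; i < n; i++)
                    if (i != ignore && entries[i] &&
                        ranges_overlap(candidate, entries[i]))
                            return true;
            return false;
    }

    int main(void)
    {
            struct range a = { 0, 100 }, b = { 100, 200 };
            const struct range *entries[] = { &a, &b, NULL }; /* third pipe off */
            struct range for_third = { 150, 300 };

            printf("%d\n", overlaps_any(entries, 3, &for_third, 2)); /* 1: hits b */
            for_third.start = 200;
            printf("%d\n", overlaps_any(entries, 3, &for_third, 2)); /* 0 */
            return 0;
    }
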
@@ -3941,7 +3942,7 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
if (ret)
return ret;
- if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
+ if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
*changed = false;
else
*changed = true;
@@ -3962,7 +3963,7 @@ pipes_modified(struct drm_atomic_state *state)
return ret;
}
-int
+static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
@@ -3980,7 +3981,7 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
- drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+ drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
id = skl_wm_plane_id(to_intel_plane(plane));
if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
@@ -4050,6 +4051,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
intel_state->wm_results.dirty_pipes = ~0;
}
+ /*
+ * We're not recomputing for the pipes not included in the commit, so
+ * make sure we start with the current state.
+ */
+ memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+
for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
struct intel_crtc_state *cstate;
@@ -4074,19 +4081,50 @@ skl_copy_wm_for_pipe(struct skl_wm_values *dst,
struct skl_wm_values *src,
enum pipe pipe)
{
- dst->wm_linetime[pipe] = src->wm_linetime[pipe];
- memcpy(dst->plane[pipe], src->plane[pipe],
- sizeof(dst->plane[pipe]));
- memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
- sizeof(dst->plane_trans[pipe]));
-
- dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
sizeof(dst->ddb.y_plane[pipe]));
memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
sizeof(dst->ddb.plane[pipe]));
}
+static void
+skl_print_wm_changes(const struct drm_atomic_state *state)
+{
+ const struct drm_device *dev = state->dev;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
+ const struct drm_crtc *crtc;
+ const struct drm_crtc_state *cstate;
+ const struct intel_plane *intel_plane;
+ const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
+ const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ int id;
+ int i;
+
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ const struct skl_ddb_entry *old, *new;
+
+ id = skl_wm_plane_id(intel_plane);
+ old = &old_ddb->plane[pipe][id];
+ new = &new_ddb->plane[pipe][id];
+
+ if (skl_ddb_entry_equal(old, new))
+ continue;
+
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ intel_plane->base.base.id,
+ intel_plane->base.name,
+ old->start, old->end,
+ new->start, new->end);
+ }
+ }
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -4129,13 +4167,14 @@ skl_compute_wm(struct drm_atomic_state *state)
* no suitable watermark values can be found.
*/
for_each_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(cstate);
+ const struct skl_pipe_wm *old_pipe_wm =
+ &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
- &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
+ &results->ddb, &changed);
if (ret)
return ret;
@@ -4147,44 +4186,51 @@ skl_compute_wm(struct drm_atomic_state *state)
continue;
intel_cstate->update_wm_pre = true;
- skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
}
+ skl_print_wm_changes(state);
+
return 0;
}
-static void skl_update_wm(struct drm_crtc *crtc)
+static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *results = &dev_priv->wm.skl_results;
- struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- enum pipe pipe = intel_crtc->pipe;
+ const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ enum pipe pipe = crtc->pipe;
+ int plane;
- if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+ if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
- intel_crtc->wm.active.skl = *pipe_wm;
+ I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ for_each_universal_plane(dev_priv, pipe, plane)
+ skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane);
- /*
- * If this pipe isn't active already, we're going to be enabling it
- * very soon. Since it's safe to update a pipe's ddb allocation while
- * the pipe's shut off, just do so here. Already active pipes will have
- * their watermarks updated once we update their planes.
- */
- if (crtc->state->active_changed) {
- int plane;
+ skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb);
+}
+
+static void skl_initial_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct skl_wm_values *results = &state->wm_results;
+ struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
+ enum pipe pipe = intel_crtc->pipe;
- for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
- skl_write_plane_wm(intel_crtc, results, plane);
+ if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
+ return;
- skl_write_cursor_wm(intel_crtc, results);
- }
+ mutex_lock(&dev_priv->wm.wm_mutex);
+
+ if (cstate->base.active_changed)
+ skl_atomic_update_crtc_wm(state, cstate);
skl_copy_wm_for_pipe(hw_vals, results, pipe);
@@ -4224,7 +4270,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (INTEL_INFO(dev)->gen >= 7 &&
+ if (INTEL_GEN(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
@@ -4242,7 +4288,8 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
+static void ilk_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4253,7 +4300,8 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
+static void ilk_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4266,114 +4314,75 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void skl_pipe_wm_active_state(uint32_t val,
- struct skl_pipe_wm *active,
- bool is_transwm,
- bool is_cursor,
- int i,
- int level)
+static inline void skl_wm_level_from_reg_val(uint32_t val,
+ struct skl_wm_level *level)
{
- bool is_enabled = (val & PLANE_WM_EN) != 0;
-
- if (!is_transwm) {
- if (!is_cursor) {
- active->wm[level].plane_en[i] = is_enabled;
- active->wm[level].plane_res_b[i] =
- val & PLANE_WM_BLOCKS_MASK;
- active->wm[level].plane_res_l[i] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- } else {
- active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
- active->wm[level].plane_res_b[PLANE_CURSOR] =
- val & PLANE_WM_BLOCKS_MASK;
- active->wm[level].plane_res_l[PLANE_CURSOR] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- }
- } else {
- if (!is_cursor) {
- active->trans_wm.plane_en[i] = is_enabled;
- active->trans_wm.plane_res_b[i] =
- val & PLANE_WM_BLOCKS_MASK;
- active->trans_wm.plane_res_l[i] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- } else {
- active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
- active->trans_wm.plane_res_b[PLANE_CURSOR] =
- val & PLANE_WM_BLOCKS_MASK;
- active->trans_wm.plane_res_l[PLANE_CURSOR] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- }
- }
+ level->plane_en = val & PLANE_WM_EN;
+ level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
+ level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
}
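
For reference, skl_wm_level_from_reg_val() above unpacks a watermark register into enable, blocks and lines fields. A standalone sketch of the same unpacking; the bit positions used here (enable in bit 31, a 5-bit lines field at shift 14, a 10-bit blocks field) follow the PLANE_WM_* definitions as understood, but treat them as illustrative rather than authoritative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WM_EN          (1u << 31)
    #define WM_BLOCKS_MASK 0x3ffu     /* bits 9:0  */
    #define WM_LINES_SHIFT 14
    #define WM_LINES_MASK  0x1fu      /* bits 18:14 */

    struct wm_level { bool en; uint16_t blocks; uint8_t lines; };

    static struct wm_level decode_wm(uint32_t val)
    {
            struct wm_level l = {
                    .en     = (val & WM_EN) != 0,
                    .blocks = val & WM_BLOCKS_MASK,
                    .lines  = (val >> WM_LINES_SHIFT) & WM_LINES_MASK,
            };
            return l;
    }

    int main(void)
    {
            uint32_t val = WM_EN | (3u << WM_LINES_SHIFT) | 42u;
            struct wm_level l = decode_wm(val);

            printf("en=%d blocks=%u lines=%u\n", l.en, l.blocks, l.lines);
            return 0;
    }
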
-static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+ struct skl_pipe_wm *out)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
+ struct intel_plane *intel_plane;
+ struct skl_plane_wm *wm;
enum pipe pipe = intel_crtc->pipe;
- int level, i, max_level;
- uint32_t temp;
-
- max_level = ilk_wm_max_level(dev);
-
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++)
- hw->plane[pipe][i][level] =
- I915_READ(PLANE_WM(pipe, i, level));
- hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
- }
-
- for (i = 0; i < intel_num_planes(intel_crtc); i++)
- hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
- hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
+ int level, id, max_level;
+ uint32_t val;
- if (!intel_crtc->active)
- return;
+ max_level = ilk_wm_max_level(dev_priv);
- hw->dirty_pipes |= drm_crtc_mask(crtc);
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ id = skl_wm_plane_id(intel_plane);
+ wm = &out->planes[id];
- active->linetime = hw->wm_linetime[pipe];
+ for (level = 0; level <= max_level; level++) {
+ if (id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM(pipe, id, level));
+ else
+ val = I915_READ(CUR_WM(pipe, level));
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = hw->plane[pipe][i][level];
- skl_pipe_wm_active_state(temp, active, false,
- false, i, level);
+ skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
- temp = hw->plane[pipe][PLANE_CURSOR][level];
- skl_pipe_wm_active_state(temp, active, false, true, i, level);
- }
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = hw->plane_trans[pipe][i];
- skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+ if (id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM_TRANS(pipe, id));
+ else
+ val = I915_READ(CUR_WM_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
- temp = hw->plane_trans[pipe][PLANE_CURSOR];
- skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+ if (!intel_crtc->active)
+ return;
- intel_crtc->wm.active.skl = *active;
+ out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *cstate;
skl_ddb_get_hw_state(dev_priv, ddb);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- skl_pipe_wm_get_hw_state(crtc);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ cstate = to_intel_crtc_state(crtc->state);
+
+ skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+
+ if (intel_crtc->active)
+ hw->dirty_pipes |= drm_crtc_mask(crtc);
+ }
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
@@ -4400,7 +4409,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
memset(active, 0, sizeof(*active));
@@ -4422,7 +4431,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
active->linetime = hw->wm_linetime[pipe];
} else {
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
/*
* For inactive pipes, all watermark levels
@@ -4534,11 +4543,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
plane->wm.fifo_size = 63;
break;
case DRM_PLANE_TYPE_PRIMARY:
- plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+ plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0);
break;
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
- plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+ plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1);
break;
}
}
@@ -4603,15 +4612,15 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev))
+ else if (IS_IVYBRIDGE(dev_priv))
hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
@@ -4651,9 +4660,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_crtc *crtc)
+void intel_update_watermarks(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(crtc);
@@ -5355,6 +5364,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
@@ -5376,7 +5386,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC(dev_priv))
@@ -5392,9 +5402,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
- /* WaRsUseTimeoutMode */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ /* WaRsUseTimeoutMode:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
@@ -5422,6 +5431,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
@@ -5438,7 +5448,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev_priv))
@@ -5498,6 +5508,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 rc6vids, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@@ -5531,7 +5542,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5568,10 +5579,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
- if (ret)
- DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
-
reset_rps(dev_priv, gen6_set_rps);
rc6vids = 0;
@@ -5861,7 +5868,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
if (WARN_ON(!dev_priv->vlv_pctx))
return;
- i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
+ i915_gem_object_put(dev_priv->vlv_pctx);
dev_priv->vlv_pctx = NULL;
}
@@ -5980,6 +5987,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6006,7 +6014,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -6068,6 +6076,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6107,7 +6116,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -6790,7 +6799,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
if (READ_ONCE(dev_priv->rps.enabled))
goto out;
- rcs = &dev_priv->engine[RCS];
+ rcs = dev_priv->engine[RCS];
if (rcs->last_context)
goto out;
@@ -6843,10 +6852,8 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
}
}
-static void ibx_init_clock_gating(struct drm_device *dev)
+static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
@@ -6855,9 +6862,8 @@ static void ibx_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
-static void g4x_disable_trickle_feed(struct drm_device *dev)
+static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -6870,10 +6876,8 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
}
}
-static void ilk_init_lp_watermarks(struct drm_device *dev)
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6884,9 +6888,8 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
*/
}
-static void ironlake_init_clock_gating(struct drm_device *dev)
+static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
@@ -6918,7 +6921,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/*
* Based on the document from hardware guys the following bits
@@ -6927,7 +6930,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
* The bit 22 of 0x42004
* The bit 7,8,9 of 0x42020.
*/
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -6953,14 +6956,13 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
/* WaDisable_RenderCache_OperationalFlush:ilk */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
- ibx_init_clock_gating(dev);
+ ibx_init_clock_gating(dev_priv);
}
-static void cpt_init_clock_gating(struct drm_device *dev)
+static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
uint32_t val;
@@ -6995,9 +6997,8 @@ static void cpt_init_clock_gating(struct drm_device *dev)
}
}
-static void gen6_check_mch_setup(struct drm_device *dev)
+static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
tmp = I915_READ(MCH_SSKPD);
@@ -7006,9 +7007,8 @@ static void gen6_check_mch_setup(struct drm_device *dev)
tmp);
}
-static void gen6_init_clock_gating(struct drm_device *dev)
+static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -7035,7 +7035,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -7096,11 +7096,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
- cpt_init_clock_gating(dev);
+ cpt_init_clock_gating(dev_priv);
- gen6_check_mch_setup(dev);
+ gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -7121,15 +7121,13 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
-static void lpt_init_clock_gating(struct drm_device *dev)
+static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
- if (HAS_PCH_LPT_LP(dev))
+ if (HAS_PCH_LPT_LP(dev_priv))
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -7140,11 +7138,9 @@ static void lpt_init_clock_gating(struct drm_device *dev)
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
-static void lpt_suspend_hw(struct drm_device *dev)
+static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -7175,11 +7171,9 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
-static void kabylake_init_clock_gating(struct drm_device *dev)
+static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
@@ -7196,11 +7190,9 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void skylake_init_clock_gating(struct drm_device *dev)
+static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:skl */
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
@@ -7211,12 +7203,11 @@ static void skylake_init_clock_gating(struct drm_device *dev)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void broadwell_init_clock_gating(struct drm_device *dev)
+static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7259,14 +7250,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
- lpt_init_clock_gating(dev);
+ lpt_init_clock_gating(dev_priv);
}
-static void haswell_init_clock_gating(struct drm_device *dev)
+static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
@@ -7315,15 +7304,14 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- lpt_init_clock_gating(dev);
+ lpt_init_clock_gating(dev_priv);
}
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t snpcr;
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -7337,7 +7325,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(dev))
+ if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
@@ -7353,7 +7341,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
- if (IS_IVB_GT1(dev))
+ if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
@@ -7380,7 +7368,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
gen7_setup_fixed_func_scheduler(dev_priv);
@@ -7410,16 +7398,14 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- if (!HAS_PCH_NOP(dev))
- cpt_init_clock_gating(dev);
+ if (!HAS_PCH_NOP(dev_priv))
+ cpt_init_clock_gating(dev_priv);
- gen6_check_mch_setup(dev);
+ gen6_check_mch_setup(dev_priv);
}
-static void valleyview_init_clock_gating(struct drm_device *dev)
+static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -7498,10 +7484,8 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
-static void cherryview_init_clock_gating(struct drm_device *dev)
+static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -7534,9 +7518,8 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
-static void g4x_init_clock_gating(struct drm_device *dev)
+static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7547,7 +7530,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
@@ -7558,13 +7541,11 @@ static void g4x_init_clock_gating(struct drm_device *dev)
/* WaDisable_RenderCache_OperationalFlush:g4x */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
}
-static void crestline_init_clock_gating(struct drm_device *dev)
+static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
@@ -7577,10 +7558,8 @@ static void crestline_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void broadwater_init_clock_gating(struct drm_device *dev)
+static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
@@ -7594,16 +7573,15 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void gen3_init_clock_gating(struct drm_device *dev)
+static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
@@ -7619,10 +7597,8 @@ static void gen3_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
-static void i85x_init_clock_gating(struct drm_device *dev)
+static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
@@ -7633,10 +7609,8 @@ static void i85x_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
-static void i830_init_clock_gating(struct drm_device *dev)
+static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(MEM_MODE,
@@ -7644,20 +7618,18 @@ static void i830_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
-void intel_init_clock_gating(struct drm_device *dev)
+void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->display.init_clock_gating(dev);
+ dev_priv->display.init_clock_gating(dev_priv);
}
-void intel_suspend_hw(struct drm_device *dev)
+void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_LPT(dev))
- lpt_suspend_hw(dev);
+ if (HAS_PCH_LPT(dev_priv))
+ lpt_suspend_hw(dev_priv);
}
-static void nop_init_clock_gating(struct drm_device *dev)
+static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
@@ -7712,29 +7684,28 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
}
/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_device *dev)
+void intel_init_pm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
intel_fbc_init(dev_priv);
/* For cxsr */
- if (IS_PINEVIEW(dev))
- i915_pineview_get_mem_freq(dev);
- else if (IS_GEN5(dev))
- i915_ironlake_get_mem_freq(dev);
+ if (IS_PINEVIEW(dev_priv))
+ i915_pineview_get_mem_freq(dev_priv);
+ else if (IS_GEN5(dev_priv))
+ i915_ironlake_get_mem_freq(dev_priv);
/* For FIFO watermark updates */
- if (INTEL_INFO(dev)->gen >= 9) {
- skl_setup_wm_latency(dev);
- dev_priv->display.update_wm = skl_update_wm;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_setup_wm_latency(dev_priv);
+ dev_priv->display.initial_watermarks = skl_initial_wm;
+ dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
- } else if (HAS_PCH_SPLIT(dev)) {
- ilk_setup_wm_latency(dev);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ ilk_setup_wm_latency(dev_priv);
- if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+ if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+ (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -7747,14 +7718,14 @@ void intel_init_pm(struct drm_device *dev)
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}
- } else if (IS_CHERRYVIEW(dev)) {
- vlv_setup_wm_latency(dev);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_VALLEYVIEW(dev)) {
- vlv_setup_wm_latency(dev);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_PINEVIEW(dev)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ } else if (IS_PINEVIEW(dev_priv)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
@@ -7768,15 +7739,15 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- } else if (IS_G4X(dev)) {
+ } else if (IS_G4X(dev_priv)) {
dev_priv->display.update_wm = g4x_update_wm;
- } else if (IS_GEN4(dev)) {
+ } else if (IS_GEN4(dev_priv)) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_GEN3(dev)) {
+ } else if (IS_GEN3(dev_priv)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_GEN2(dev)) {
- if (INTEL_INFO(dev)->num_pipes == 1) {
+ } else if (IS_GEN2(dev_priv)) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
@@ -8024,5 +7995,4 @@ void intel_pm_setup(struct drm_device *dev)
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
- atomic_set(&dev_priv->pm.atomic_seq, 0);
}
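
Much of the clock-gating and PM-hook churn above is one mechanical conversion: callbacks that took struct drm_device * now take struct drm_i915_private * directly, so each body drops its local to_i915() lookup. A schematic sketch of that pattern with stand-in types, not the driver's real definitions:

    #include <stdio.h>

    struct i915_priv_sketch { int gen; };   /* stand-in for drm_i915_private */

    struct display_hooks {
            /* before: void (*init_clock_gating)(struct drm_device *dev); */
            void (*init_clock_gating)(struct i915_priv_sketch *i915);
    };

    static void example_init_clock_gating(struct i915_priv_sketch *i915)
    {
            /* no "dev_priv = to_i915(dev)" needed any more */
            printf("clock gating set up for gen%d\n", i915->gen);
    }

    int main(void)
    {
            struct i915_priv_sketch i915 = { .gen = 9 };
            struct display_hooks hooks = {
                    .init_clock_gating = example_init_clock_gating,
            };

            hooks.init_clock_gating(&i915);
            return 0;
    }
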
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 108ba1e5d658..7b488e2793d9 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -268,7 +268,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
@@ -344,7 +344,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
*/
- if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+ if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false;
}
@@ -354,20 +354,20 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
return false;
}
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
@@ -402,7 +402,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
/* On HSW+ after we enable PSR on source it will activate it
* as soon as it match configure idle_frame count. So
* we just actually enable it here on activation time.
@@ -427,7 +427,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- if (!HAS_PSR(dev)) {
+ if (!HAS_PSR(dev_priv)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
@@ -448,7 +448,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
hsw_psr_setup_vsc(intel_dp);
if (dev_priv->psr.psr2_support) {
@@ -472,7 +472,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
@@ -498,7 +498,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
* - On HSW/BDW we get a recoverable frozen screen until next
* exit-activate sequence.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
@@ -580,7 +580,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
}
/* Disable PSR on Source */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
@@ -827,17 +827,17 @@ void intel_psr_init(struct drm_device *dev)
/* Per platform default */
if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
}
/* Set link_standby x link_off defaults */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
/* On VLV and CHV only standby mode is supported. */
dev_priv->psr.link_standby = true;
else
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ed9955dce156..aeb637dc1fdf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -405,22 +405,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u64 acthd;
-
- if (INTEL_GEN(dev_priv) >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
- RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_GEN(dev_priv) >= 4)
- acthd = I915_READ(RING_ACTHD(engine->mmio_base));
- else
- acthd = I915_READ(ACTHD);
-
- return acthd;
-}
-
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -585,9 +569,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
I915_WRITE_TAIL(engine, ring->tail);
(void)I915_READ_TAIL(engine);
- I915_WRITE_CTL(engine,
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_VALID);
+ I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
@@ -666,7 +648,7 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
if (ret != 0)
return ret;
- ret = i915_gem_render_state_init(req);
+ ret = i915_gem_render_state_emit(req);
if (ret)
return ret;
@@ -851,15 +833,13 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@@ -869,10 +849,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
@@ -884,9 +862,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
- /* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableMaskBasedCammingInRCC:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
@@ -1003,47 +980,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
- if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
- }
-
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
- /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
- }
-
- /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
- * involving this register should also be added to WA batch as required.
- */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
- /* WaDisableLSQCROPERFforOCL:skl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_RO_PERF_DIS);
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
- }
-
- /* WaDisablePowerCompilerClockGating:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
- WA_SET_BIT_MASKED(HIZ_CHICKEN,
- BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
-
- /* WaBarrierPerformanceFixDisable:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE |
- HDC_BARRIER_PERFORMANCE_DISABLE);
-
- /* WaDisableSbeCacheDispatchPortSharing:skl */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
@@ -1271,91 +1213,64 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
-static int gen8_rcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, (num_rings-1) * 8);
- if (ret)
- return ret;
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring,
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- intel_ring_emit(ring, lower_32_bits(gtt_offset));
- intel_ring_emit(ring, upper_32_bits(gtt_offset));
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring,
- MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(ring, 0);
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *out++ = lower_32_bits(gtt_offset);
+ *out++ = upper_32_bits(gtt_offset);
+ *out++ = req->global_seqno;
+ *out++ = 0;
+ *out++ = (MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ *out++ = 0;
}
- intel_ring_advance(ring);
- return 0;
+ return out;
}
-static int gen8_xcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
- int ret, num_rings;
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, (num_rings-1) * 6);
- if (ret)
- return ret;
-
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(ring,
- (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(ring,
- lower_32_bits(gtt_offset) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, upper_32_bits(gtt_offset));
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring,
- MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(ring, 0);
+ *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+ *out++ = upper_32_bits(gtt_offset);
+ *out++ = req->global_seqno;
+ *out++ = (MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ *out++ = 0;
}
- intel_ring_advance(ring);
- return 0;
+ return out;
}
-static int gen6_signal(struct drm_i915_gem_request *req)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
- if (ret)
- return ret;
+ enum intel_engine_id id;
+ int num_rings = 0;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
@@ -1363,101 +1278,80 @@ static int gen6_signal(struct drm_i915_gem_request *req)
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, mbox_reg);
- intel_ring_emit(ring, req->fence.seqno);
+ *out++ = MI_LOAD_REGISTER_IMM(1);
+ *out++ = i915_mmio_reg_offset(mbox_reg);
+ *out++ = req->global_seqno;
+ num_rings++;
}
}
+ if (num_rings & 1)
+ *out++ = MI_NOOP;
- /* If num_dwords was rounded, make sure the tail pointer is correct */
- if (num_rings % 2 == 0)
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- return 0;
+ return out;
}
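The hunks above convert the semaphore and breadcrumb emitters from reserving and advancing the ring themselves (intel_ring_begin()/intel_ring_emit()) to filling a caller-provided dword buffer and returning the advanced cursor; gen6_signal() pads with an MI_NOOP whenever it writes an odd number of dwords so the precomputed size stays qword-aligned. A minimal sketch of the resulting calling convention, where ring_reserve_dwords() is a hypothetical stand-in for however the request path reserves engine->emit_breadcrumb_sz dwords (not part of this patch):

	static void example_add_breadcrumb(struct drm_i915_gem_request *req)
	{
		/* hypothetical helper: reserve exactly emit_breadcrumb_sz dwords */
		u32 *out = ring_reserve_dwords(req->ring,
					       req->engine->emit_breadcrumb_sz);

		/* the emitter fills the buffer and records where it finished */
		req->engine->emit_breadcrumb(req, out);

		/* req->tail now points just past the final MI_USER_INTERRUPT,
		 * ready for engine->submit_request() to latch into hardware.
		 */
	}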
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
- I915_WRITE_TAIL(request->engine,
- intel_ring_offset(request->ring, request->tail));
+ i915_gem_request_submit(request);
+
+ I915_WRITE_TAIL(request->engine, request->tail);
}
-static int i9xx_emit_request(struct drm_i915_gem_request *req)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
- struct intel_ring *ring = req->ring;
- int ret;
-
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
+ *out++ = MI_STORE_DWORD_INDEX;
+ *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *out++ = req->global_seqno;
+ *out++ = MI_USER_INTERRUPT;
- req->tail = ring->tail;
-
- return 0;
+ req->tail = intel_ring_offset(req->ring, out);
}
+static const int i9xx_emit_breadcrumb_sz = 4;
+
/**
- * gen6_sema_emit_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
*
* @req - request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
- int ret;
-
- ret = req->engine->semaphore.signal(req);
- if (ret)
- return ret;
-
- return i9xx_emit_request(req);
+ return i9xx_emit_breadcrumb(req,
+ req->engine->semaphore.signal(req, out));
}
-static int gen8_render_emit_request(struct drm_i915_gem_request *req)
+static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
struct intel_engine_cs *engine = req->engine;
- struct intel_ring *ring = req->ring;
- int ret;
- if (engine->semaphore.signal) {
- ret = engine->semaphore.signal(req);
- if (ret)
- return ret;
- }
+ if (engine->semaphore.signal)
+ out = engine->semaphore.signal(req, out);
- ret = intel_ring_begin(req, 8);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(ring, intel_hws_seqno_address(engine));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+ PIPE_CONTROL_QW_WRITE);
+ *out++ = intel_hws_seqno_address(engine);
+ *out++ = 0;
+ *out++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- req->tail = ring->tail;
+ *out++ = 0;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
- return 0;
+ req->tail = intel_ring_offset(req->ring, out);
}
+static const int gen8_render_emit_breadcrumb_sz = 8;
+
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
@@ -1484,7 +1378,7 @@ gen8_ring_sync_to(struct drm_i915_gem_request *req,
MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_SAD_GTE_SDD);
- intel_ring_emit(ring, signal->fence.seqno);
+ intel_ring_emit(ring, signal->global_seqno);
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_advance(ring);
@@ -1522,7 +1416,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
- intel_ring_emit(ring, signal->fence.seqno - 1);
+ intel_ring_emit(ring, signal->global_seqno - 1);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1665,7 +1559,7 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@@ -1674,7 +1568,7 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~0);
- gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@@ -1819,14 +1713,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
+ obj = vma->obj;
+
i915_vma_unpin(vma);
- i915_gem_object_unpin_map(vma->obj);
- i915_vma_put(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_unless_active(obj);
}
static int init_status_page(struct intel_engine_cs *engine)
@@ -1834,9 +1733,10 @@ static int init_status_page(struct intel_engine_cs *engine)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags;
+ void *vaddr;
int ret;
- obj = i915_gem_object_create(&engine->i915->drm, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
@@ -1869,15 +1769,22 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_unpin;
+ }
+
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr =
- i915_gem_object_pin_map(obj, I915_MAP_WB);
+ engine->status_page.page_addr = memset(vaddr, 0, 4096);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
return 0;
+err_unpin:
+ i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ret;
@@ -1989,6 +1896,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
struct i915_vma *vma;
GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
@@ -2023,7 +1931,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
void
intel_ring_free(struct intel_ring *ring)
{
- i915_vma_put(ring->vma);
+ struct drm_i915_gem_object *obj = ring->vma->obj;
+
+ i915_vma_close(ring->vma);
+ __i915_gem_object_release_unless_active(obj);
+
kfree(ring);
}
@@ -2039,14 +1951,13 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0;
if (ce->state) {
- ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
- if (ret)
- goto error;
+ struct i915_vma *vma;
- ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
- PIN_GLOBAL | PIN_HIGH);
- if (ret)
+ vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto error;
+ }
}
/* The kernel context is only used as a placeholder for flushing the
@@ -2093,9 +2004,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
intel_engine_setup_common(engine);
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
-
ret = intel_engine_init_common(engine);
if (ret)
goto error;
@@ -2146,9 +2054,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
dev_priv = engine->i915;
if (engine->buffer) {
@@ -2175,13 +2080,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
intel_ring_context_unpin(dev_priv->kernel_context, engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->buffer->head = engine->buffer->tail;
engine->buffer->last_retired_head = -1;
}
@@ -2211,7 +2119,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
- int ret;
+ long timeout;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
intel_ring_update_space(ring);
if (ring->space >= bytes)
@@ -2241,11 +2151,11 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- ret = i915_wait_request(target,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NO_WAITBOOST);
- if (ret)
- return ret;
+ timeout = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
i915_gem_request_retire_upto(target);
@@ -2674,9 +2584,22 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->init_hw = init_ring_common;
engine->reset_hw = reset_ring_common;
- engine->emit_request = i9xx_emit_request;
- if (i915.semaphores)
- engine->emit_request = gen6_sema_emit_request;
+ engine->emit_breadcrumb = i9xx_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
+ if (i915.semaphores) {
+ int num_rings;
+
+ engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
+
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ if (INTEL_GEN(dev_priv) >= 8) {
+ engine->emit_breadcrumb_sz += num_rings * 6;
+ } else {
+ engine->emit_breadcrumb_sz += num_rings * 3;
+ if (num_rings & 1)
+ engine->emit_breadcrumb_sz++;
+ }
+ }
engine->submit_request = i9xx_submit_request;
if (INTEL_GEN(dev_priv) >= 8)
@@ -2703,10 +2626,18 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
- engine->emit_request = gen8_render_emit_request;
+ engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
engine->emit_flush = gen8_render_ring_flush;
- if (i915.semaphores)
+ if (i915.semaphores) {
+ int num_rings;
+
engine->semaphore.signal = gen8_rcs_signal;
+
+ num_rings =
+ hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ engine->emit_breadcrumb_sz += num_rings * 6;
+ }
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
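Because the breadcrumb is now written into a pre-sized buffer, emit_breadcrumb_sz has to cover every dword the emitters can produce. A sketch of the accounting set up by the hunks above, for the default (non-render) path only; the render engine instead swaps in its own 8-dword gen8 breadcrumb before adding its per-ring signal cost:

	static int example_breadcrumb_sz(int num_rings, int gen)
	{
		int sz = 4;			/* i9xx_emit_breadcrumb_sz */

		if (gen >= 8) {
			sz += num_rings * 6;	/* gen8_xcs_signal: 6 dwords/ring */
		} else {
			sz += num_rings * 3;	/* gen6_signal: LRI + reg + seqno */
			if (num_rings & 1)
				sz++;		/* MI_NOOP pad to an even total */
		}
		return sz;
	}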
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ec0b4a0c605d..3466b4e77e7c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -4,6 +4,7 @@
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
#define I915_CMD_HASH_ORDER 9
@@ -73,13 +74,40 @@ enum intel_engine_hangcheck_action {
#define HANGCHECK_SCORE_RING_HUNG 31
+#define I915_MAX_SLICES 3
+#define I915_MAX_SUBSLICES 3
+
+#define instdone_slice_mask(dev_priv__) \
+ (INTEL_GEN(dev_priv__) == 7 ? \
+ 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+ (INTEL_GEN(dev_priv__) == 7 ? \
+ 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+ for ((slice__) = 0, (subslice__) = 0; \
+ (slice__) < I915_MAX_SLICES; \
+ (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+ (slice__) += ((subslice__) == 0)) \
+ for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+ (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+
+struct intel_instdone {
+ u32 instdone;
+ /* The following exist only in the RCS engine */
+ u32 slice_common;
+ u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
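for_each_instdone_slice_subslice() walks the whole slice x subslice grid and relies on for_each_if() to skip pairs absent from the device's sseu masks (gen7 parts report a single unit). A small usage sketch, not taken from this patch, for dumping the per-unit values gathered in struct intel_instdone:

	static void example_dump_instdone(struct drm_i915_private *dev_priv,
					  const struct intel_instdone *instdone)
	{
		int slice, subslice;

		/* visits only (slice, subslice) pairs fused in on this device */
		for_each_instdone_slice_subslice(dev_priv, slice, subslice)
			DRM_DEBUG_DRIVER("slice=%d subslice=%d sampler=0x%08x row=0x%08x\n",
					 slice, subslice,
					 instdone->sampler[slice][subslice],
					 instdone->row[slice][subslice]);
	}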
+
struct intel_engine_hangcheck {
u64 acthd;
u32 seqno;
int score;
enum intel_engine_hangcheck_action action;
int deadlock;
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct intel_instdone instdone;
};
struct intel_ring {
@@ -130,6 +158,7 @@ struct i915_ctx_workarounds {
};
struct drm_i915_gem_request;
+struct intel_render_state;
struct intel_engine_cs {
struct drm_i915_private *i915;
@@ -141,7 +170,6 @@ struct intel_engine_cs {
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
-#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
enum intel_engine_hw_id {
@@ -152,10 +180,12 @@ struct intel_engine_cs {
VCS2_HW
} hw_id;
enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
- u64 fence_context;
u32 mmio_base;
unsigned int irq_shift;
struct intel_ring *buffer;
+ struct intel_timeline *timeline;
+
+ struct intel_render_state *render_state;
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@@ -177,7 +207,7 @@ struct intel_engine_cs {
struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
bool irq_posted;
- spinlock_t lock; /* protects the lists of requests */
+ spinlock_t lock; /* protects the lists of requests; irqsafe */
struct rb_root waiters; /* sorted by retirement, priority */
struct rb_root signals; /* sorted by retirement */
struct intel_wait *first_wait; /* oldest waiter by retirement */
@@ -225,7 +255,9 @@ struct intel_engine_cs {
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
- int (*emit_request)(struct drm_i915_gem_request *req);
+ void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
+ u32 *out);
+ int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
* the legacy ringbuffer or to the end of an execlist).
@@ -235,6 +267,15 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct drm_i915_gem_request *req);
+ /* Call when the priority on a request has changed and it and its
+ * dependencies may need rescheduling. Note the request itself may
+ * not be ready to run!
+ *
+ * Called under the struct_mutex.
+ */
+ void (*schedule)(struct drm_i915_gem_request *request,
+ int priority);
+
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -282,8 +323,6 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
- u32 sync_seqno[I915_NUM_ENGINES-1];
-
union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
@@ -300,43 +339,22 @@ struct intel_engine_cs {
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
- int (*signal)(struct drm_i915_gem_request *req);
+ u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
} semaphore;
/* Execlists */
struct tasklet_struct irq_tasklet;
- spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct execlist_port {
struct drm_i915_gem_request *request;
unsigned int count;
} execlist_port[2];
- struct list_head execlist_queue;
+ struct rb_root execlist_queue;
+ struct rb_node *execlist_first;
unsigned int fw_domains;
bool disable_lite_restore_wa;
bool preempt_wa;
u32 ctx_desc_template;
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
- * Seqno of request most recently submitted to request_list.
- * Used exclusively by hang checker to avoid grabbing lock while
- * inspecting request list.
- */
- u32 last_submitted_seqno;
- u32 last_pending_seqno;
-
- /* An RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_gem_active_get_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_gem_active last_request;
-
struct i915_gem_context *last_context;
struct intel_engine_hangcheck hangcheck;
@@ -368,39 +386,12 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline bool
-intel_engine_initialized(const struct intel_engine_cs *engine)
-{
- return engine->i915 != NULL;
-}
-
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
return 1 << engine->id;
}
-static inline u32
-intel_engine_sync_index(struct intel_engine_cs *engine,
- struct intel_engine_cs *other)
-{
- int idx;
-
- /*
- * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
- * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
- * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
- * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
- * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
- */
-
- idx = (other - engine) - 1;
- if (idx < 0)
- idx += I915_NUM_ENGINES;
-
- return idx;
-}
-
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
@@ -483,30 +474,23 @@ static inline void intel_ring_advance(struct intel_ring *ring)
*/
}
-static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- return value & (ring->size - 1);
+ u32 offset = addr - ring->vaddr;
+ return offset & (ring->size - 1);
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-static inline int intel_engine_idle(struct intel_engine_cs *engine,
- unsigned int flags)
-{
- /* Wait upon the last request to be completed */
- return i915_gem_active_wait_unlocked(&engine->last_request,
- flags, NULL, NULL);
-}
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
@@ -514,13 +498,30 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
+static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
+{
+ /* We are only peeking at the tail of the submit queue (and not the
+ * queue itself) in order to gain a hint as to the current active
+ * state of the engine. Callers are not expected to be taking
+ * engine->timeline->lock, nor are they expected to be concerned
+ * with serialising this hint with anything, so document it as
+ * a hint and nothing more.
+ */
+ return READ_ONCE(engine->timeline->last_submitted_seqno);
+}
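Paired with intel_engine_get_seqno() above, this hint allows a cheap, lock-free guess at whether the engine has caught up with everything submitted; a sketch of that kind of use (advisory only, exactly as the comment warns):

	static bool example_engine_looks_idle(struct intel_engine_cs *engine)
	{
		/* both sides are unlocked snapshots; treat the result as a hint */
		return intel_engine_get_seqno(engine) ==
		       intel_engine_last_submit(engine);
	}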
+
int init_workarounds_ring(struct intel_engine_cs *engine);
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+ struct intel_instdone *instdone);
+
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
@@ -586,12 +587,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-unsigned int intel_kick_waiters(struct drm_i915_private *i915);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915);
-
-static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
-{
- return i915_gem_active_isset(&engine->last_request);
-}
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
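The header also gains the optional engine->schedule() hook (see struct intel_engine_cs above) for re-evaluating a request and its dependencies when its priority changes, alongside the switch from a plain execlist_queue list to a sorted rb_root with a cached execlist_first. A sketch of how a caller might drive the hook under struct_mutex, as its comment requires; the priority value is illustrative:

	static void example_bump_priority(struct drm_i915_gem_request *rq, int prio)
	{
		struct intel_engine_cs *engine = rq->engine;

		lockdep_assert_held(&rq->i915->drm.struct_mutex);

		/* optional: only back-ends that can reorder provide it */
		if (engine->schedule)
			engine->schedule(rq, prio);
	}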
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6c11168facd6..356c662ad453 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -288,7 +288,6 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -304,7 +303,7 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@@ -331,7 +330,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
- if (power_well->data == SKL_DISP_PW_2) {
+ if (power_well->id == SKL_DISP_PW_2) {
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
@@ -344,7 +343,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (power_well->data == SKL_DISP_PW_2)
+ if (power_well->id == SKL_DISP_PW_2)
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@@ -659,7 +658,7 @@ static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum skl_disp_power_wells power_well_id = power_well->data;
+ enum skl_disp_power_wells power_well_id = power_well->id;
u32 val;
u32 mask;
@@ -704,7 +703,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
fuse_status = I915_READ(SKL_FUSE_STATUS);
- switch (power_well->data) {
+ switch (power_well->id) {
case SKL_DISP_PW_1:
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
@@ -728,13 +727,13 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
case SKL_DISP_PW_MISC_IO:
break;
default:
- WARN(1, "Unknown power well %lu\n", power_well->data);
+ WARN(1, "Unknown power well %lu\n", power_well->id);
return;
}
- req_mask = SKL_POWER_WELL_REQ(power_well->data);
+ req_mask = SKL_POWER_WELL_REQ(power_well->id);
enable_requested = tmp & req_mask;
- state_mask = SKL_POWER_WELL_STATE(power_well->data);
+ state_mask = SKL_POWER_WELL_STATE(power_well->id);
is_enabled = tmp & state_mask;
if (!enable && enable_requested)
@@ -770,14 +769,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
power_well->name, enable ? "enable" : "disable");
if (check_fuse_status) {
- if (power_well->data == SKL_DISP_PW_1) {
+ if (power_well->id == SKL_DISP_PW_1) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
1))
DRM_ERROR("PG1 distributing status timeout\n");
- } else if (power_well->data == SKL_DISP_PW_2) {
+ } else if (power_well->id == SKL_DISP_PW_2) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG2_DIST_STATUS,
@@ -819,8 +818,8 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
- SKL_POWER_WELL_STATE(power_well->data);
+ uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
+ SKL_POWER_WELL_STATE(power_well->id);
return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
@@ -846,45 +845,22 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
skl_set_power_well(dev_priv, power_well, false);
}
-static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
-{
- enum skl_disp_power_wells power_well_id = power_well->data;
-
- return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
-}
-
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum skl_disp_power_wells power_well_id = power_well->data;
- struct i915_power_well *cmn_a_well = NULL;
-
- if (power_well_id == BXT_DPIO_CMN_BC) {
- /*
- * We need to copy the GRC calibration value from the eDP PHY,
- * so make sure it's powered up.
- */
- cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
- intel_power_well_get(dev_priv, cmn_a_well);
- }
-
- bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
-
- if (cmn_a_well)
- intel_power_well_put(dev_priv, cmn_a_well);
+ bxt_ddi_phy_init(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_uninit(dev_priv, power_well->data);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv,
- bxt_power_well_to_phy(power_well));
+ return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -903,13 +879,11 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -933,7 +907,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
WARN_ON(dev_priv->cdclk_freq !=
- dev_priv->display.get_display_clock_speed(&dev_priv->drm));
+ dev_priv->display.get_display_clock_speed(dev_priv));
gen9_assert_dbuf_enabled(dev_priv);
@@ -976,7 +950,7 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- enum punit_power_well power_well_id = power_well->data;
+ enum punit_power_well power_well_id = power_well->id;
u32 mask;
u32 state;
u32 ctrl;
@@ -1030,7 +1004,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int power_well_id = power_well->data;
+ int power_well_id = power_well->id;
bool enabled = false;
u32 mask;
u32 state;
@@ -1092,7 +1066,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(&dev_priv->drm, pipe) {
+ for_each_pipe(dev_priv, pipe) {
u32 val = I915_READ(DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1123,7 +1097,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(&dev_priv->drm);
+ i915_redisable_vga_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
@@ -1139,13 +1113,15 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
intel_power_sequencer_reset(dev_priv);
- intel_hpd_poll_init(dev_priv);
+ /* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_set_power_well(dev_priv, power_well, true);
@@ -1155,7 +1131,7 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_display_power_well_deinit(dev_priv);
@@ -1165,7 +1141,7 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1191,7 +1167,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum pipe pipe;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
for_each_pipe(dev_priv, pipe)
assert_pll_disabled(dev_priv, pipe);
@@ -1214,7 +1190,7 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
struct i915_power_well *power_well;
power_well = &power_domains->power_wells[i];
- if (power_well->data == power_well_id)
+ if (power_well->id == power_well_id)
return power_well;
}
@@ -1338,10 +1314,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
uint32_t tmp;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
pipe = PIPE_A;
phy = DPIO_PHY0;
} else {
@@ -1369,7 +1345,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
@@ -1400,10 +1376,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
assert_pll_disabled(dev_priv, PIPE_A);
assert_pll_disabled(dev_priv, PIPE_B);
@@ -1552,7 +1528,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum pipe pipe = power_well->data;
+ enum pipe pipe = power_well->id;
bool enabled;
u32 state, ctrl;
@@ -1582,7 +1558,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool enable)
{
- enum pipe pipe = power_well->data;
+ enum pipe pipe = power_well->id;
u32 state;
u32 ctrl;
@@ -1615,7 +1591,7 @@ out:
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
@@ -1623,7 +1599,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, true);
@@ -1633,7 +1609,7 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
vlv_display_power_well_deinit(dev_priv);
@@ -1977,12 +1953,12 @@ static struct i915_power_well vlv_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .data = PUNIT_POWER_WELL_ALWAYS_ON,
+ .id = PUNIT_POWER_WELL_ALWAYS_ON,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
+ .id = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
{
@@ -1992,7 +1968,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
@@ -2001,7 +1977,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
@@ -2010,7 +1986,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
@@ -2019,12 +1995,12 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
},
};
@@ -2044,19 +2020,19 @@ static struct i915_power_well chv_power_wells[] = {
* required for any pipe to work.
*/
.domains = CHV_DISPLAY_POWER_DOMAINS,
- .data = PIPE_A,
+ .id = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
{
.name = "dpio-common-bc",
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
};
@@ -2079,57 +2055,57 @@ static struct i915_power_well skl_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .data = SKL_DISP_PW_ALWAYS_ON,
+ .id = SKL_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_1,
+ .id = SKL_DISP_PW_1,
},
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_MISC_IO,
+ .id = SKL_DISP_PW_MISC_IO,
},
{
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .data = SKL_DISP_PW_DC_OFF,
+ .id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_2,
+ .id = SKL_DISP_PW_2,
},
{
.name = "DDI A/E power well",
.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_A_E,
+ .id = SKL_DISP_PW_DDI_A_E,
},
{
.name = "DDI B power well",
.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_B,
+ .id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C power well",
.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_C,
+ .id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D power well",
.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_D,
+ .id = SKL_DISP_PW_DDI_D,
},
};
@@ -2144,31 +2120,33 @@ static struct i915_power_well bxt_power_wells[] = {
.name = "power well 1",
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_1,
+ .id = SKL_DISP_PW_1,
},
{
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .data = SKL_DISP_PW_DC_OFF,
+ .id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_2,
+ .id = SKL_DISP_PW_2,
},
{
.name = "dpio-common-a",
.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .data = BXT_DPIO_CMN_A,
+ .id = BXT_DPIO_CMN_A,
+ .data = DPIO_PHY1,
},
{
.name = "dpio-common-bc",
.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .data = BXT_DPIO_CMN_BC,
+ .id = BXT_DPIO_CMN_BC,
+ .data = DPIO_PHY0,
},
};
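With the rename above, power_well->id is the stable key consumed by lookup_power_well() and the SKL_POWER_WELL_REQ/STATE macros, while ->data is left free for per-platform payload; the Broxton wells use it to hand their DPIO phy straight to the enable/disable hooks. A sketch of that split (illustrative only):

	static void example_bxt_phy_for_well(struct drm_i915_private *dev_priv)
	{
		struct i915_power_well *well =
			lookup_power_well(dev_priv, BXT_DPIO_CMN_A);

		/* ->id located the well; ->data carries DPIO_PHY1 for it */
		bxt_ddi_phy_init(dev_priv, well->data);
	}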
@@ -2590,20 +2568,19 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
- struct drm_device *dev = &dev_priv->drm;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
skl_display_core_init(dev_priv, resume);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
mutex_unlock(&power_domains->lock);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
@@ -2738,8 +2715,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
- if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
- atomic_inc(&dev_priv->pm.atomic_seq);
+ atomic_dec(&dev_priv->pm.wakeref_count);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
@@ -2758,7 +2734,6 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
struct device *kdev = &pdev->dev;
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
@@ -2770,7 +2745,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
- if (!HAS_RUNTIME_PM(dev)) {
+ if (!HAS_RUNTIME_PM(dev_priv)) {
pm_runtime_dont_use_autosuspend(kdev);
pm_runtime_get_sync(kdev);
} else {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c551024d4871..27808e91cb5a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -251,7 +251,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
POSTING_READ(intel_sdvo->sdvo_reg);
}
@@ -307,7 +307,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
static const struct _sdvo_cmd_name {
u8 cmd;
const char *name;
-} sdvo_cmd_names[] = {
+} __attribute__ ((packed)) sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
@@ -1133,7 +1133,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
- if (HAS_PCH_SPLIT(encoder->base.dev))
+ if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
/* We need to construct preferred input timings based on our
@@ -1195,8 +1195,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_display_mode *mode = &crtc_state->base.mode;
@@ -1269,13 +1268,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
return;
/* Set the SDVO control regs. */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
@@ -1286,7 +1285,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT)
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1294,9 +1293,10 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
- } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -1304,7 +1304,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
- INTEL_INFO(dev)->gen < 5)
+ INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1339,7 +1339,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -1389,7 +1389,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* encoder->get_config so we already have a valid pixel multiplier on all
* other platforms.
*/
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1508,7 +1508,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, temp);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
@@ -1595,15 +1595,15 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
- struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
uint16_t hotplug;
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
@@ -2410,10 +2410,10 @@ static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
- struct drm_device *dev = connector->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+ if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
@@ -2981,6 +2981,7 @@ bool intel_sdvo_init(struct drm_device *dev,
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->port = port;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
"SDVO %c", port_name(port));
@@ -2996,7 +2997,7 @@ bool intel_sdvo_init(struct drm_device *dev,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 73a521fdf1bd..8f131a08d440 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -203,9 +203,6 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl;
@@ -227,13 +224,10 @@ skl_update_plane(struct drm_plane *drm_plane,
PLANE_CTL_PIPE_CSC_ENABLE;
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= skl_plane_ctl_rotation(rotation);
- if (wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_plane_wm(intel_crtc, wm, plane);
-
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
@@ -292,14 +286,6 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!dplane->state->visible)
- skl_write_plane_wm(to_intel_crtc(crtc),
- &dev_priv->wm.skl_results, plane);
-
I915_WRITE(PLANE_CTL(pipe, plane), 0);
I915_WRITE(PLANE_SURF(pipe, plane), 0);
@@ -358,7 +344,7 @@ vlv_update_plane(struct drm_plane *dplane,
int plane = intel_plane->plane;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
- unsigned int rotation = dplane->state->rotation;
+ unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
@@ -420,9 +406,15 @@ vlv_update_plane(struct drm_plane *dplane,
*/
sprctl |= SP_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SP_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ sprctl |= SP_MIRROR;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -432,11 +424,11 @@ vlv_update_plane(struct drm_plane *dplane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SP_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += src_w;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -450,13 +442,13 @@ vlv_update_plane(struct drm_plane *dplane,
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
- if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
else
I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
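Plane rotation is now read from the plane state as a bit mask, so the sprite paths test individual bits with '&' instead of comparing the whole value, and DRM_ROTATE_180 can combine with DRM_REFLECT_X. A condensed sketch of the VLV/CHV control-bit handling above (illustrative only):

	static u32 example_sprctl_rotation(unsigned int rotation)
	{
		u32 sprctl = 0;

		if (rotation & DRM_ROTATE_180)
			sprctl |= SP_ROTATE_180;
		if (rotation & DRM_REFLECT_X)
			sprctl |= SP_MIRROR;

		return sprctl;
	}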
@@ -539,15 +531,18 @@ ivb_update_plane(struct drm_plane *plane,
*/
sprctl |= SPRITE_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SPRITE_ROTATE_180;
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
/* Sizes are 0 based */
@@ -562,14 +557,11 @@ ivb_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SPRITE_ROTATE_180;
-
- /* HSW and BDW does this automagically in hardware */
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += src_w;
- y += src_h;
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += src_w;
+ y += src_h;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -590,9 +582,9 @@ ivb_update_plane(struct drm_plane *plane,
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
- else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(SPRLINOFF(pipe), linear_offset);
@@ -677,10 +669,13 @@ ilk_update_plane(struct drm_plane *plane,
*/
dvscntr |= DVS_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
- if (IS_GEN6(dev))
+ if (rotation & DRM_ROTATE_180)
+ dvscntr |= DVS_ROTATE_180;
+
+ if (IS_GEN6(dev_priv))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
/* Sizes are 0 based */
@@ -696,9 +691,7 @@ ilk_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dvscntr |= DVS_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
}
@@ -719,7 +712,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(DVSLINOFF(pipe), linear_offset);
@@ -753,7 +746,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -769,15 +762,8 @@ intel_check_sprite_plane(struct drm_plane *plane,
bool can_scale;
int ret;
- src->x1 = state->base.src_x;
- src->y1 = state->base.src_y;
- src->x2 = state->base.src_x + state->base.src_w;
- src->y2 = state->base.src_y + state->base.src_h;
-
- dst->x1 = state->base.crtc_x;
- dst->y1 = state->base.crtc_y;
- dst->x2 = state->base.crtc_x + state->base.crtc_w;
- dst->y2 = state->base.crtc_y + state->base.crtc_h;
+ *src = drm_plane_state_src(&state->base);
+ *dst = drm_plane_state_dest(&state->base);
if (!fb) {
state->base.visible = false;
@@ -797,7 +783,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
}
/* setup can_scale, min_scale, max_scale */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
/* use scaler when colorkey is not required */
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
can_scale = 1;
@@ -913,7 +899,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
- if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
+ if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
@@ -932,7 +918,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
- if (INTEL_GEN(dev) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
ret = skl_check_plane_surface(state);
if (ret)
return ret;
@@ -944,6 +930,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -955,7 +942,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -987,9 +974,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_backoff(&ctx);
}
- if (ret)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -1039,19 +1024,18 @@ static uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-int
-intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane)
{
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
+ unsigned int supported_rotations;
int num_plane_formats;
int ret;
- if (INTEL_INFO(dev)->gen < 5)
- return -ENODEV;
-
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane) {
ret = -ENOMEM;
@@ -1065,26 +1049,26 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
}
intel_plane->base.state = &state->base;
- switch (INTEL_INFO(dev)->gen) {
- case 5:
- case 6:
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_plane->can_scale = true;
- intel_plane->max_downscale = 16;
- intel_plane->update_plane = ilk_update_plane;
- intel_plane->disable_plane = ilk_disable_plane;
+ state->scaler_id = -1;
- if (IS_GEN6(dev)) {
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
- } else {
- plane_formats = ilk_plane_formats;
- num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
- }
- break;
+ intel_plane->update_plane = skl_update_plane;
+ intel_plane->disable_plane = skl_disable_plane;
+
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
+
+ intel_plane->update_plane = vlv_update_plane;
+ intel_plane->disable_plane = vlv_disable_plane;
- case 7:
- case 8:
- if (IS_IVYBRIDGE(dev)) {
+ plane_formats = vlv_plane_formats;
+ num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
@@ -1092,33 +1076,38 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- intel_plane->update_plane = vlv_update_plane;
- intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
- plane_formats = vlv_plane_formats;
- num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
- } else {
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 16;
+
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
}
- break;
- case 9:
- intel_plane->can_scale = true;
- intel_plane->update_plane = skl_update_plane;
- intel_plane->disable_plane = skl_disable_plane;
- state->scaler_id = -1;
+ }
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev)->gen);
- ret = -ENODEV;
- goto fail;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
+ } else {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180;
}
intel_plane->pipe = pipe;
@@ -1128,30 +1117,32 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
possible_crtcs = (1 << pipe);
- if (INTEL_INFO(dev)->gen >= 9)
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
+ if (INTEL_GEN(dev_priv) >= 9)
+ ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+ possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+ possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, plane));
if (ret)
goto fail;
- intel_create_rotation_property(dev, intel_plane);
+ drm_plane_create_rotation_property(&intel_plane->base,
+ DRM_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
- return 0;
+ return intel_plane;
fail:
kfree(state);
kfree(intel_plane);
- return ret;
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d960e4866595..78cdfc6833d6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -86,7 +86,8 @@ struct intel_tv {
};
struct video_levels {
- int blank, black, burst;
+ u16 blank, black;
+ u8 burst;
};
struct color_conversion {
@@ -339,34 +340,43 @@ static const struct video_levels component_levels = {
struct tv_mode {
const char *name;
- int clock;
- int refresh; /* in millihertz (for precision) */
+
+ u32 clock;
+ u16 refresh; /* in millihertz (for precision) */
u32 oversample;
- int hsync_end, hblank_start, hblank_end, htotal;
- bool progressive, trilevel_sync, component_only;
- int vsync_start_f1, vsync_start_f2, vsync_len;
- bool veq_ena;
- int veq_start_f1, veq_start_f2, veq_len;
- int vi_end_f1, vi_end_f2, nbr_end;
- bool burst_ena;
- int hburst_start, hburst_len;
- int vburst_start_f1, vburst_end_f1;
- int vburst_start_f2, vburst_end_f2;
- int vburst_start_f3, vburst_end_f3;
- int vburst_start_f4, vburst_end_f4;
+ u8 hsync_end;
+ u16 hblank_start, hblank_end, htotal;
+ bool progressive : 1, trilevel_sync : 1, component_only : 1;
+ u8 vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena : 1;
+ u8 veq_start_f1, veq_start_f2, veq_len;
+ u8 vi_end_f1, vi_end_f2;
+ u16 nbr_end;
+ bool burst_ena : 1;
+ u8 hburst_start, hburst_len;
+ u8 vburst_start_f1;
+ u16 vburst_end_f1;
+ u8 vburst_start_f2;
+ u16 vburst_end_f2;
+ u8 vburst_start_f3;
+ u16 vburst_end_f3;
+ u8 vburst_start_f4;
+ u16 vburst_end_f4;
/*
* subcarrier programming
*/
- int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+ u16 dda2_size, dda3_size;
+ u8 dda1_inc;
+ u16 dda2_inc, dda3_inc;
u32 sc_reset;
- bool pal_burst;
+ bool pal_burst : 1;
/*
* blank/black levels
*/
const struct video_levels *composite_levels, *svideo_levels;
const struct color_conversion *composite_color, *svideo_color;
const u32 *filter_table;
- int max_srcw;
+ u16 max_srcw;
};
@@ -846,7 +856,7 @@ intel_enable_tv(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
- intel_wait_for_vblank(encoder->base.dev,
+ intel_wait_for_vblank(dev_priv,
to_intel_crtc(encoder->base.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
@@ -1019,8 +1029,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1095,7 +1104,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (IS_I915GM(dev))
+ if (IS_I915GM(dev_priv))
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
@@ -1106,7 +1115,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
set_color_conversion(dev_priv, color_conversion);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1220,7 +1229,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* The TV sense state should be cleared to zero on cantiga platform. Otherwise
* the TV is misdetected. This is hardware requirement.
*/
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
@@ -1228,7 +1237,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
type = -1;
tv_dac = I915_READ(TV_DAC);
@@ -1258,7 +1267,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
POSTING_READ(TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1610,7 +1619,9 @@ intel_tv_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
+
intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->port = PORT_NONE;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->cloneable = 0;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ee2306a79747..d7be0d94ba4d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,19 +231,21 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
+ struct drm_i915_private *dev_priv = domain->i915;
unsigned long irqflags;
- assert_rpm_device_not_suspended(domain->i915);
+ assert_rpm_device_not_suspended(dev_priv);
- spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
- if (--domain->wake_count == 0)
- domain->i915->uncore.funcs.force_wake_put(domain->i915,
- 1 << domain->id);
+ if (--domain->wake_count == 0) {
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+ dev_priv->uncore.fw_domains_active &= ~domain->mask;
+ }
- spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return HRTIMER_NORESTART;
}
@@ -254,7 +256,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
- enum forcewake_domains fw = 0, active_domains;
+ enum forcewake_domains fw, active_domains;
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly. Wait until all pending
@@ -291,10 +293,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
WARN_ON(active_domains);
- for_each_fw_domain(domain, dev_priv)
- if (domain->wake_count)
- fw |= domain->mask;
-
+ fw = dev_priv->uncore.fw_domains_active;
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@@ -403,6 +402,8 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake)
{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
@@ -420,6 +421,10 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
+ /* Enable Decoupled MMIO only on BXT C stepping onwards */
+ if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ info->has_decoupled_mmio = false;
+
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
@@ -443,9 +448,6 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
{
struct intel_uncore_forcewake_domain *domain;
- if (!dev_priv->uncore.funcs.force_wake_get)
- return;
-
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -453,8 +455,10 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
fw_domains &= ~domain->mask;
}
- if (fw_domains)
+ if (fw_domains) {
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ dev_priv->uncore.fw_domains_active |= fw_domains;
+ }
}
/**
@@ -509,9 +513,6 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
{
struct intel_uncore_forcewake_domain *domain;
- if (!dev_priv->uncore.funcs.force_wake_put)
- return;
-
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -567,13 +568,10 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
- struct intel_uncore_forcewake_domain *domain;
-
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- for_each_fw_domain(domain, dev_priv)
- WARN_ON(domain->wake_count);
+ WARN_ON(dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -589,49 +587,148 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
__fwd; \
})
-#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
+static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
+{
+ if (offset < entry->start)
+ return -1;
+ else if (offset > entry->end)
+ return 1;
+ else
+ return 0;
+}
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5000, 0x8000) || \
- REG_RANGE((reg), 0xB000, 0x12000) || \
- REG_RANGE((reg), 0x2E000, 0x30000))
+/* Copied and "macroized" from lib/bsearch.c */
+#define BSEARCH(key, base, num, cmp) ({ \
+ unsigned int start__ = 0, end__ = (num); \
+ typeof(base) result__ = NULL; \
+ while (start__ < end__) { \
+ unsigned int mid__ = start__ + (end__ - start__) / 2; \
+ int ret__ = (cmp)((key), (base) + mid__); \
+ if (ret__ < 0) { \
+ end__ = mid__; \
+ } else if (ret__ > 0) { \
+ start__ = mid__ + 1; \
+ } else { \
+ result__ = (base) + mid__; \
+ break; \
+ } \
+ } \
+ result__; \
+})
+
+static enum forcewake_domains
+find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+{
+ const struct intel_forcewake_range *entry;
+
+ entry = BSEARCH(offset,
+ dev_priv->uncore.fw_domains_table,
+ dev_priv->uncore.fw_domains_table_entries,
+ fw_range_cmp);
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x22000, 0x24000) || \
- REG_RANGE((reg), 0x30000, 0x40000))
+ return entry ? entry->domains : 0;
+}
-#define __vlv_reg_read_fw_domains(offset) \
+static void
+intel_fw_table_check(struct drm_i915_private *dev_priv)
+{
+ const struct intel_forcewake_range *ranges;
+ unsigned int num_ranges;
+ s32 prev;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ return;
+
+ ranges = dev_priv->uncore.fw_domains_table;
+ if (!ranges)
+ return;
+
+ num_ranges = dev_priv->uncore.fw_domains_table_entries;
+
+ for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+ WARN_ON_ONCE(IS_GEN9(dev_priv) &&
+ (prev + 1) != (s32)ranges->start);
+ WARN_ON_ONCE(prev >= (s32)ranges->start);
+ prev = ranges->start;
+ WARN_ON_ONCE(prev >= (s32)ranges->end);
+ prev = ranges->end;
+ }
+}
+
+#define GEN_FW_RANGE(s, e, d) \
+ { .start = (s), .end = (e), .domains = (d) }
+
+#define HAS_FWTABLE(dev_priv) \
+ (IS_GEN9(dev_priv) || \
+ IS_CHERRYVIEW(dev_priv) || \
+ IS_VALLEYVIEW(dev_priv))
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __vlv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
+};
+
+#define __fwtable_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
+ if (NEEDS_FORCE_WAKE((offset))) \
+ __fwd = find_fw_domain(dev_priv, offset); \
__fwd; \
})
+/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
+ RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
/* TODO: Other registers are not yet used */
};
+static void intel_shadow_table_check(void)
+{
+ const i915_reg_t *reg = gen8_shadowed_regs;
+ s32 prev;
+ u32 offset;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ return;
+
+ for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
+ offset = i915_mmio_reg_offset(*reg);
+ WARN_ON_ONCE(prev >= (s32)offset);
+ prev = offset;
+ }
+}
+
+static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
+{
+ u32 offset = i915_mmio_reg_offset(*reg);
+
+ if (key < offset)
+ return -1;
+ else if (key > offset)
+ return 1;
+ else
+ return 0;
+}
+
static bool is_gen8_shadowed(u32 offset)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
- if (offset == gen8_shadowed_regs[i].reg)
- return true;
+ const i915_reg_t *regs = gen8_shadowed_regs;
- return false;
+ return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
+ mmio_reg_cmp);
}
#define __gen8_reg_write_fw_domains(offset) \
@@ -644,143 +741,70 @@ static bool is_gen8_shadowed(u32 offset)
__fwd; \
})
-#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5200, 0x8000) || \
- REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0xB000, 0xB480) || \
- REG_RANGE((reg), 0xE000, 0xE800))
-
-#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x8800, 0x8900) || \
- REG_RANGE((reg), 0xD000, 0xD800) || \
- REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x1A000, 0x1C000) || \
- REG_RANGE((reg), 0x1E800, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x38000))
-
-#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x4000, 0x5000) || \
- REG_RANGE((reg), 0x8000, 0x8300) || \
- REG_RANGE((reg), 0x8500, 0x8600) || \
- REG_RANGE((reg), 0x9000, 0xB000) || \
- REG_RANGE((reg), 0xF000, 0x10000))
-
-#define __chv_reg_read_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- __fwd; \
-})
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __chv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
+};
-#define __chv_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
+ __fwd = find_fw_domain(dev_priv, offset); \
__fwd; \
})
-#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
- REG_RANGE((reg), 0xB00, 0x2000)
-
-#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x2700) || \
- REG_RANGE((reg), 0x3000, 0x4000) || \
- REG_RANGE((reg), 0x5200, 0x8000) || \
- REG_RANGE((reg), 0x8140, 0x8160) || \
- REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0x8C00, 0x8D00) || \
- REG_RANGE((reg), 0xB000, 0xB480) || \
- REG_RANGE((reg), 0xE000, 0xE900) || \
- REG_RANGE((reg), 0x24400, 0x24800))
-
-#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x8130, 0x8140) || \
- REG_RANGE((reg), 0x8800, 0x8A00) || \
- REG_RANGE((reg), 0xD000, 0xD800) || \
- REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x1A000, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x40000))
-
-#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
- REG_RANGE((reg), 0x9400, 0x9800)
-
-#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
- ((reg) < 0x40000 && \
- !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
-
-#define SKL_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
-#define __gen9_reg_read_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (!SKL_NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- __fwd = FORCEWAKE_BLITTER; \
- __fwd; \
-})
-
-static const i915_reg_t gen9_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- /* TODO: Other registers are not yet used */
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen9_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-static bool is_gen9_shadowed(u32 offset)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
- if (offset == gen9_shadowed_regs[i].reg)
- return true;
-
- return false;
-}
-
-#define __gen9_reg_write_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- __fwd = FORCEWAKE_BLITTER; \
- __fwd; \
-})
-
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -815,6 +839,66 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
+static const enum decoupled_power_domain fw2dpd_domain[] = {
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_BLITTER,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+/*
+ * Decoupled MMIO access for only 1 DWORD
+ */
+static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain,
+ enum decoupled_ops operation)
+{
+ enum decoupled_power_domain dp_domain;
+ u32 ctrl_reg_data = 0;
+
+ dp_domain = fw2dpd_domain[fw_domain - 1];
+
+ ctrl_reg_data |= reg;
+ ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
+ ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
+ ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ GEN9_DECOUPLED_REG0_DW1) &
+ GEN9_DECOUPLED_DW1_GO) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Decoupled MMIO wait timed out\n");
+}
+
+static inline u32
+__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain)
+{
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_READ);
+
+ return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
+}
+
+static inline void
+__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 data,
+ enum forcewake_domains fw_domain)
+{
+
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
+
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_WRITE);
+}
+
+
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv);
@@ -869,26 +953,30 @@ __gen2_read(64)
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
- enum forcewake_domains fw_domains)
+static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv)
+ fw_domain_arm_timer(domain);
+
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ dev_priv->uncore.fw_domains_active |= fw_domains;
+}
+
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
if (WARN_ON(!fw_domains))
return;
- /* Ideally GCC would be constant-fold and eliminate this loop */
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
- if (domain->wake_count) {
- fw_domains &= ~domain->mask;
- continue;
- }
-
- fw_domain_arm_timer(domain);
- }
+ /* Turn on all requested but inactive supported forcewake domains. */
+ fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= ~dev_priv->uncore.fw_domains_active;
if (fw_domains)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ ___force_wake_auto(dev_priv, fw_domains);
}
#define __gen6_read(x) \
@@ -903,62 +991,50 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}
-#define __vlv_read(x) \
+#define __fwtable_read(x) \
static u##x \
-vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __vlv_reg_read_fw_domains(offset); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
-#define __chv_read(x) \
+#define __gen9_decoupled_read(x) \
static u##x \
-chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __chv_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- GEN6_READ_FOOTER; \
-}
-
-#define __gen9_read(x) \
-static u##x \
-gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_READ_HEADER(x); \
- fw_engine = __gen9_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
+ unsigned i; \
+ u32 *ptr_data = (u32 *) &val; \
+ for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
+ *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
+ offset, \
+ fw_engine); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
GEN6_READ_FOOTER; \
}
-__gen9_read(8)
-__gen9_read(16)
-__gen9_read(32)
-__gen9_read(64)
-__chv_read(8)
-__chv_read(16)
-__chv_read(32)
-__chv_read(64)
-__vlv_read(8)
-__vlv_read(16)
-__vlv_read(32)
-__vlv_read(64)
+__gen9_decoupled_read(32)
+__gen9_decoupled_read(64)
+__fwtable_read(8)
+__fwtable_read(16)
+__fwtable_read(32)
+__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
-#undef __gen9_read
-#undef __chv_read
-#undef __vlv_read
+#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
@@ -1054,21 +1130,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __hsw_write(x) \
-static void \
-hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- u32 __fifo_ret = 0; \
- GEN6_WRITE_HEADER; \
- if (NEEDS_FORCE_WAKE(offset)) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
- GEN6_WRITE_FOOTER; \
-}
-
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
@@ -1081,51 +1142,49 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __chv_write(x) \
+#define __fwtable_write(x) \
static void \
-chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __chv_reg_write_fw_domains(offset); \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-#define __gen9_write(x) \
+#define __gen9_decoupled_write(x) \
static void \
-gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
+gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __gen9_reg_write_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
+ __gen9_decoupled_mmio_write(dev_priv, \
+ offset, \
+ val, \
+ fw_engine); \
+ else \
+ __raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-__gen9_write(8)
-__gen9_write(16)
-__gen9_write(32)
-__chv_write(8)
-__chv_write(16)
-__chv_write(32)
+__gen9_decoupled_write(32)
+__fwtable_write(8)
+__fwtable_write(16)
+__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
-__hsw_write(8)
-__hsw_write(16)
-__hsw_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
-#undef __gen9_write
-#undef __chv_write
+#undef __fwtable_write
#undef __gen8_write
-#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
@@ -1314,6 +1373,13 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
+#define ASSIGN_FW_DOMAINS_TABLE(d) \
+{ \
+ dev_priv->uncore.fw_domains_table = \
+ (struct intel_forcewake_range *)(d); \
+ dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+}
+
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
i915_check_vgpu(dev_priv);
@@ -1327,13 +1393,23 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
switch (INTEL_INFO(dev_priv)->gen) {
default:
case 9:
- ASSIGN_WRITE_MMIO_VFUNCS(gen9);
- ASSIGN_READ_MMIO_VFUNCS(gen9);
+ ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ if (HAS_DECOUPLED_MMIO(dev_priv)) {
+ dev_priv->uncore.funcs.mmio_readl =
+ gen9_decoupled_read32;
+ dev_priv->uncore.funcs.mmio_readq =
+ gen9_decoupled_read64;
+ dev_priv->uncore.funcs.mmio_writel =
+ gen9_decoupled_write32;
+ }
break;
case 8:
if (IS_CHERRYVIEW(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(chv);
- ASSIGN_READ_MMIO_VFUNCS(chv);
+ ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
} else {
ASSIGN_WRITE_MMIO_VFUNCS(gen8);
@@ -1342,14 +1418,11 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
case 7:
case 6:
- if (IS_HASWELL(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(hsw);
- } else {
- ASSIGN_WRITE_MMIO_VFUNCS(gen6);
- }
+ ASSIGN_WRITE_MMIO_VFUNCS(gen6);
if (IS_VALLEYVIEW(dev_priv)) {
- ASSIGN_READ_MMIO_VFUNCS(vlv);
+ ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
} else {
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
@@ -1366,6 +1439,10 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
}
+ intel_fw_table_check(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 8)
+ intel_shadow_table_check();
+
if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
@@ -1408,7 +1485,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
+ (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
break;
}
@@ -1815,35 +1892,16 @@ static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
+ u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv))
- return 0;
-
- switch (INTEL_GEN(dev_priv)) {
- case 9:
- fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 8:
- if (IS_CHERRYVIEW(dev_priv))
- fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 7:
- case 6:
- if (IS_VALLEYVIEW(dev_priv))
- fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gen);
- case 5: /* forcewake was introduced with gen6 */
- case 4:
- case 3:
- case 2:
- return 0;
+ if (HAS_FWTABLE(dev_priv)) {
+ fw_domains = __fwtable_reg_read_fw_domains(offset);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ fw_domains = __gen6_reg_read_fw_domains(offset);
+ } else {
+ WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ fw_domains = 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1855,32 +1913,18 @@ static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
+ u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv))
- return 0;
-
- switch (INTEL_GEN(dev_priv)) {
- case 9:
- fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 8:
- if (IS_CHERRYVIEW(dev_priv))
- fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 7:
- case 6:
+ if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+ fw_domains = __fwtable_reg_write_fw_domains(offset);
+ } else if (IS_GEN8(dev_priv)) {
+ fw_domains = __gen8_reg_write_fw_domains(offset);
+ } else if (IS_GEN(dev_priv, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gen);
- case 5:
- case 4:
- case 3:
- case 2:
- return 0;
+ } else {
+ WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ fw_domains = 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1910,6 +1954,9 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
WARN_ON(!op);
+ if (intel_vgpu_active(dev_priv))
+ return 0;
+
if (op & FW_REG_READ)
fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 68db9621f1f0..8886cab19f98 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -280,7 +280,8 @@ struct common_child_dev_config {
u8 dp_support:1;
u8 tmds_support:1;
u8 support_reserved:5;
- u8 not_common3[12];
+ u8 aux_channel;
+ u8 not_common3[11];
u8 iboost_level;
} __packed;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 98df09c2b388..33404295b447 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -18,7 +18,6 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -151,38 +150,11 @@ static int imx_drm_atomic_check(struct drm_device *dev,
return ret;
}
-static int imx_drm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct drm_plane_state *plane_state;
- struct drm_plane *plane;
- struct dma_buf *dma_buf;
- int i;
-
- /*
- * If the plane fb has an dma-buf attached, fish out the exclusive
- * fence for the atomic helper to wait on.
- */
- for_each_plane_in_state(state, plane, plane_state, i) {
- if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
- dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
- 0)->base.dma_buf;
- if (!dma_buf)
- continue;
- plane_state->fence =
- reservation_object_get_excl_rcu(dma_buf->resv);
- }
- }
-
- return drm_atomic_helper_commit(dev, state, nonblock);
-}
-
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
- .atomic_commit = imx_drm_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
@@ -357,8 +329,8 @@ static int imx_drm_bind(struct device *dev)
int ret;
drm = drm_dev_alloc(&imx_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
if (!imxdrm) {
@@ -436,9 +408,11 @@ static int imx_drm_bind(struct device *dev)
err_fbhelper:
drm_kms_helper_poll_fini(drm);
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
if (imxdrm->fbhelper)
drm_fbdev_cma_fini(imxdrm->fbhelper);
err_unbind:
+#endif
component_unbind_all(drm->dev, drm);
err_vblank:
drm_vblank_cleanup(drm);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3ce391c239b0..b300998dce7d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -319,18 +319,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int mux, ret;
- /*
- * imx_ldb_encoder_disable is called by
- * drm_helper_disable_unused_functions without
- * the encoder being enabled before.
- */
- if (imx_ldb_ch == &ldb->channel[0] &&
- (ldb->ldb_ctrl & LDB_CH0_MODE_EN_MASK) == 0)
- return;
- else if (imx_ldb_ch == &ldb->channel[1] &&
- (ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
- return;
-
drm_panel_disable(imx_ldb_ch->panel);
if (imx_ldb_ch == &ldb->channel[0])
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 4e1ae3fc462d..6be515a9fb69 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
ipu_dc_disable_channel(ipu_crtc->dc);
ipu_di_disable(ipu_crtc->di);
+ /*
+	 * Planes must be disabled before the DC clock is removed, as otherwise
+	 * the attached IDMACs will be left in an undefined state, possibly
+	 * hanging the IPU or even the system.
+ */
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
ipu_dc_disable(ipu);
spin_lock_irq(&crtc->dev->event_lock);
@@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
}
spin_unlock_irq(&crtc->dev->event_lock);
- /* always disable planes on the CRTC */
- drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
-
drm_crtc_vblank_off(crtc);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index ce22d0a0ddc8..e74a0ad52950 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -50,6 +50,12 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
DRM_FORMAT_RGB565,
};
@@ -64,13 +70,14 @@ drm_plane_state_to_eba(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[0] +
- fb->pitches[0] * (state->src_y >> 16) +
- (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
+ return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y +
+ drm_format_plane_cpp(fb->pixel_format, 0) * x;
}
static inline unsigned long
@@ -79,13 +86,17 @@ drm_plane_state_to_ubo(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
unsigned long eba = drm_plane_state_to_eba(state);
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[1] +
- fb->pitches[1] * (state->src_y >> 16) / 2 +
- (state->src_x >> 16) / 2 - eba;
+ x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
+ y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
+
+ return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ drm_format_plane_cpp(fb->pixel_format, 1) * x - eba;
}
static inline unsigned long
@@ -94,69 +105,17 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
unsigned long eba = drm_plane_state_to_eba(state);
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[2] +
- fb->pitches[2] * (state->src_y >> 16) / 2 +
- (state->src_x >> 16) / 2 - eba;
-}
-
-static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
- struct drm_plane_state *old_state)
-{
- struct drm_plane *plane = &ipu_plane->base;
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
- unsigned long eba, ubo, vbo;
- int active;
-
- eba = drm_plane_state_to_eba(state);
-
- switch (fb->pixel_format) {
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- if (old_state->fb)
- break;
-
- /*
- * Multiplanar formats have to meet the following restrictions:
- * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
- * - EBA, UBO and VBO are a multiple of 8
- * - UBO and VBO are unsigned and not larger than 0xfffff8
- * - Only EBA may be changed while scanout is active
- * - The strides of U and V planes must be identical.
- */
- ubo = drm_plane_state_to_ubo(state);
- vbo = drm_plane_state_to_vbo(state);
-
- if (fb->pixel_format == DRM_FORMAT_YUV420)
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- fb->pitches[1], ubo, vbo);
- else
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- fb->pitches[1], vbo, ubo);
-
- dev_dbg(ipu_plane->base.dev->dev,
- "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
- state->src_x >> 16, state->src_y >> 16);
- break;
- default:
- dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
- eba, state->src_x >> 16, state->src_y >> 16);
-
- break;
- }
+ x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
+ y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
- if (old_state->fb) {
- active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
- ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
- } else {
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
- }
+ return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
+ drm_format_plane_cpp(fb->pixel_format, 2) * x - eba;
}
void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -259,6 +218,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+ int hsub, vsub;
/* Ok to disable */
if (!fb)
@@ -338,6 +298,10 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
/*
* Multiplanar formats have to meet the following restrictions:
* - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -346,30 +310,49 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* - Only EBA may be changed while scanout is active
* - The strides of U and V planes must be identical.
*/
- ubo = drm_plane_state_to_ubo(state);
vbo = drm_plane_state_to_vbo(state);
- if ((ubo & 0x7) || (vbo & 0x7))
+ if (vbo & 0x7 || vbo > 0xfffff8)
return -EINVAL;
- if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
- return -EINVAL;
-
- if (old_fb) {
- old_ubo = drm_plane_state_to_ubo(old_state);
+ if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
old_vbo = drm_plane_state_to_vbo(old_state);
- if (ubo != old_ubo || vbo != old_vbo)
- return -EINVAL;
+ if (vbo != old_vbo)
+ crtc_state->mode_changed = true;
}
if (fb->pitches[1] != fb->pitches[2])
return -EINVAL;
+ /* fall-through */
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(state);
+
+ if (ubo & 0x7 || ubo > 0xfffff8)
+ return -EINVAL;
+
+ if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ if (ubo != old_ubo)
+ crtc_state->mode_changed = true;
+ }
+
if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
return -EINVAL;
if (old_fb && old_fb->pitches[1] != fb->pitches[1])
crtc_state->mode_changed = true;
+
+ /*
+ * The x/y offsets must be even in case of horizontal/vertical
+ * chroma subsampling.
+ */
+ hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+ if (((state->src_x >> 16) & (hsub - 1)) ||
+ ((state->src_y >> 16) & (vsub - 1)))
+ return -EINVAL;
}
return 0;
@@ -386,15 +369,19 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
struct drm_plane_state *state = plane->state;
+ struct drm_crtc_state *crtc_state = state->crtc->state;
+ struct drm_framebuffer *fb = state->fb;
+ unsigned long eba, ubo, vbo;
enum ipu_color_space ics;
+ int active;
- if (old_state->fb) {
- struct drm_crtc_state *crtc_state = state->crtc->state;
+ eba = drm_plane_state_to_eba(state);
- if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
- ipu_plane_atomic_set_base(ipu_plane, old_state);
- return;
- }
+ if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
+ ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ return;
}
switch (ipu_plane->dp_flow) {
@@ -424,6 +411,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
break;
default:
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
}
}
@@ -437,11 +425,50 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
- ipu_plane_atomic_set_base(ipu_plane, old_state);
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
+ if (fb->pixel_format == DRM_FORMAT_YVU420 ||
+ fb->pixel_format == DRM_FORMAT_YVU422 ||
+ fb->pixel_format == DRM_FORMAT_YVU444)
+ swap(ubo, vbo);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, vbo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+ state->src_x >> 16, state->src_y >> 16);
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(state);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, ubo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu, x = %d, y = %d", eba, ubo,
+ state->src_x >> 16, state->src_y >> 16);
+ break;
+ default:
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+ eba, state->src_x >> 16, state->src_y >> 16);
+ break;
+ }
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
ipu_plane_enable(ipu_plane);
}
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+ .prepare_fb = drm_fb_cma_prepare_fb,
.atomic_check = ipu_plane_atomic_check,
.atomic_disable = ipu_plane_atomic_disable,
.atomic_update = ipu_plane_atomic_update,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 019b7ca392d7..c70310206ac5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
ddp_comp);
priv->crtc = crtc;
+ writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
}
@@ -250,13 +251,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
- IRQF_TRIGGER_NONE, dev_name(dev), priv);
- if (ret < 0) {
- dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
- return ret;
- }
-
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
@@ -272,6 +266,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
+ IRQF_TRIGGER_NONE, dev_name(dev), priv);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+ return ret;
+ }
+
ret = component_add(dev, &mtk_disp_ovl_component_ops);
if (ret)
dev_err(dev, "Failed to add component: %d\n", ret);
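[Editor's note] The reordering above moves devm_request_irq() after platform_set_drvdata(), so the OVL interrupt handler cannot run before the driver data it uses has been published. A small sketch of that ordering concern; every name in it is a hypothetical stand-in for the kernel APIs involved.

/*
 * Illustrative sketch: publish state before the handler can observe it.
 */
#include <stdio.h>

struct ovl_priv { int irq_count; };

static struct ovl_priv *published;	/* stand-in for platform drvdata */

static void fake_irq(void)
{
	if (published)			/* without the reorder this could be NULL */
		published->irq_count++;
}

static int fake_probe(struct ovl_priv *priv)
{
	published = priv;		/* 1. platform_set_drvdata() equivalent   */
	fake_irq();			/* 2. interrupt arriving right after request_irq() */
	return 0;
}

int main(void)
{
	struct ovl_priv priv = { 0 };

	fake_probe(&priv);
	printf("irq_count=%d\n", priv.irq_count);
	return 0;
}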
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0186e500d2a5..90fb831ef031 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
unsigned long pll_rate;
unsigned int factor;
+ /* keep pll_rate within the valid tvdpll range (1 GHz - 2 GHz) */
pix_rate = 1000UL * mode->clock;
- if (mode->clock <= 74000)
+ if (mode->clock <= 27000)
+ factor = 16 * 3;
+ else if (mode->clock <= 84000)
factor = 8 * 3;
- else
+ else if (mode->clock <= 167000)
factor = 4 * 3;
+ else
+ factor = 2 * 3;
pll_rate = pix_rate * factor;
dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
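[Editor's note] The extended factor table above keeps the TVDPLL output inside the 1-2 GHz range stated in its comment across a wider span of pixel clocks. A stand-alone sketch of the same selection, handy for checking a mode against that range; the thresholds are copied from the patch, the sample clocks are arbitrary.

/*
 * Illustrative sketch of the factor selection, in plain C.
 */
#include <stdio.h>

static unsigned long tvdpll_rate(unsigned long clock_khz)
{
	unsigned long pix_rate = 1000UL * clock_khz;	/* kHz -> Hz */
	unsigned int factor;

	if (clock_khz <= 27000)
		factor = 16 * 3;
	else if (clock_khz <= 84000)
		factor = 8 * 3;
	else if (clock_khz <= 167000)
		factor = 4 * 3;
	else
		factor = 2 * 3;

	return pix_rate * factor;	/* should land in ~1-2 GHz */
}

int main(void)
{
	unsigned long modes[] = { 27000, 74250, 148500, 297000 };

	for (int i = 0; i < 4; i++)
		printf("%lu kHz -> PLL %lu Hz\n", modes[i], tvdpll_rate(modes[i]));
	return 0;
}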
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index df33b3ca6ffd..48cc01fd20c7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -123,7 +123,7 @@ static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int bpc)
{
writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
- writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE);
+ writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
mtk_dither_set(comp, bpc, DISP_OD_CFG);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf83f6507ec8..4b7fe7eaec01 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/iommu.h>
#include <linux/of_address.h>
@@ -83,7 +84,7 @@ static void mtk_atomic_complete(struct mtk_drm_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void mtk_atomic_work(struct work_struct *work)
@@ -110,6 +111,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (async)
mtk_atomic_schedule(private, state);
else
@@ -247,16 +249,14 @@ static const struct file_operations mtk_drm_fops = {
.mmap = mtk_drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
};
static struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
@@ -415,7 +415,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DPI) {
dev_info(dev, "Adding component match for %s\n",
node->full_name);
- component_match_add(dev, &match, compare_of, node);
+ drm_of_component_match_add(dev, &match, compare_of,
+ node);
} else {
struct mtk_ddp_comp *comp;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 28b2044ed9f2..eaa5a2240c0c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -86,7 +86,7 @@
#define DSI_PHY_TIMECON0 0x110
#define LPX (0xff << 0)
-#define HS_PRPR (0xff << 8)
+#define HS_PREP (0xff << 8)
#define HS_ZERO (0xff << 16)
#define HS_TRAIL (0xff << 24)
@@ -102,10 +102,16 @@
#define CLK_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON3 0x11c
-#define CLK_HS_PRPR (0xff << 0)
+#define CLK_HS_PREP (0xff << 0)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
+#define T_LPX 5
+#define T_HS_PREP 6
+#define T_HS_TRAIL 8
+#define T_HS_EXIT 7
+#define T_HS_ZERO 10
+
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
struct phy;
@@ -161,20 +167,18 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
static void dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
- unsigned int ui, cycle_time;
- unsigned int lpx;
+ u32 ui, cycle_time;
ui = 1000 / dsi->data_rate + 0x01;
cycle_time = 8000 / dsi->data_rate + 0x01;
- lpx = 5;
- timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx;
- timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 |
- (4 * lpx);
+ timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
+ timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
+ T_HS_EXIT << 24;
timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
(NS_TO_CYCLE(0x150, cycle_time) << 16);
- timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 |
- NS_TO_CYCLE(0x40, cycle_time);
+ timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
+ NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -202,19 +206,47 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
struct device *dev = dsi->dev;
int ret;
+ u64 pixel_clock, total_bits;
+ u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
if (++dsi->refcount != 1)
return 0;
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB565:
+ bit_per_pixel = 16;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ bit_per_pixel = 18;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ bit_per_pixel = 24;
+ break;
+ }
+
/**
- * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio;
- * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000.
- * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi.
- * we set mipi_ratio is 1.05.
+ * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
+ * htotal_time = htotal * byte_per_pixel / num_lanes
+ * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
+ * mipi_ratio = (htotal_time + overhead_time) / htotal_time
+ * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
*/
- dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10);
+ pixel_clock = dsi->vm.pixelclock * 1000;
+ htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
+ dsi->vm.hsync_len;
+ htotal_bits = htotal * bit_per_pixel;
+
+ overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
+ T_HS_EXIT;
+ overhead_bits = overhead_cycles * dsi->lanes * 8;
+ total_bits = htotal_bits + overhead_bits;
+
+ dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
+ htotal * dsi->lanes);
- ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000);
+ ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
dev_err(dev, "Failed to set data rate: %d\n", ret);
goto err_refcount;
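[Editor's note] The new calculation above derives the per-lane HS bit rate from the pixel clock plus the LP-to-HS overhead cycles instead of the old fixed fudge factor. A self-contained sketch of the arithmetic follows; DIV_ROUND_UP_ULL is open-coded, and the sample timing is an arbitrary 1080p-like mode chosen for illustration, not taken from the patch.

/*
 * Illustrative sketch of the data-rate formula, outside the kernel.
 */
#include <stdio.h>
#include <stdint.h>

#define T_LPX		5
#define T_HS_PREP	6
#define T_HS_TRAIL	8
#define T_HS_EXIT	7
#define T_HS_ZERO	10

static uint64_t dsi_data_rate(uint32_t pixelclock_khz, uint32_t hactive,
			      uint32_t hfp, uint32_t hbp, uint32_t hsync,
			      uint32_t lanes, uint32_t bit_per_pixel)
{
	uint64_t pixel_clock = (uint64_t)pixelclock_khz * 1000;
	uint32_t htotal = hactive + hfp + hbp + hsync;
	uint64_t htotal_bits = (uint64_t)htotal * bit_per_pixel;
	uint32_t overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO +
				   T_HS_TRAIL + T_HS_EXIT;
	uint64_t total_bits = htotal_bits +
			      (uint64_t)overhead_cycles * lanes * 8;
	/* DIV_ROUND_UP_ULL(pixel_clock * total_bits, htotal * lanes) */
	uint64_t num = pixel_clock * total_bits;
	uint64_t den = (uint64_t)htotal * lanes;

	return (num + den - 1) / den;	/* per-lane HS bit rate in Hz */
}

int main(void)
{
	/* example: 1080p-ish timing, 4 lanes, RGB888 */
	printf("data rate = %llu Hz\n", (unsigned long long)
	       dsi_data_rate(148500, 1920, 88, 148, 44, 4, 24));
	return 0;
}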
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 71227deef21b..0e8c4d9af340 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
phy_power_on(hdmi->phy);
mtk_hdmi_aud_output_config(hdmi, mode);
- mtk_hdmi_setup_audio_infoframe(hdmi);
- mtk_hdmi_setup_avi_infoframe(hdmi, mode);
- mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
- if (mode->flags & DRM_MODE_FLAG_3D_MASK)
- mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
-
mtk_hdmi_hw_vid_black(hdmi, false);
mtk_hdmi_hw_aud_unmute(hdmi);
mtk_hdmi_hw_send_av_unmute(hdmi);
@@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
hdmi->powered = true;
}
+static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ mtk_hdmi_setup_audio_infoframe(hdmi);
+ mtk_hdmi_setup_avi_infoframe(hdmi, mode);
+ mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
+}
+
static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
phy_power_on(hdmi->phy);
+ mtk_hdmi_send_infoframe(hdmi, &hdmi->mode);
hdmi->enabled = true;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index 8a24754b440f..51cb9cfb6646 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
unsigned int pre_div;
unsigned int div;
+ unsigned int pre_ibias;
+ unsigned int hdmi_ibias;
+ unsigned int imp_en;
dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
rate, parent_rate);
@@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
(0x1 << PLL_BR_SHIFT),
RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
RG_HDMITX_PLL_BR);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
+ if (rate < 165000000) {
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_PRD_IMP_EN);
+ pre_ibias = 0x3;
+ imp_en = 0x0;
+ hdmi_ibias = hdmi_phy->ibias;
+ } else {
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_PRD_IMP_EN);
+ pre_ibias = 0x6;
+ imp_en = 0xf;
+ hdmi_ibias = hdmi_phy->ibias_up;
+ }
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
- (0x3 << PRD_IBIAS_CLK_SHIFT) |
- (0x3 << PRD_IBIAS_D2_SHIFT) |
- (0x3 << PRD_IBIAS_D1_SHIFT) |
- (0x3 << PRD_IBIAS_D0_SHIFT),
+ (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D2_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D1_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D0_SHIFT),
RG_HDMITX_PRD_IBIAS_CLK |
RG_HDMITX_PRD_IBIAS_D2 |
RG_HDMITX_PRD_IBIAS_D1 |
RG_HDMITX_PRD_IBIAS_D0);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
- (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
+ (imp_en << DRV_IMP_EN_SHIFT),
+ RG_HDMITX_DRV_IMP_EN);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
(hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
(hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
@@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
- (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
- RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
- RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
+ (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
+ RG_HDMITX_DRV_IBIAS_CLK |
+ RG_HDMITX_DRV_IBIAS_D2 |
+ RG_HDMITX_DRV_IBIAS_D1 |
+ RG_HDMITX_DRV_IBIAS_D0);
return 0;
}
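[Editor's note] The rate check above splits the PHY drive settings at 165 MHz, which looks like the single-link TMDS limit: below it the predriver impedance stays off and the nominal ibias is used; at or above it the impedance stage is enabled and the boosted ibias_up values apply. A sketch of that selection with a hypothetical config struct standing in for the values the driver reads from the device tree.

/*
 * Illustrative sketch of the 165 MHz split; all names are stand-ins.
 */
#include <stdio.h>
#include <stdbool.h>

struct phy_cfg {
	unsigned int ibias;	/* nominal drive current */
	unsigned int ibias_up;	/* boosted drive current for high rates */
};

static void pick_drive(const struct phy_cfg *phy, unsigned long rate,
		       unsigned int *pre_ibias, unsigned int *imp_en,
		       unsigned int *hdmi_ibias, bool *prd_imp)
{
	if (rate < 165000000UL) {
		*prd_imp = false;	/* predriver impedance disabled */
		*pre_ibias = 0x3;
		*imp_en = 0x0;
		*hdmi_ibias = phy->ibias;
	} else {
		*prd_imp = true;	/* predriver impedance enabled */
		*pre_ibias = 0x6;
		*imp_en = 0xf;
		*hdmi_ibias = phy->ibias_up;
	}
}

int main(void)
{
	struct phy_cfg phy = { .ibias = 0xa, .ibias_up = 0xc };
	unsigned int pre, imp, drv;
	bool prd;

	pick_drive(&phy, 297000000UL, &pre, &imp, &drv, &prd);
	printf("prd_imp=%d pre=%#x imp=%#x ibias=%#x\n", prd, pre, imp, drv);
	return 0;
}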
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1443b3a34775..b0b874264f9d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -82,9 +82,7 @@ static const struct file_operations mgag200_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = mgag200_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index dcf7d11ac380..5e20220ef4c6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.ttm_tt_populate = mgag200_ttm_tt_populate,
.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = mgag200_bo_evict_flags,
.move = NULL,
.verify_access = mgag200_bo_verify_access,
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4e2806cf778c..028c24df2291 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -6,6 +6,8 @@ msm-y := \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
+ adreno/a5xx_gpu.o \
+ adreno/a5xx_power.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
mdp/mdp5/mdp5_irq.o \
mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
+ mdp/mdp5/mdp5_pipe.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
@@ -48,6 +51,7 @@ msm-y := \
msm_gem_prime.o \
msm_gem_shrinker.o \
msm_gem_submit.o \
+ msm_gem_vma.o \
msm_gpu.o \
msm_iommu.o \
msm_perf.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index fee24297fb92..4be092f911f9 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -206,12 +207,12 @@ enum a2xx_rb_copy_sample_select {
};
enum a2xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_MIN_DST_SRC = 2,
- BLEND_MAX_DST_SRC = 3,
- BLEND_DST_MINUS_SRC = 4,
- BLEND_DST_PLUS_SRC_BIAS = 5,
+ BLEND2_DST_PLUS_SRC = 0,
+ BLEND2_SRC_MINUS_DST = 1,
+ BLEND2_MIN_DST_SRC = 2,
+ BLEND2_MAX_DST_SRC = 3,
+ BLEND2_DST_MINUS_SRC = 4,
+ BLEND2_DST_PLUS_SRC_BIAS = 5,
};
enum adreno_mmu_clnt_beh {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 27dabd5e57fb..a066c8b9eccd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -129,10 +130,14 @@ enum a3xx_tex_fmt {
TFMT_Z16_UNORM = 9,
TFMT_X8Z24_UNORM = 10,
TFMT_Z32_FLOAT = 11,
- TFMT_NV12_UV_TILED = 17,
- TFMT_NV12_Y_TILED = 19,
- TFMT_NV12_UV = 21,
- TFMT_NV12_Y = 23,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
TFMT_I420_Y = 24,
TFMT_I420_U = 26,
TFMT_I420_V = 27,
@@ -525,14 +530,6 @@ enum a3xx_uche_perfcounter_select {
UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
};
-enum a3xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a3xx_intp_mode {
SMOOTH = 0,
FLAT = 1,
@@ -1393,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
-#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1472,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index fd266ed963b6..b999349b7d2d 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -41,7 +41,7 @@ extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
-static void a3xx_me_init(struct msm_gpu *gpu)
+static bool a3xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
@@ -65,7 +65,7 @@ static void a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ return gpu->funcs->idle(gpu);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -294,15 +294,20 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
- a3xx_me_init(gpu);
-
- return 0;
+ return a3xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a3xx_recover(struct msm_gpu *gpu)
{
+ int i;
+
adreno_dump_info(gpu);
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a3xx_dump(gpu);
@@ -330,17 +335,22 @@ static void a3xx_destroy(struct msm_gpu *gpu)
kfree(a3xx_gpu);
}
-static void a3xx_idle(struct msm_gpu *gpu)
+static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
- A3XX_RBBM_STATUS_GPU_BUSY)))
+ A3XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
}
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
@@ -419,91 +429,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A3XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A3XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A3XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A3XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A3XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A3XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A3XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A3XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A3XX_SP_VS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A3XX_SP_FS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_A3XX_RBBM_PM_OVERRIDE2),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_A3XX_SQ_GPR_MANAGEMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_A3XX_SQ_INST_STORE_MANAGMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A3XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static const struct adreno_gpu_funcs funcs = {
@@ -583,7 +515,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3220b91f559a..4ce21b902779 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -46,6 +47,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum a4xx_color_fmt {
RB4_A8_UNORM = 1,
RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
RB4_R4G4B4A4_UNORM = 8,
RB4_R5G5B5A1_UNORM = 10,
RB4_R5G6B5_UNORM = 14,
@@ -89,17 +93,10 @@ enum a4xx_color_fmt {
enum a4xx_tile_mode {
TILE4_LINEAR = 0,
+ TILE4_2 = 2,
TILE4_3 = 3,
};
-enum a4xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a4xx_vtx_fmt {
VFMT4_32_FLOAT = 1,
VFMT4_32_32_FLOAT = 2,
@@ -940,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
{
return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
@@ -1043,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -1061,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -1073,12 +1071,18 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
}
#define REG_A4XX_RB_BLEND_RED 0x000020f0
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -1095,12 +1099,18 @@ static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
}
#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -1117,12 +1127,18 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
}
#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -1139,12 +1155,18 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
}
#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -1348,7 +1370,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
@@ -2177,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
-
static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -2272,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -2420,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -3117,6 +3151,8 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
@@ -3253,6 +3289,7 @@ static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}
#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000
#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
@@ -3670,6 +3707,8 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -3690,6 +3729,20 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BIN_BASE 0x000021c0
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
@@ -3752,12 +3805,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
{
return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
}
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A4XX_VBIF_VERSION 0x00003000
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index d0d3c7baa8fe..511bc855cc7f 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -113,7 +113,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
}
-static void a4xx_me_init(struct msm_gpu *gpu)
+static bool a4xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
@@ -137,7 +137,7 @@ static void a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ return gpu->funcs->idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -292,15 +292,20 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
- a4xx_me_init(gpu);
-
- return 0;
+ return a4xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a4xx_recover(struct msm_gpu *gpu)
{
+ int i;
+
adreno_dump_info(gpu);
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a4xx_dump(gpu);
@@ -328,17 +333,21 @@ static void a4xx_destroy(struct msm_gpu *gpu)
kfree(a4xx_gpu);
}
-static void a4xx_idle(struct msm_gpu *gpu)
+static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
- A4XX_RBBM_STATUS_GPU_BUSY)))
+ A4XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ return true;
}
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
@@ -460,87 +469,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A4XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A4XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A4XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A4XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A4XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A4XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A4XX_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A4XX_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A4XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A4XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_VS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_FS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A4XX_SP_VS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A4XX_SP_FS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A4XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A4XX_UCHE_INVALIDATE0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static void a4xx_dump(struct msm_gpu *gpu)
@@ -587,16 +522,8 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- uint32_t hi, lo, tmp;
-
- tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
- do {
- hi = tmp;
- lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
- tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
- } while (tmp != hi);
-
- *value = (((uint64_t)hi) << 32) | lo;
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A4XX_RBBM_PERFCTR_CP_0_HI);
return 0;
}
@@ -672,7 +599,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
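[Editor's note] The a4xx timestamp change earlier in this file drops an open-coded hi/lo/hi loop in favour of gpu_read64(), which presumably performs the same consistent read of a 64-bit counter split across two 32-bit registers. A sketch of the pattern being replaced; read_reg() is a hypothetical stand-in for gpu_read() and the "registers" are just static variables.

/*
 * Illustrative sketch of a consistent 64-bit counter read.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t counter_lo, counter_hi;	/* pretend hardware registers */

static uint32_t read_reg(int is_hi)
{
	return is_hi ? counter_hi : counter_lo;
}

static uint64_t read_counter64(void)
{
	uint32_t hi, lo, tmp;

	tmp = read_reg(1);
	do {
		hi = tmp;
		lo = read_reg(0);
		tmp = read_reg(1);	/* re-read high; retry if it rolled over */
	} while (tmp != hi);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	counter_hi = 1;
	counter_lo = 42;
	printf("%llu\n", (unsigned long long)read_counter64());
	return 0;
}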
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 000000000000..b6fe763ddf34
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,3757 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+
+Copyright (C) 2013-2016 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_R8_UNORM = 3,
+ RB5_R4G4B4A4_UNORM = 8,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R5G6B5_UNORM = 14,
+ RB5_R16_FLOAT = 23,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R16G16_FLOAT = 69,
+ RB5_R32_FLOAT = 74,
+ RB5_R16G16B16A16_FLOAT = 98,
+ RB5_R32G32_FLOAT = 103,
+ RB5_R32G32B32A32_FLOAT = 130,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_5_5_1_UNORM = 10,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_8_8_UNORM = 15,
+ TFMT5_8_8_SNORM = 16,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_8_8_8_UNORM = 49,
+ TFMT5_8_8_8_SNORM = 50,
+ TFMT5_9_9_9_E5_FLOAT = 53,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_11_11_10_FLOAT = 66,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+ TFETCH5_1_BYTE = 0,
+ TFETCH5_2_BYTE = 1,
+ TFETCH5_4_BYTE = 2,
+ TFETCH5_8_BYTE = 3,
+ TFETCH5_16_BYTE = 4,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_blit_buf {
+ BLIT_MRT0 = 0,
+ BLIT_MRT1 = 1,
+ BLIT_MRT2 = 2,
+ BLIT_MRT3 = 3,
+ BLIT_MRT4 = 4,
+ BLIT_MRT5 = 5,
+ BLIT_MRT6 = 6,
+ BLIT_MRT7 = 7,
+ BLIT_ZS = 8,
+ BLIT_Z32 = 9,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
+
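(Editorial sketch, not part of the patch: the CP_PROTECT helpers just above pack one protected register range into a single 32-bit entry. The snippet below only shows how those bitfield helpers compose; `gpu_write(gpu, reg, val)` is assumed to be the driver's usual register accessor, and the log2 encoding of the length is an assumption, not something this patch states.)

	/* Trap reads and writes to a register range starting at `base`.
	 * MASK_LEN is assumed to take the range length as a power of two. */
	gpu_write(gpu, REG_A5XX_CP_PROTECT_REG(idx),
		  A5XX_CP_PROTECT_REG_BASE_ADDR(base) |
		  A5XX_CP_PROTECT_REG_MASK_LEN(log2_count) |
		  A5XX_CP_PROTECT_REG_TRAP_WRITE |
		  A5XX_CP_PROTECT_REG_TRAP_READ);
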
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
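(Editorial sketch, not part of the patch: the per-source bits above are OR'd together to form the value written to REG_A5XX_RBBM_INT_0_MASK. The particular sources unmasked below are illustrative only, and `gpu_write()` is assumed rather than taken from this patch.)

	/* Unmask a hand-picked set of error/hang interrupt sources. */
	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK,
		  A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR |
		  A5XX_RBBM_INT_0_MASK_CP_HW_ERROR |
		  A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
		  A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
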
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001 0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CNTL 0x0000e005
+#define A5XX_GRAS_CNTL_VARYING 0x00000001
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A5XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E093 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
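(Editorial sketch, not part of the patch: the TL/BR pair above describes a scissor rectangle by its top-left and bottom-right corners, each corner packed with the matching X/Y helpers. Only the value packing is shown; the coordinate variables are illustrative.)

	/* Pack the two corner registers for a scissor from (x1,y1) to (x2,y2). */
	uint32_t tl = A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
		      A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1);
	uint32_t br = A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
		      A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2);
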
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147
+#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2
+
+#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3
+
+#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4
+#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
+#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E1C7 0x0000e1c7
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
+#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000003f
+#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
+{
+ return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+
+#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
+
+#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215
+
+#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216
+#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b
+
+#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
+#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
+#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
+static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_VPC_CNTL_0_VARYING 0x00000800
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+#define REG_A5XX_UNKNOWN_E292 0x0000e292
+
+#define REG_A5XX_UNKNOWN_E293 0x0000e293
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_UNKNOWN_E29A 0x0000e29a
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_UNKNOWN_E2A1 0x0000e2a1
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9
+
+#define REG_A5XX_UNKNOWN_E2AB 0x0000e2ab
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad
+
+#define REG_A5XX_UNKNOWN_E2AE 0x0000e2ae
+
+#define REG_A5XX_UNKNOWN_E2B2 0x0000e2b2
+
+#define REG_A5XX_UNKNOWN_E2B9 0x0000e2b9
+
+#define REG_A5XX_UNKNOWN_E2C0 0x0000e2c0
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
+#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+
+#define REG_A5XX_UNKNOWN_E389 0x0000e389
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_UNKNOWN_E38D 0x0000e38d
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0xc0000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 30
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592
+#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
+#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val >> 2) << A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
+
+#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0
+
+#define REG_A5XX_UNKNOWN_E600 0x0000e600
+
+#define REG_A5XX_UNKNOWN_E640 0x0000e640
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_UNKNOWN_E7C0 0x0000e7c0
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5
+
+#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8
+
+#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9
+
+#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd
+
+#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce
+
+#define REG_A5XX_UNKNOWN_E7CF 0x0000e7cf
+
+#define REG_A5XX_HLSQ_GS_CONSTLEN 0x0000e7d2
+
+#define REG_A5XX_HLSQ_GS_INSTRLEN 0x0000e7d3
+
+#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4
+
+#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd
+
+#define REG_A5XX_RB_2D_DST_FILL 0x00002101
+
+#define REG_A5XX_RB_2D_SRC_INFO 0x00002107
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_LO 0x00002108
+
+#define REG_A5XX_RB_2D_SRC_HI 0x00002109
+
+#define REG_A5XX_RB_2D_DST_INFO 0x00002110
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
+
+#define REG_A5XX_RB_2D_DST_LO 0x00002111
+
+#define REG_A5XX_RB_2D_DST_HI 0x00002112
+
+#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
+
+#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+
+#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+#define A5XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A5XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+ return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A5XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 000000000000..b8647198c11c
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,888 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+
+extern bool hang_debug;
+static void a5xx_dump(struct msm_gpu *gpu);
+
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb;
+ unsigned int i, ibs = 0;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
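+ /* fall through: submit the restore IB when the context has changed */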
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
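+ /* Stash the fence seqno in a CP scratch register so it shows up in dumps */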
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->fence->seqno);
+
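+ /* Have the CP write the seqno to the fence memptr and raise a CACHE_FLUSH_TS IRQ (bit 31) */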
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
+ OUT_RING(ring, submit->fence->seqno);
+
+ gpu->funcs->flush(gpu);
+}
+
+struct a5xx_hwcg {
+ u32 offset;
+ u32 value;
+};
+
+static const struct a5xx_hwcg a530_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+static const struct {
+ int (*test)(struct adreno_gpu *gpu);
+ const struct a5xx_hwcg *regs;
+ unsigned int count;
+} a5xx_hwcg_regs[] = {
+ { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
+};
+
+static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
+ const struct a5xx_hwcg *regs, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ gpu_write(gpu, regs[i].offset, regs[i].value);
+
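+ /* Turn on hardware clock gating globally */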
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
+}
+
+static void a5xx_enable_hwcg(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
+ if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
+ _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
+ a5xx_hwcg_regs[i].count);
+ return;
+ }
+ }
+}
+
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002F);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* Specify workarounds for various microcode issues */
+ if (adreno_is_a530(adreno_gpu)) {
+ /* Workaround for token end syncs
+ * Force a WFI after every direct-render 3D mode draw and every
+ * 2D mode 3 draw
+ */
+ OUT_RING(ring, 0x0000000B);
+ } else {
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu);
+
+ return gpu->funcs->idle(gpu) ? 0 : -EINVAL;
+}
+
+static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_device *drm = gpu->dev;
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ mutex_lock(&drm->struct_mutex);
+ bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(bo))
+ return bo;
+
+ ptr = msm_gem_get_vaddr(bo);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (iova) {
+ int ret = msm_gem_get_iova(bo, gpu->id, iova);
+
+ if (ret) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(ret);
+ }
+ }
+
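+ /* Skip the version dword at the start of the firmware image */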
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+
+ msm_gem_put_vaddr(bo);
+ return bo;
+}
+
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret;
+
+ if (!a5xx_gpu->pm4_bo) {
+ a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+ &a5xx_gpu->pm4_iova);
+
+ if (IS_ERR(a5xx_gpu->pm4_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!a5xx_gpu->pfp_bo) {
+ a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+ &a5xx_gpu->pfp_iova);
+
+ if (IS_ERR(a5xx_gpu->pfp_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
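+ /* Point the CP at the newly loaded PM4 and PFP microcode */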
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+ REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+ REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+ return 0;
+}
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
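+ /* Set up VBIF round robin QoS arbitration */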
+ gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Enable RBBM error reporting bits */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ /*
+ * Mask out the activity signals from RB1-3 to avoid false
+ * positives
+ */
+
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+ 0xF0000000);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+ 0xFFFFFFFF);
+ }
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0xFFFF);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+ /* Increase VFD cache access so LRZ and other data gets evicted less */
+ gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+ /* Set the GMEM VA range (0x100000 to 0x100000 + gpu->gmem - 1) */
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+ /* Enable ME/PFP split notification */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+ /* Enable HWCG */
+ a5xx_enable_hwcg(gpu);
+
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+ /* Set the highest bank bit */
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+ /* RBBM */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+ /* Content protect */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+ /* CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+ /* RB */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+ /* VPC */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+ /* UCHE */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Load the GPMU firmware before starting the HW init */
+ a5xx_gpmu_ucode_init(gpu);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ ret = a5xx_ucode_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Unmask the GPU interrupts (the host IRQ line stays disabled through initial bringup) */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+ /* Clear ME_HALT to start the micro engine */
+ gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+ ret = a5xx_me_init(gpu);
+ if (ret)
+ return ret;
+
+ ret = a5xx_power_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send a pipeline event stat to get misbehaving counters to start
+ * ticking correctly
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb, 0x0F);
+
+ gpu->funcs->flush(gpu);
+ if (!gpu->funcs->idle(gpu))
+ return -EINVAL;
+ }
+
+ /* Put the GPU into unsecure mode */
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+ return 0;
+}
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
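+ /* Dump the CP scratch registers - SCRATCH_REG2 holds the last submitted fence seqno */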
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
+ }
+
+ if (hang_debug)
+ a5xx_dump(gpu);
+
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->gpmu_bo) {
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ }
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+ return false;
+
+ /*
+ * Nearly every abnormality ends up pausing the GPU and triggering a
+ * fault so we can safely just watch for this one interrupt to fire
+ */
+ return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+static bool a5xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu))
+ return false;
+
+ if (spin_until(_a5xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+ return false;
+ }
+
+ return true;
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+ if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+ /*
+ * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+ * read it twice
+ */
+
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+ val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+ dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+ if (status & A5XX_CP_INT_CP_DMA_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+ if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 24) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, val);
+ }
+
+ if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+ u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+ const char *access[16] = { "reserved", "reserved",
+ "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+ "", "", "me read", "me write", "", "", "crashdump read",
+ "crashdump write" };
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+ status & 0xFFFFF, access[(status >> 24) & 0xF],
+ (status & (1 << 31)), status);
+ }
+}
+
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+ val & (1 << 28) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+ (val >> 24) & 0xF);
+
+ /* Clear the error */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+ uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);
+
+ addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+ dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+ addr);
+}
+
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+ dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+#define RBBM_ERROR_MASK \
+ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
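+ /* Ack the sampled interrupt bits before running the handlers */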
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (status & RBBM_ERROR_MASK)
+ a5xx_rbbm_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a5xx_cp_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ a5xx_uche_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+ a5xx_gpmu_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A5XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
+static const u32 a5xx_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
+ 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+ 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
+ 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
+ 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
+ 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
+ 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
+ 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
+ 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
+ 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
+ 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
+ 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
+ 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
+ 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
+ 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
+ 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
+ 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
+ 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
+ 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
+ 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
+ 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
+ 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
+ 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
+ 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
+ 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
+ ~0
+};
+
+static void a5xx_dump(struct msm_gpu *gpu)
+{
+ dev_info(gpu->dev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Turn on the core power */
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ /* Turn on the RBCCU domain first to limit the chances of voltage droop */
+ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+ /* Wait 3 usecs before polling */
+ udelay(3);
+
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret) {
+ DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+ gpu->name,
+ gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+ return ret;
+ }
+
+ /* Turn on the SP domain */
+ gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret)
+ DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+ gpu->name);
+
+ return ret;
+}
+
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ /*
+ * Reset the VBIF before power collapse to avoid issues with FIFO
+ * entries
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+
+ return msm_gpu_pm_suspend(gpu);
+}
+
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ gpu->funcs->pm_resume(gpu);
+
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ gpu->funcs->pm_suspend(gpu);
+
+ adreno_show(gpu, m);
+}
+#endif
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a5xx_hw_init,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .last_fence = adreno_last_fence,
+ .submit = a5xx_submit,
+ .flush = adreno_flush,
+ .idle = a5xx_idle,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+ .show = a5xx_show,
+ },
+ .get_timestamp = a5xx_get_timestamp,
+};
+
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct a5xx_gpu *a5xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "No A5XX device is defined\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+ if (!a5xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a5xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ a5xx_gpu->pdev = pdev;
+ adreno_gpu->registers = a5xx_registers;
+ adreno_gpu->reg_offsets = a5xx_register_offsets;
+
+ a5xx_gpu->lm_leakage = 0x4E001A;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ if (ret) {
+ a5xx_destroy(&(a5xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 000000000000..1590f845d554
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+struct a5xx_gpu {
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+
+ struct drm_gem_object *pm4_bo;
+ uint64_t pm4_iova;
+
+ struct drm_gem_object *pfp_bo;
+ uint64_t pfp_iova;
+
+ struct drm_gem_object *gpmu_bo;
+ uint64_t gpmu_iova;
+ uint32_t gpmu_dwords;
+
+ uint32_t lm_leakage;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
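+/* Poll a register once a microsecond until (reg & mask) == value, or time out */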
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+ uint32_t reg, uint32_t mask, uint32_t value)
+{
+ while (usecs--) {
+ udelay(1);
+ if ((gpu_read(gpu, reg) & mask) == value)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 000000000000..72d52c71f769
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,344 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are defined by convention
+ * with the GPMU firmware and are not bound to any specific hardware design
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
+static struct {
+ uint32_t reg;
+ uint32_t value;
+} a5xx_sequence_regs[] = {
+ { 0xB9A1, 0x00010303 },
+ { 0xB9A2, 0x13000000 },
+ { 0xB9A3, 0x00460020 },
+ { 0xB9A4, 0x10000000 },
+ { 0xB9A5, 0x040A1707 },
+ { 0xB9A6, 0x00010000 },
+ { 0xB9A7, 0x0E000904 },
+ { 0xB9A8, 0x10000000 },
+ { 0xB9A9, 0x01165000 },
+ { 0xB9AA, 0x000E0002 },
+ { 0xB9AB, 0x03884141 },
+ { 0xB9AC, 0x10000840 },
+ { 0xB9AD, 0x572A5000 },
+ { 0xB9AE, 0x00000003 },
+ { 0xB9AF, 0x00000000 },
+ { 0xB9B0, 0x10000000 },
+ { 0xB828, 0x6C204010 },
+ { 0xB829, 0x6C204011 },
+ { 0xB82A, 0x6C204012 },
+ { 0xB82B, 0x6C204013 },
+ { 0xB82C, 0x6C204014 },
+ { 0xB90F, 0x00000004 },
+ { 0xB910, 0x00000002 },
+ { 0xB911, 0x00000002 },
+ { 0xB912, 0x00000002 },
+ { 0xB913, 0x00000002 },
+ { 0xB92F, 0x00000004 },
+ { 0xB930, 0x00000005 },
+ { 0xB931, 0x00000005 },
+ { 0xB932, 0x00000005 },
+ { 0xB933, 0x00000005 },
+ { 0xB96F, 0x00000001 },
+ { 0xB970, 0x00000003 },
+ { 0xB94F, 0x00000004 },
+ { 0xB950, 0x0000000B },
+ { 0xB951, 0x0000000B },
+ { 0xB952, 0x0000000B },
+ { 0xB953, 0x0000000B },
+ { 0xB907, 0x00000019 },
+ { 0xB927, 0x00000019 },
+ { 0xB947, 0x00000019 },
+ { 0xB967, 0x00000019 },
+ { 0xB987, 0x00000019 },
+ { 0xB906, 0x00220001 },
+ { 0xB926, 0x00220001 },
+ { 0xB946, 0x00220001 },
+ { 0xB966, 0x00220001 },
+ { 0xB986, 0x00300000 },
+ { 0xAC40, 0x0340FF41 },
+ { 0xAC41, 0x03BEFED0 },
+ { 0xAC42, 0x00331FED },
+ { 0xAC43, 0x021FFDD3 },
+ { 0xAC44, 0x5555AAAA },
+ { 0xAC45, 0x5555AAAA },
+ { 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+ return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+/* Setup thermal limit management */
+static void a5xx_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned int i;
+
+ /* Write the block of sequence registers */
+ for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+ gpu_write(gpu, a5xx_sequence_regs[i].reg,
+ a5xx_sequence_regs[i].value);
+
+ /* Hard code the A530 GPU thermal sensor ID for the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
+ gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+ /* Until we get clock scaling 0 is always the active power level */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+ /* The threshold is fixed at 6000 for A530 */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+ /* Write the voltage table */
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+ gpu_write(gpu, AGC_MSG_STATE, 1);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /* Write the max power - hard coded to 5448 for A530 */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ if (!a5xx_gpu->gpmu_dwords)
+ return 0;
+
+ /* Turn off protected mode for this operation */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Kick off the IB to load the GPMU microcode */
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu);
+
+ if (!gpu->funcs->idle(gpu)) {
+ DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return -EINVAL;
+ }
+
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+ /* Kick off the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+ /*
+ * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
+ * won't have advanced power collapse.
+ */
+ if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ 0xBABEFACE))
+ DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+ gpu->name);
+
+ return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+ gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+ gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Set up the limits management */
+ a5xx_lm_setup(gpu);
+
+ /* Set up SP/TP power collapse */
+ a5xx_pc_init(gpu);
+
+ /* Start the GPMU */
+ ret = a5xx_gpmu_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Start the limits management */
+ a5xx_lm_enable(gpu);
+
+ return 0;
+}
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *drm = gpu->dev;
+ const struct firmware *fw;
+ uint32_t dwords = 0, offset = 0, bosize;
+ unsigned int *data, *ptr, *cmds;
+ unsigned int cmds_size;
+
+ if (a5xx_gpu->gpmu_bo)
+ return;
+
+ /* Get the firmware */
+ if (request_firmware(&fw, adreno_gpu->info->gpmufw, drm->dev)) {
+ DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return;
+ }
+
+ data = (unsigned int *) fw->data;
+
+ /*
+ * The first dword is the size of the remaining data in dwords. Use it
+ * as a checksum of sorts and make sure it matches the actual size of
+ * the firmware that we read
+ */
+
+ if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+ goto out;
+
+ /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+ if (data[1] != 2)
+ goto out;
+
+ cmds = data + data[2] + 3;
+ cmds_size = data[0] - data[2] - 2;
+
+ /*
+ * A single type4 opcode can only have so many values attached so
+ * add enough opcodes to load all the commands
+ */
+ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+ mutex_lock(&drm->struct_mutex);
+ a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(a5xx_gpu->gpmu_bo))
+ goto err;
+
+ if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+ goto err;
+
+ ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
+ if (!ptr)
+ goto err;
+
+ while (cmds_size > 0) {
+ int i;
+ uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size;
+
+ ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+ _size);
+
+ for (i = 0; i < _size; i++)
+ ptr[dwords++] = *cmds++;
+
+ offset += _size;
+ cmds_size -= _size;
+ }
+
+ msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
+ a5xx_gpu->gpmu_dwords = dwords;
+
+ goto out;
+
+err:
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+ if (a5xx_gpu->gpmu_bo)
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+ a5xx_gpu->gpmu_bo = NULL;
+ a5xx_gpu->gpmu_iova = 0;
+ a5xx_gpu->gpmu_dwords = 0;
+
+out:
+ /* No need to keep that firmware lying around anymore */
+ release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index e81481d1b7df..4a33ba6f1244 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -172,6 +173,14 @@ enum a3xx_color_swap {
XYZW = 3,
};
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 5127b75dbf40..893eb2b2531b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -25,9 +25,6 @@ bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
@@ -77,6 +74,15 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+ .revn = 530,
+ .name = "A530",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
+ .gpmufw = "a530v3_gpmu.fw2",
},
};
@@ -86,6 +92,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
MODULE_FIRMWARE("a420_pm4.fw");
MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_pm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -148,12 +156,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
mutex_unlock(&dev->struct_mutex);
+
+ disable_irq(gpu->irq);
+
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
} else {
+ enable_irq(gpu->irq);
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
@@ -169,12 +181,20 @@ static void set_gpu_pdev(struct drm_device *dev,
priv->gpu_pdev = pdev;
}
+static const struct {
+ const char *str;
+ uint32_t flag;
+} quirks[] = {
+ { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+ { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
+};
+
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
struct device_node *child, *node = dev->of_node;
u32 val;
- int ret;
+ int ret, i;
ret = of_property_read_u32(node, "qcom,chipid", &val);
if (ret) {
@@ -208,6 +228,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return -ENXIO;
}
+ for (i = 0; i < ARRAY_SIZE(quirks); i++)
+ if (of_property_read_bool(node, quirks[i].str))
+ config.quirks |= quirks[i].flag;
+
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f386f463278d..a18126150e11 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -22,7 +22,7 @@
#include "msm_mmu.h"
#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 16
+#define RB_BLKSIZE 32
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -54,9 +54,6 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
}
}
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -79,11 +76,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
/* Setup ringbuffer address: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);
- if (!adreno_is_a430(adreno_gpu))
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- rbmemptr(adreno_gpu, rptr));
+ if (!adreno_is_a430(adreno_gpu)) {
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ rbmemptr(adreno_gpu, rptr));
+ }
return 0;
}
@@ -126,11 +126,14 @@ void adreno_recover(struct msm_gpu *gpu)
adreno_gpu->memptrs->wptr = 0;
gpu->funcs->pm_resume(gpu);
+
+ disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
+ enable_irq(gpu->irq);
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
@@ -218,19 +221,18 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-void adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
- int ret;
/* wait for CP to drain ringbuffer: */
- ret = spin_until(get_rptr(adreno_gpu) == wptr);
-
- if (ret)
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ if (!spin_until(get_rptr(adreno_gpu) == wptr))
+ return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ return false;
}
#ifdef CONFIG_DEBUG_FS
@@ -278,7 +280,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
void adreno_dump_info(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int i;
printk("revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
@@ -290,11 +291,6 @@ void adreno_dump_info(struct msm_gpu *gpu)
printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
printk("rb wptr: %d\n", get_wptr(gpu->rb));
-
- for (i = 0; i < 8; i++) {
- printk("CP_SCRATCH_REG%d: %u\n", i,
- gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
- }
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
@@ -350,6 +346,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->gmem = adreno_gpu->info->gmem;
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
+ adreno_gpu->quirks = config->quirks;
gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate;
@@ -381,7 +378,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- mmu = gpu->mmu;
+ mmu = gpu->aspace->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a54f6e036b4a..e8d55b0306ed 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -28,6 +28,9 @@
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
/**
* adreno_regs: List of registers that are used in across all
* 3D devices. Each device type has different offset value for the same
@@ -35,73 +38,21 @@
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
- REG_ADRENO_CP_DEBUG,
- REG_ADRENO_CP_ME_RAM_WADDR,
- REG_ADRENO_CP_ME_RAM_DATA,
- REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
- REG_ADRENO_CP_PROTECT_CTRL,
- REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
- REG_ADRENO_CP_IB1_BASE,
- REG_ADRENO_CP_IB1_BUFSZ,
- REG_ADRENO_CP_IB2_BASE,
- REG_ADRENO_CP_IB2_BUFSZ,
- REG_ADRENO_CP_TIMESTAMP,
- REG_ADRENO_CP_ME_RAM_RADDR,
- REG_ADRENO_CP_ROQ_ADDR,
- REG_ADRENO_CP_ROQ_DATA,
- REG_ADRENO_CP_MERCIU_ADDR,
- REG_ADRENO_CP_MERCIU_DATA,
- REG_ADRENO_CP_MERCIU_DATA2,
- REG_ADRENO_CP_MEQ_ADDR,
- REG_ADRENO_CP_MEQ_DATA,
- REG_ADRENO_CP_HW_FAULT,
- REG_ADRENO_CP_PROTECT_STATUS,
- REG_ADRENO_SCRATCH_ADDR,
- REG_ADRENO_SCRATCH_UMSK,
- REG_ADRENO_SCRATCH_REG2,
- REG_ADRENO_RBBM_STATUS,
- REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_ADRENO_RBBM_INT_0_MASK,
- REG_ADRENO_RBBM_INT_0_STATUS,
- REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_ADRENO_RBBM_AHB_CMD,
- REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_ADRENO_RBBM_CLOCK_CTL,
- REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_ADRENO_VFD_CONTROL_0,
- REG_ADRENO_VFD_INDEX_MAX,
- REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_ADRENO_PA_SC_AA_CONFIG,
- REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_ADRENO_TP0_CHICKEN,
- REG_ADRENO_RBBM_RBBM_CTL,
- REG_ADRENO_UCHE_INVALIDATE0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};
+enum adreno_quirks {
+ ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+ ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -122,12 +73,16 @@ struct adreno_info {
uint32_t revn;
const char *name;
const char *pm4fw, *pfpfw;
+ const char *gpmufw;
uint32_t gmem;
struct msm_gpu *(*init)(struct drm_device *dev);
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
+#define rbmemptr(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
struct adreno_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t wptr;
@@ -153,7 +108,7 @@ struct adreno_gpu {
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
- uint32_t memptrs_iova;
+ uint64_t memptrs_iova;
/*
* Register offsets are different between some GPUs.
@@ -161,6 +116,8 @@ struct adreno_gpu {
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
+
+ uint32_t quirks;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -171,6 +128,7 @@ struct adreno_platform_config {
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
+ uint32_t quirks;
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -234,6 +192,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 530;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -241,7 +204,7 @@ void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+bool adreno_idle(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
@@ -278,8 +241,38 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+ (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
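A rough usage sketch of these helpers (register and event choices are illustrative, not taken from this patch): a type-4 packet writes cnt payload dwords starting at a register index, and a type-7 packet carries an opcode followed by cnt dwords:

    /* 64-bit register write via a type-4 packet: */
    OUT_PKT4(ring, regindx, 2);          /* regindx: hypothetical register index */
    OUT_RING(ring, lower_32_bits(iova));
    OUT_RING(ring, upper_32_bits(iova));

    /* fire an event via a type-7 packet: */
    OUT_PKT7(ring, CP_EVENT_WRITE, 1);
    OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(STAT_EVENT));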
/*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
@@ -290,6 +283,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
!gpu->reg_offsets[offset_name]) {
BUG();
}
+
+ /*
+ * REG_SKIP is a special value that tells us that the register in
+ * question isn't implemented on the target, but shouldn't trigger a
+ * BUG(). This is used to cleanly implement adreno_gpu_write64() and
+ * adreno_gpu_read64() in a generic fashion.
+ */
+ if (gpu->reg_offsets[offset_name] == REG_SKIP)
+ return false;
+
return true;
}
@@ -311,4 +314,39 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+ enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+ adreno_gpu_write(gpu, lo, lower_32_bits(data));
+ adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
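REG_ADRENO_SKIP (above) lets a target that only implements the 32-bit register leave the _HI slot unmapped; adreno_reg_check() then returns false for it and the second write in adreno_gpu_write64() quietly becomes a no-op. A sketch of such an offset table (entries illustrative only):

    static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
            REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
            REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),  /* no HI half on this target */
            REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
    };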
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+ ((1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
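As a worked illustration of the alignment caveat above: ADRENO_PROTECT_RW(0x07, 4) encodes ilog2(4) = 2 in bits 24-28 and the base in the low bits, but since 0x07 is not a multiple of 4 the hardware masks the base down and the window effectively covers 0x04-0x07; in practice the base is always chosen as a multiple of the length, e.g. (addresses and lengths illustrative):

    /* values that would be programmed into successive CP protect registers: */
    static const u32 protect_regs[] = {
            ADRENO_PROTECT_RW(0x0cc0, 16),          /* base aligned to length */
            ADRENO_PROTECT_RDONLY(0x0000, 0x4000),  /* large read-only window */
    };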
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index d7477ff867c9..6a2930e75503 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -58,6 +59,7 @@ enum vgt_event_type {
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
+ STAT_EVENT = 16,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -65,6 +67,10 @@ enum vgt_event_type {
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
+ UNK_1C = 28,
+ UNK_1D = 29,
+ BLIT = 30,
+ UNK_26 = 38,
};
enum pc_di_primtype {
@@ -82,7 +88,6 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
- DI_PT_PATCHES = 34,
};
enum pc_di_src_sel {
@@ -110,11 +115,15 @@ enum adreno_pm4_packet_type {
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
};
enum adreno_pm4_type3_packets {
CP_ME_INIT = 72,
CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,
@@ -163,6 +172,7 @@ enum adreno_pm4_type3_packets {
CP_TEST_TWO_MEMS = 113,
CP_REG_WR_NO_CTXT = 120,
CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
CP_WAIT_FOR_ME = 19,
CP_SET_DRAW_STATE = 67,
CP_DRAW_INDX_OFFSET = 56,
@@ -178,6 +188,22 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
+ CP_BLIT = 44,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -196,6 +222,7 @@ enum adreno_state_block {
SB_VERT_SHADER = 4,
SB_GEOM_SHADER = 5,
SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
};
enum adreno_state_type {
@@ -218,6 +245,17 @@ enum a4xx_index_size {
INDEX4_SIZE_32_BIT = 2,
};
+enum render_mode_cmd {
+ BYPASS = 1,
+ GMEM = 3,
+ BLIT2D = 5,
+};
+
+enum cp_blit_cmd {
+ BLIT_OP_FILL = 0,
+ BLIT_OP_BLIT = 1,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -258,6 +296,14 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
}
+#define REG_CP_LOAD_STATE_2 0x00000002
+#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK;
+}
+
#define REG_CP_DRAW_INDX_0 0x00000000
#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
@@ -389,7 +435,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -437,30 +488,40 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
-#define REG_CP_SET_DRAW_STATE_0 0x00000000
-#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff
-#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val)
+static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK;
+ return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK;
}
-#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000
-#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000
-#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000
-#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000
-#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000
-#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24
-static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val)
+#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK;
+ return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK;
}
-#define REG_CP_SET_DRAW_STATE_1 0x00000001
-#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff
-#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val)
+static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK;
+ return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK;
+}
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK;
}
#define REG_CP_SET_BIN_0 0x00000000
@@ -533,5 +594,192 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff
+#define CP_SET_RENDER_MODE_0_MODE__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val)
+{
+ return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
+#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
+
+#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_0 0x00000000
+#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff
+#define CP_EVENT_WRITE_0_EVENT__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
+{
+ return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_1 0x00000001
+#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_2 0x00000002
+#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_3 0x00000003
+
+#define REG_CP_BLIT_0 0x00000000
+#define CP_BLIT_0_OP__MASK 0x0000000f
+#define CP_BLIT_0_OP__SHIFT 0
+static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
+{
+ return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK;
+}
+
+#define REG_CP_BLIT_1 0x00000001
+#define CP_BLIT_1_SRC_X1__MASK 0x0000ffff
+#define CP_BLIT_1_SRC_X1__SHIFT 0
+static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
+}
+#define CP_BLIT_1_SRC_Y1__MASK 0xffff0000
+#define CP_BLIT_1_SRC_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK;
+}
+
+#define REG_CP_BLIT_2 0x00000002
+#define CP_BLIT_2_SRC_X2__MASK 0x0000ffff
+#define CP_BLIT_2_SRC_X2__SHIFT 0
+static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
+}
+#define CP_BLIT_2_SRC_Y2__MASK 0xffff0000
+#define CP_BLIT_2_SRC_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK;
+}
+
+#define REG_CP_BLIT_3 0x00000003
+#define CP_BLIT_3_DST_X1__MASK 0x0000ffff
+#define CP_BLIT_3_DST_X1__SHIFT 0
+static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
+}
+#define CP_BLIT_3_DST_Y1__MASK 0xffff0000
+#define CP_BLIT_3_DST_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK;
+}
+
+#define REG_CP_BLIT_4 0x00000004
+#define CP_BLIT_4_DST_X2__MASK 0x0000ffff
+#define CP_BLIT_4_DST_X2__SHIFT 0
+static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
+}
+#define CP_BLIT_4_DST_Y2__MASK 0xffff0000
+#define CP_BLIT_4_DST_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 4958594d5266..39dff7d5e89b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index f05ed0e1f3d6..3819fdefcae2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -139,6 +139,7 @@ struct msm_dsi_host {
u32 err_work_state;
struct work_struct err_work;
+ struct work_struct hpd_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
@@ -981,7 +982,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
struct drm_device *dev = msm_host->dev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- u32 iova;
+ uint64_t iova;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
mutex_lock(&dev->struct_mutex);
@@ -1146,7 +1147,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- u32 dma_base;
+ uint64_t dma_base;
bool triggered;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
wmb(); /* make sure dsi controller enabled again */
}
+static void dsi_hpd_worker(struct work_struct *work)
+{
+ struct msm_dsi_host *msm_host =
+ container_of(work, struct msm_dsi_host, hpd_work);
+
+ drm_helper_hpd_irq_event(msm_host->dev);
+}
+
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
DBG("id=%d", msm_host->id);
if (msm_host->dev)
- drm_helper_hpd_irq_event(msm_host->dev);
+ queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
DBG("id=%d", msm_host->id);
if (msm_host->dev)
- drm_helper_hpd_irq_event(msm_host->dev);
+ queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
INIT_WORK(&msm_host->err_work, dsi_err_worker);
+ INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
msm_dsi->host = &msm_host->base;
msm_dsi->id = msm_host->id;
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 2d999494cdea..8b9f3ebaeba7 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 598fdaff0a41..26e3a01a99c2 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 38c90e1eb002..49008451085b 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "pxo" },
.num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 506434fac993..3fcbb30dc241 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f1072c18c81e..d7bf3232dc88 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 34c7df6549c1..0a97ff75ed6f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index aa94a553794f..143eab46ba68 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = {
.ops = &hdmi_8996_pll_ops,
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8996_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 92da69aa6187..99590758c68b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = {
.ops = &hdmi_pll_ops,
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8960_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 6eab7d0cf6b5..1b996ede7a65 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 6688e79cc88e..88037889589b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 9527dafc3e69..1c29618f4ddb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -373,7 +373,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
- uint32_t iova = mdp4_crtc->cursor.next_iova;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
@@ -418,7 +418,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
- uint32_t iova;
+ uint64_t iova;
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index a521207db8a1..b764d7f10312 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_print.h>
#include "msm_drv.h"
#include "mdp4_kms.h"
@@ -29,7 +30,16 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
+ struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
+ drm_state_dump(mdp4_kms->dev, &p);
+ }
}
void mdp4_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 571a91ee9607..b782efd4b95f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
@@ -159,17 +160,18 @@ static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_mmu *mmu = mdp4_kms->mmu;
-
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
- }
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
+ }
+
if (mdp4_kms->rpm_enabled)
pm_runtime_disable(dev);
@@ -440,7 +442,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int irq, ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -531,24 +533,26 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->iommu, "mdp4");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+
+ mdp4_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
-
- mdp4_kms->mmu = mmu;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- mmu = NULL;
+ aspace = NULL;
}
- mdp4_kms->id = msm_register_mmu(dev, mmu);
+ mdp4_kms->id = msm_register_address_space(dev, aspace);
if (mdp4_kms->id < 0) {
ret = mdp4_kms->id;
dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
@@ -598,6 +602,10 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
+ if (config.iommu) {
+ config.iommu->geometry.aperture_start = 0x1000;
+ config.iommu->geometry.aperture_end = 0xffffffff;
+ }
return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 25fb83997119..62712ca164ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -43,7 +43,7 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
@@ -51,7 +51,7 @@ struct mdp4_kms {
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
- uint32_t blank_cursor_iova;
+ uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 3903dbcda763..911e4690d36a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -40,7 +40,7 @@ enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb)
{
bool is_tile = false;
- if (fb->modifier[1] == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
+ if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
is_tile = true;
if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index ca6ca30650a0..27d5371acee0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30)
-- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
-- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index ac9e4cde1380..618b2ffed9b4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
- MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x44000, 0x47000 },
- .nb_stages = 5,
+ .nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
@@ -550,6 +550,10 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
static struct mdp5_cfg_platform config = {};
config.iommu = iommu_domain_alloc(&platform_bus_type);
+ if (config.iommu) {
+ config.iommu->geometry.aperture_start = 0x1000;
+ config.iommu->geometry.aperture_end = 0xffffffff;
+ }
return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index fa2be7ce9468..1ce8a01a5a28 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -27,11 +27,8 @@
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
-#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
-
struct mdp5_crtc {
struct drm_crtc base;
- char name[8];
int id;
bool enabled;
@@ -102,7 +99,7 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+ DBG("%s: flush=%08x", crtc->name, flush_mask);
return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
@@ -136,7 +133,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
- struct drm_plane *plane;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -148,16 +144,12 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
- DBG("%s: send event: %p", mdp5_crtc->name, event);
+ DBG("%s: send event: %p", crtc->name, event);
drm_crtc_send_vblank_event(crtc, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- drm_atomic_crtc_for_each_plane(plane, crtc) {
- mdp5_plane_complete_flip(plane);
- }
-
if (mdp5_crtc->ctl && !crtc->state->enable) {
/* set STAGE_UNUSED for all layers */
mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
@@ -223,12 +215,7 @@ static void blend_setup(struct drm_crtc *crtc)
plane_cnt++;
}
- /*
- * If there is no base layer, enable border color.
- * Although it's not possbile in current blend logic,
- * put it here as a reminder.
- */
- if (!pstates[STAGE_BASE] && plane_cnt) {
+ if (!pstates[STAGE_BASE]) {
ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
DBG("Border Color is enabled");
}
@@ -300,7 +287,7 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mdp5_crtc->name, mode->base.id, mode->name,
+ crtc->name, mode->base.id, mode->name,
mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
@@ -320,7 +307,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- DBG("%s", mdp5_crtc->name);
+ DBG("%s", crtc->name);
if (WARN_ON(!mdp5_crtc->enabled))
return;
@@ -339,7 +326,7 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- DBG("%s", mdp5_crtc->name);
+ DBG("%s", crtc->name);
if (WARN_ON(mdp5_crtc->enabled))
return;
@@ -365,31 +352,29 @@ static int pstate_cmp(const void *a, const void *b)
return pa->state->zpos - pb->state->zpos;
}
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+ struct drm_plane_state *pstate)
+{
+ return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+ ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+ ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
- int cnt = 0, i;
+ int cnt = 0, base = 0, i;
- DBG("%s: check", mdp5_crtc->name);
+ DBG("%s: check", crtc->name);
- /* verify that there are not too many planes attached to crtc
- * and that we don't have conflicting mixer stages:
- */
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
- if (cnt >= (hw_cfg->lm.nb_stages)) {
- dev_err(dev->dev, "too many planes!\n");
- return -EINVAL;
- }
-
-
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
@@ -399,10 +384,26 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
/* assign a stage based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+ /* if the bottom-most layer is not fullscreen, we need to use
+ * it for solid-color:
+ */
+ if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
+ base++;
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ if ((cnt + base) >= hw_cfg->lm.nb_stages) {
+ dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
+ return -EINVAL;
+ }
+
for (i = 0; i < cnt; i++) {
- pstates[i].state->stage = STAGE_BASE + i;
- DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
- pipe2name(mdp5_plane_pipe(pstates[i].plane)),
+ pstates[i].state->stage = STAGE_BASE + i + base;
+ DBG("%s: assign pipe %s on stage=%d", crtc->name,
+ pstates[i].plane->name,
pstates[i].state->stage);
}
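In other words, when the bottom-most plane does not cover the whole mode, every plane is pushed up one stage (base = 1) so the mixer's base stage remains free to output the border color, and the plane-count check above charges that extra stage against lm.nb_stages (raised to 8 for msm8x16 earlier in this series).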
@@ -412,8 +413,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- DBG("%s: begin", mdp5_crtc->name);
+ DBG("%s: begin", crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -423,7 +423,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
+ DBG("%s: event: %p", crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
@@ -489,7 +489,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, cursor_addr, stride;
+ uint32_t blendcfg, stride;
+ uint64_t cursor_addr;
int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
@@ -643,7 +644,7 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
- DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
+ DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -765,9 +766,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
- snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
- pipe2name(mdp5_plane_pipe(plane)), id);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
NULL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index d53e5510fd7c..3ce8b9dec9c1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -17,6 +17,8 @@
#include <linux/irq.h>
+#include <drm/drm_print.h>
+
#include "msm_drv.h"
#include "mdp5_kms.h"
@@ -30,7 +32,18 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
+ struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
+ drm_state_dump(mdp5_kms->dev, &p);
+ if (mdp5_kms->smp)
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+ }
}
void mdp5_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index ed7143d35b25..5f6cd8745dbc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -19,6 +19,7 @@
#include <linux/of_irq.h>
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
@@ -71,10 +72,49 @@ static int mdp5_hw_init(struct msm_kms *kms)
return 0;
}
+struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct msm_kms_state *state = to_kms_state(s);
+ struct mdp5_state *new_state;
+ int ret;
+
+ if (state->state)
+ return state->state;
+
+ ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
+ if (!new_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy state: */
+ new_state->hwpipe = mdp5_kms->state->hwpipe;
+ if (mdp5_kms->smp)
+ new_state->smp = mdp5_kms->state->smp;
+
+ state->state = new_state;
+
+ return new_state;
+}
+
+static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ swap(to_kms_state(state)->state, mdp5_kms->state);
+}
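A minimal sketch of how this duplicated global state is meant to be consumed from an atomic path (the caller and the mutation are hypothetical); the copy returned by mdp5_get_state() only becomes the current state when mdp5_swap_state() runs at commit time:

    struct mdp5_state *state = mdp5_get_state(s);   /* s: struct drm_atomic_state * */

    if (IS_ERR(state))
            return PTR_ERR(state);

    /* modify the duplicated copy here (hwpipe/smp bookkeeping); nothing is
     * visible to other commits until mdp5_swap_state() swaps it in. */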
+
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
mdp5_enable(mdp5_kms);
+
+ if (mdp5_kms->smp)
+ mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
}
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
@@ -87,6 +127,9 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
for_each_plane_in_state(state, plane, plane_state, i)
mdp5_plane_complete_commit(plane, plane_state);
+ if (mdp5_kms->smp)
+ mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+
mdp5_disable(mdp5_kms);
}
@@ -117,14 +160,66 @@ static int mdp5_set_split_display(struct msm_kms *kms,
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_mmu *mmu = mdp5_kms->mmu;
+ struct msm_gem_address_space *aspace = mdp5_kms->aspace;
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++)
+ mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
}
}
+#ifdef CONFIG_DEBUG_FS
+static int smp_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!mdp5_kms->smp) {
+ drm_printf(&p, "no SMP pool\n");
+ return 0;
+ }
+
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+
+ return 0;
+}
+
+static struct drm_info_list mdp5_debugfs_list[] = {
+ {"smp", smp_show },
+};
+
+static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mdp5_kms_debugfs_cleanup(struct msm_kms *kms, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list), minor);
+}
+#endif
+
static const struct mdp_kms_funcs kms_funcs = {
.base = {
.hw_init = mdp5_hw_init,
@@ -134,6 +229,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
+ .swap_state = mdp5_swap_state,
.prepare_commit = mdp5_prepare_commit,
.complete_commit = mdp5_complete_commit,
.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
@@ -141,6 +237,10 @@ static const struct mdp_kms_funcs kms_funcs = {
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
.destroy = mdp5_kms_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = mdp5_kms_debugfs_init,
+ .debugfs_cleanup = mdp5_kms_debugfs_cleanup,
+#endif
},
.set_irqmask = mdp5_set_irqmask,
};
@@ -321,15 +421,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
- static const enum mdp5_pipe crtcs[] = {
- SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
- };
- static const enum mdp5_pipe vig_planes[] = {
- SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
- };
- static const enum mdp5_pipe dma_planes[] = {
- SSPP_DMA0, SSPP_DMA1,
- };
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg;
@@ -337,58 +428,35 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* construct CRTCs and their private planes: */
- for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
+ /* Construct planes equaling the number of hw pipes, and CRTCs
+ * for the N layer-mixers (LM). The first N planes become primary
+ * planes for the CRTCs, with the remainder as overlay planes:
+ */
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ bool primary = i < mdp5_cfg->lm.count;
struct drm_plane *plane;
struct drm_crtc *crtc;
- plane = mdp5_plane_init(dev, crtcs[i], true,
- hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
+ plane = mdp5_plane_init(dev, primary);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
- pipe2name(crtcs[i]), ret);
+ dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
+ priv->planes[priv->num_planes++] = plane;
+
+ if (!primary)
+ continue;
crtc = mdp5_crtc_init(dev, plane, i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
- pipe2name(crtcs[i]), ret);
+ dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
}
- /* Construct video planes: */
- for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
- struct drm_plane *plane;
-
- plane = mdp5_plane_init(dev, vig_planes[i], false,
- hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct %s plane: %d\n",
- pipe2name(vig_planes[i]), ret);
- goto fail;
- }
- }
-
- /* DMA planes */
- for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
- struct drm_plane *plane;
-
- plane = mdp5_plane_init(dev, dma_planes[i], false,
- hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct %s plane: %d\n",
- pipe2name(dma_planes[i]), ret);
- goto fail;
- }
- }
-
/* Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
@@ -564,7 +632,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int irq, i, ret;
/* priv->kms would have been populated by the MDP5 driver */
@@ -606,30 +674,29 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
- iommu_domain_free(config->platform.iommu);
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->platform.iommu, "mdp5");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+ mdp5_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
ret);
- mmu->funcs->destroy(mmu);
goto fail;
}
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- mmu = NULL;
+ aspace = NULL;
}
- mdp5_kms->mmu = mmu;
- mdp5_kms->id = msm_register_mmu(dev, mmu);
+ mdp5_kms->id = msm_register_address_space(dev, aspace);
if (mdp5_kms->id < 0) {
ret = mdp5_kms->id;
dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
@@ -644,8 +711,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.max_width = config->hw->lm.max_width;
- dev->mode_config.max_height = config->hw->lm.max_height;
+ dev->mode_config.max_width = 0xffff;
+ dev->mode_config.max_height = 0xffff;
dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
@@ -673,6 +740,69 @@ static void mdp5_destroy(struct platform_device *pdev)
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
+
+ kfree(mdp5_kms->state);
+}
+
+static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
+ const enum mdp5_pipe *pipes, const uint32_t *offsets,
+ uint32_t caps)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
+ if (IS_ERR(hwpipe)) {
+ ret = PTR_ERR(hwpipe);
+ dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+ pipe2name(pipes[i]), ret);
+ return ret;
+ }
+ hwpipe->idx = mdp5_kms->num_hwpipes;
+ mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
+ }
+
+ return 0;
+}
+
+static int hwpipe_init(struct mdp5_kms *mdp5_kms)
+{
+ static const enum mdp5_pipe rgb_planes[] = {
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
+ };
+ static const enum mdp5_pipe vig_planes[] = {
+ SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+ };
+ static const enum mdp5_pipe dma_planes[] = {
+ SSPP_DMA0, SSPP_DMA1,
+ };
+ const struct mdp5_cfg_hw *hw_cfg;
+ int ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /* Construct RGB pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
+ hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
+ if (ret)
+ return ret;
+
+ /* Construct video (VIG) pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
+ hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
+ if (ret)
+ return ret;
+
+ /* Construct DMA pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
+ hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
@@ -696,6 +826,13 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
mdp5_kms->dev = dev;
mdp5_kms->pdev = pdev;
+ drm_modeset_lock_init(&mdp5_kms->state_lock);
+ mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
+ if (!mdp5_kms->state) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
@@ -749,7 +886,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
* this section initializes the SMP:
*/
if (mdp5_kms->caps & MDP_CAP_SMP) {
- mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
+ mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
if (IS_ERR(mdp5_kms->smp)) {
ret = PTR_ERR(mdp5_kms->smp);
mdp5_kms->smp = NULL;
@@ -764,6 +901,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
goto fail;
}
+ ret = hwpipe_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
/* set uninit-ed kms */
priv->kms = &mdp5_kms->base.base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 03738927be10..17b0cc101171 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -24,8 +24,11 @@
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_ctl.h"
+#include "mdp5_pipe.h"
#include "mdp5_smp.h"
+struct mdp5_state;
+
struct mdp5_kms {
struct mdp_kms base;
@@ -33,13 +36,21 @@ struct mdp5_kms {
struct platform_device *pdev;
+ unsigned num_hwpipes;
+ struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
+
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
+ /**
+ * Global atomic state. Do not access directly, use mdp5_get_state()
+ */
+ struct mdp5_state *state;
+ struct drm_modeset_lock state_lock;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
@@ -65,9 +76,27 @@ struct mdp5_kms {
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+/* Global atomic state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ *
+ * For atomic updates which require modifying global state, the current
+ * state is duplicated (via mdp5_get_state(), which takes state_lock),
+ * the copy is modified, and it replaces mdp5_kms->state at commit time
+ * through the new ->swap_state() hook.
+ */
+struct mdp5_state {
+ struct mdp5_hw_pipe_state hwpipe;
+ struct mdp5_smp_state smp;
+};
+
+struct mdp5_state *__must_check
+mdp5_get_state(struct drm_atomic_state *s);
+
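To make the access pattern concrete, here is a sketch of how a check-time helper would use this accessor; the function name and the specific mutation are illustrative only, not part of this patch:

    static int example_grab_global_state(struct drm_atomic_state *s)
    {
            struct mdp5_state *state = mdp5_get_state(s);

            if (IS_ERR(state))
                    return PTR_ERR(state);  /* e.g. backoff from state_lock */

            /*
             * Only the duplicated copy is touched here; mdp5_kms->state itself
             * is replaced later, when msm_atomic_commit() calls the new
             * ->swap_state() hook while still holding state_lock.
             */
            state->hwpipe.hwpipe_to_plane[0] = NULL;        /* placeholder mutation */

            return 0;
    }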
+/* Atomic plane state. Subclasses the base drm_plane_state in order to
+ * track assigned hwpipe and hw specific state.
+ */
struct mdp5_plane_state {
struct drm_plane_state base;
+ struct mdp5_hw_pipe *hwpipe;
+
/* aligned with property */
uint8_t premultiplied;
uint8_t zpos;
@@ -76,11 +105,6 @@ struct mdp5_plane_state {
/* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
- /* some additional transactional status to help us know in the
- * apply path whether we need to update SMP allocation, and
- * whether current update is still pending:
- */
- bool mode_changed : 1;
bool pending : 1;
};
#define to_mdp5_plane_state(x) \
@@ -114,6 +138,18 @@ static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
return msm_readl(mdp5_kms->mmio + reg);
}
+static inline const char *stage2name(enum mdp_mixer_stage_id stage)
+{
+ static const char *names[] = {
+#define NAME(n) [n] = #n
+ NAME(STAGE_UNUSED), NAME(STAGE_BASE),
+ NAME(STAGE0), NAME(STAGE1), NAME(STAGE2),
+ NAME(STAGE3), NAME(STAGE4), NAME(STAGE6),
+#undef NAME
+ };
+ return names[stage];
+}
+
static inline const char *pipe2name(enum mdp5_pipe pipe)
{
static const char *names[] = {
@@ -196,13 +232,10 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_flip(struct drm_plane *plane);
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
-struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane,
- uint32_t reg_offset, uint32_t caps);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
new file mode 100644
index 000000000000..1ae9dc8d260d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
+ struct drm_plane *plane, uint32_t caps, uint32_t blkcfg)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_state *state;
+ struct mdp5_hw_pipe_state *old_state, *new_state;
+ struct mdp5_hw_pipe *hwpipe = NULL;
+ int i;
+
+ state = mdp5_get_state(s);
+ if (IS_ERR(state))
+ return ERR_CAST(state);
+
+ /* grab old_state after mdp5_get_state(), since we now hold the lock: */
+ old_state = &mdp5_kms->state->hwpipe;
+ new_state = &state->hwpipe;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
+
+ /* skip if already in-use.. check both new and old state,
+ * since we cannot immediately re-use a pipe that is
+ * released in the current update in some cases:
+ * (1) mdp5 can have SMP (non-double-buffered)
+ * (2) hw pipe previously assigned to different CRTC
+ * (vblanks might not be aligned)
+ */
+ if (new_state->hwpipe_to_plane[cur->idx] ||
+ old_state->hwpipe_to_plane[cur->idx])
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ /* possible candidate, take the one with the
+ * fewest unneeded caps bits set:
+ */
+ if (!hwpipe || (hweight_long(cur->caps & ~caps) <
+ hweight_long(hwpipe->caps & ~caps)))
+ hwpipe = cur;
+ }
+
+ if (!hwpipe)
+ return ERR_PTR(-ENOMEM);
+
+ if (mdp5_kms->smp) {
+ int ret;
+
+ DBG("%s: alloc SMP blocks", hwpipe->name);
+ ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
+ hwpipe->pipe, blkcfg);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+
+ hwpipe->blkcfg = blkcfg;
+ }
+
+ DBG("%s: assign to plane %s for caps %x",
+ hwpipe->name, plane->name, caps);
+ new_state->hwpipe_to_plane[hwpipe->idx] = plane;
+
+ return hwpipe;
+}
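The "fewest unneeded caps" tie-break above is easy to check in isolation. The cap values in the sketch below are made up for the example (real values come from the mdp5_cfg tables), and __builtin_popcount() stands in for hweight_long():

    /* Standalone illustration (not driver code) of the selection heuristic. */
    #include <stdio.h>

    #define CAP_HFLIP (1u << 0)
    #define CAP_VFLIP (1u << 1)
    #define CAP_SCALE (1u << 2)
    #define CAP_CSC   (1u << 3)

    int main(void)
    {
            unsigned int need = CAP_HFLIP | CAP_VFLIP;
            unsigned int dma_caps = CAP_HFLIP | CAP_VFLIP;
            unsigned int vig_caps = CAP_HFLIP | CAP_VFLIP | CAP_SCALE | CAP_CSC;

            /* fewer unneeded bits wins, so the flip-only pipe is chosen and the
             * scaler/CSC-capable pipe stays free for planes that need it */
            printf("dma unneeded caps: %d\n", __builtin_popcount(dma_caps & ~need)); /* 0 */
            printf("vig unneeded caps: %d\n", __builtin_popcount(vig_caps & ~need)); /* 2 */
            return 0;
    }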
+
+void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
+
+ if (!hwpipe)
+ return;
+
+ if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
+ return;
+
+ DBG("%s: release from plane %s", hwpipe->name,
+ new_state->hwpipe_to_plane[hwpipe->idx]->name);
+
+ if (mdp5_kms->smp) {
+ DBG("%s: free SMP blocks", hwpipe->name);
+ mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe);
+ }
+
+ new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+}
+
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
+{
+ kfree(hwpipe);
+}
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps)
+{
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
+ if (!hwpipe)
+ return ERR_PTR(-ENOMEM);
+
+ hwpipe->name = pipe2name(pipe);
+ hwpipe->pipe = pipe;
+ hwpipe->reg_offset = reg_offset;
+ hwpipe->caps = caps;
+ hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+
+ spin_lock_init(&hwpipe->pipe_lock);
+
+ return hwpipe;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
new file mode 100644
index 000000000000..611da7a660c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_PIPE_H__
+#define __MDP5_PIPE_H__
+
+#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
+/* represents a hw pipe, which is dynamically assigned to a plane */
+struct mdp5_hw_pipe {
+ int idx;
+
+ const char *name;
+ enum mdp5_pipe pipe;
+
+ spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
+ uint32_t reg_offset;
+ uint32_t caps;
+
+ uint32_t flush_mask; /* used to commit pipe registers */
+
+ /* number of smp blocks per plane, ie:
+ * nblks_y | (nblks_u << 8) | (nblks_v << 16)
+ */
+ uint32_t blkcfg;
+};
+
+/* global atomic state of assignment between pipes and planes: */
+struct mdp5_hw_pipe_state {
+ struct drm_plane *hwpipe_to_plane[SSPP_MAX];
+};
+
+struct mdp5_hw_pipe *__must_check
+mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg);
+void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps);
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
+
+#endif /* __MDP5_PIPE_H__ */
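The blkcfg packing documented in struct mdp5_hw_pipe (nblks_y | (nblks_u << 8) | (nblks_v << 16)) is consumed one byte per client, which is exactly how mdp5_smp_assign() walks it. A standalone decode, with a hypothetical value of 0x0204 (4 blocks for the luma client, 2 for the chroma client of a 2-plane format):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t blkcfg = 0x0204;       /* hypothetical: nblks_y=4, nblks_u=2 */
            int i;

            for (i = 0; i < 3; i++) {
                    int n = (blkcfg >> (8 * i)) & 0xff;

                    if (n)
                            printf("client plane %d: %d SMP blocks\n", i, n);
            }
            return 0;
    }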
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 951c002b05df..c099da7bc212 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -16,19 +16,11 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_print.h>
#include "mdp5_kms.h"
struct mdp5_plane {
struct drm_plane base;
- const char *name;
-
- enum mdp5_pipe pipe;
-
- spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
- uint32_t reg_offset;
- uint32_t caps;
-
- uint32_t flush_mask; /* used to commit pipe registers */
uint32_t nformats;
uint32_t formats[32];
@@ -69,21 +61,12 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
static void mdp5_plane_install_rotation_property(struct drm_device *dev,
struct drm_plane *plane)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-
- if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
- !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
- return;
-
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
-
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base,
- dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
}
/* helper to install properties which are common to planes and crtcs */
@@ -184,6 +167,21 @@ done:
#undef SET_PROPERTY
}
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
+ drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
+ pstate->hwpipe->name : "(null)");
+ drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
+ drm_printf(p, "\tzpos=%u\n", pstate->zpos);
+ drm_printf(p, "\talpha=%u\n", pstate->alpha);
+ drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
+ drm_printf(p, "\tpending=%u\n", pstate->pending);
+}
+
static void mdp5_plane_reset(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
@@ -222,7 +220,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
- mdp5_state->mode_changed = false;
mdp5_state->pending = false;
return &mdp5_state->base;
@@ -231,10 +228,12 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
if (state->fb)
drm_framebuffer_unreference(state->fb);
- kfree(to_mdp5_plane_state(state));
+ kfree(pstate);
}
static const struct drm_plane_funcs mdp5_plane_funcs = {
@@ -247,102 +246,121 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
+ .atomic_print_state = mdp5_plane_atomic_print_state,
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct drm_framebuffer *fb = new_state->fb;
if (!new_state->fb)
return 0;
- DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
+ DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, mdp5_kms->id);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct drm_framebuffer *fb = old_state->fb;
if (!fb)
return;
- DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
+ DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, mdp5_kms->id);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_plane_state *old_state = plane->state;
- const struct mdp_format *format;
- bool vflip, hflip;
+ struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
+ bool new_hwpipe = false;
+ uint32_t max_width, max_height;
+ uint32_t caps = 0;
- DBG("%s: check (%d -> %d)", mdp5_plane->name,
+ DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));
+ /* We don't allow faster-than-vblank updates.. if we did add this
+ * some day, we would need to disallow it in cases where the hwpipe
+ * changes.
+ */
+ if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
+ return -EBUSY;
+
+ max_width = config->hw->lm.max_width << 16;
+ max_height = config->hw->lm.max_height << 16;
+
+ /* Make sure source dimensions are within bounds. */
+ if ((state->src_w > max_width) || (state->src_h > max_height)) {
+ struct drm_rect src = drm_plane_state_src(state);
+ DBG("Invalid source size "DRM_RECT_FP_FMT,
+ DRM_RECT_FP_ARG(&src));
+ return -ERANGE;
+ }
+
if (plane_enabled(state)) {
+ unsigned int rotation;
+ const struct mdp_format *format;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ uint32_t blkcfg = 0;
+
format = to_mdp_format(msm_framebuffer_format(state->fb));
- if (MDP_FORMAT_IS_YUV(format) &&
- !pipe_supports_yuv(mdp5_plane->caps)) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support YUV\n");
+ if (MDP_FORMAT_IS_YUV(format))
+ caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
- return -EINVAL;
- }
+ if (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))
+ caps |= MDP_PIPE_CAP_SCALE;
- if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
- (((state->src_w >> 16) != state->crtc_w) ||
- ((state->src_h >> 16) != state->crtc_h))) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
- state->src_w >> 16, state->src_h >> 16,
- state->crtc_w, state->crtc_h);
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
- return -EINVAL;
- }
+ if (rotation & DRM_REFLECT_X)
+ caps |= MDP_PIPE_CAP_HFLIP;
- hflip = !!(state->rotation & DRM_REFLECT_X);
- vflip = !!(state->rotation & DRM_REFLECT_Y);
- if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
- (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support flip\n");
+ if (rotation & DRM_REFLECT_Y)
+ caps |= MDP_PIPE_CAP_VFLIP;
- return -EINVAL;
- }
- }
+ /* (re)allocate hw pipe if we don't have one or caps-mismatch: */
+ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
+ new_hwpipe = true;
- if (plane_enabled(state) && plane_enabled(old_state)) {
- /* we cannot change SMP block configuration during scanout: */
- bool full_modeset = false;
- if (state->fb->pixel_format != old_state->fb->pixel_format) {
- DBG("%s: pixel_format change!", mdp5_plane->name);
- full_modeset = true;
- }
- if (state->src_w != old_state->src_w) {
- DBG("%s: src_w change!", mdp5_plane->name);
- full_modeset = true;
- }
- if (to_mdp5_plane_state(old_state)->pending) {
- DBG("%s: still pending!", mdp5_plane->name);
- full_modeset = true;
+ if (mdp5_kms->smp) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(state->fb));
+
+ blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
+ state->src_w >> 16, false);
+
+ if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
+ new_hwpipe = true;
}
- if (full_modeset) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_crtc_state(state->state, state->crtc);
- crtc_state->mode_changed = true;
- to_mdp5_plane_state(state)->mode_changed = true;
+
+ /* (re)assign hwpipe if needed, otherwise keep old one: */
+ if (new_hwpipe) {
+ /* TODO maybe we want to re-assign the hwpipe sometimes,
+ * in cases where we no longer need some caps, to make
+ * it available for other planes?
+ */
+ struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
+ plane, caps, blkcfg);
+ if (IS_ERR(mdp5_state->hwpipe)) {
+ DBG("%s: failed to assign hwpipe!", plane->name);
+ return PTR_ERR(mdp5_state->hwpipe);
+ }
+ mdp5_pipe_release(state->state, old_hwpipe);
}
- } else {
- to_mdp5_plane_state(state)->mode_changed = true;
}
return 0;
@@ -351,16 +369,16 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *state = plane->state;
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
- DBG("%s: update", mdp5_plane->name);
+ DBG("%s: update", plane->name);
- if (!plane_enabled(state)) {
- to_mdp5_plane_state(state)->pending = true;
- } else if (to_mdp5_plane_state(state)->mode_changed) {
+ mdp5_state->pending = true;
+
+ if (plane_enabled(state)) {
int ret;
- to_mdp5_plane_state(state)->pending = true;
+
ret = mdp5_plane_mode_set(plane,
state->crtc, state->fb,
state->crtc_x, state->crtc_y,
@@ -369,11 +387,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
state->src_w, state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
- } else {
- unsigned long flags;
- spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
- set_scanout_locked(plane, state->fb);
- spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
}
}
@@ -387,9 +400,9 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe;
+ enum mdp5_pipe pipe = hwpipe->pipe;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -669,18 +682,19 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *pstate = plane->state;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ enum mdp5_pipe pipe = hwpipe->pipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,};
- bool pe = mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT;
+ bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
int pe_left[COMP_MAX], pe_right[COMP_MAX];
int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
+ unsigned int rotation;
bool vflip, hflip;
unsigned long flags;
int ret;
@@ -700,27 +714,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_w = src_w >> 16;
src_h = src_h >> 16;
- DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- /* Request some memory from the SMP: */
- if (mdp5_kms->smp) {
- ret = mdp5_smp_request(mdp5_kms->smp,
- mdp5_plane->pipe, format, src_w, false);
- if (ret)
- return ret;
- }
-
- /*
- * Currently we update the hw for allocations/requests immediately,
- * but once atomic modeset/pageflip is in place, the allocation
- * would move into atomic->check_plane_state(), while updating the
- * hw would remain here:
- */
- if (mdp5_kms->smp)
- mdp5_smp_configure(mdp5_kms->smp, pipe);
-
ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
if (ret)
return ret;
@@ -729,7 +726,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
if (ret)
return ret;
- if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
calc_pixel_ext(format, src_w, crtc_w, phasex_step,
pe_left, pe_right, true);
calc_pixel_ext(format, src_h, crtc_h, phasey_step,
@@ -743,14 +740,18 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);
- hflip = !!(pstate->rotation & DRM_REFLECT_X);
- vflip = !!(pstate->rotation & DRM_REFLECT_Y);
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
+ hflip = !!(rotation & DRM_REFLECT_X);
+ vflip = !!(rotation & DRM_REFLECT_Y);
- spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+ spin_lock_irqsave(&hwpipe->pipe_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
- MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) |
- MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height));
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h)));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
@@ -795,12 +796,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
/* not using secure mode: */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
- if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT)
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
mdp5_write_pixel_ext(mdp5_kms, pipe, format,
src_w, pe_left, pe_right,
src_h, pe_top, pe_bottom);
- if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
+ if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
phasex_step[COMP_0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
@@ -815,7 +816,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
}
- if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
+ if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
if (MDP_FORMAT_IS_YUV(format))
csc_enable(mdp5_kms, pipe,
mdp_get_default_csc_cfg(CSC_YUV2RGB));
@@ -825,56 +826,42 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
set_scanout_locked(plane, fb);
- spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
+ spin_unlock_irqrestore(&hwpipe->pipe_lock, flags);
return ret;
}
-void mdp5_plane_complete_flip(struct drm_plane *plane)
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- DBG("%s: complete flip", mdp5_plane->name);
-
- if (mdp5_kms->smp)
- mdp5_smp_commit(mdp5_kms->smp, pipe);
-
- to_mdp5_plane_state(plane->state)->pending = false;
-}
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
-enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
-{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- return mdp5_plane->pipe;
+ return pstate->hwpipe->pipe;
}
uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- return mdp5_plane->flush_mask;
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
+
+ return pstate->hwpipe->flush_mask;
}
/* called after vsync in thread context */
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- if (!plane_enabled(plane->state) && mdp5_kms->smp) {
- DBG("%s: free SMP", mdp5_plane->name);
- mdp5_smp_release(mdp5_kms->smp, pipe);
- }
+ pstate->pending = false;
}
/* initialize plane */
-struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
- uint32_t caps)
+struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
@@ -889,19 +876,10 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
plane = &mdp5_plane->base;
- mdp5_plane->pipe = pipe;
- mdp5_plane->name = pipe2name(pipe);
- mdp5_plane->caps = caps;
-
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
- ARRAY_SIZE(mdp5_plane->formats),
- !pipe_supports_yuv(mdp5_plane->caps));
-
- mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
- mdp5_plane->reg_offset = reg_offset;
- spin_lock_init(&mdp5_plane->pipe_lock);
+ ARRAY_SIZE(mdp5_plane->formats), false);
- type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
type, NULL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 27d7b55b52c9..58f712d37e7f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -21,72 +21,6 @@
#include "mdp5_smp.h"
-/* SMP - Shared Memory Pool
- *
- * These are shared between all the clients, where each plane in a
- * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on
- * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
- *
- * Based on the size of the attached scanout buffer, a certain # of
- * blocks must be allocated to that client out of the shared pool.
- *
- * In some hw, some blocks are statically allocated for certain pipes
- * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
- *
- * For each block that can be dynamically allocated, it can be either
- * free:
- * The block is free.
- *
- * pending:
- * The block is allocated to some client and not free.
- *
- * configured:
- * The block is allocated to some client, and assigned to that
- * client in MDP5_SMP_ALLOC registers.
- *
- * inuse:
- * The block is being actively used by a client.
- *
- * The updates happen in the following steps:
- *
- * 1) mdp5_smp_request():
- * When plane scanout is setup, calculate required number of
- * blocks needed per client, and request. Blocks neither inuse nor
- * configured nor pending by any other client are added to client's
- * pending set.
- * For shrinking, blocks in pending but not in configured can be freed
- * directly, but those already in configured will be freed later by
- * mdp5_smp_commit.
- *
- * 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
- * are configured for the union(pending, inuse)
- * Current pending is copied to configured.
- * It is assumed that mdp5_smp_request and mdp5_smp_configure not run
- * concurrently for the same pipe.
- *
- * 3) mdp5_smp_commit():
- * After next vblank, copy configured -> inuse. Optionally update
- * MDP5_SMP_ALLOC registers if there are newly unused blocks
- *
- * 4) mdp5_smp_release():
- * Must be called after the pipe is disabled and no longer uses any SMB
- *
- * On the next vblank after changes have been committed to hw, the
- * client's pending blocks become it's in-use blocks (and no-longer
- * in-use blocks become available to other clients).
- *
- * btw, hurray for confusing overloaded acronyms! :-/
- *
- * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
- * should happen at (or before)? atomic->check(). And we'd need
- * an API to discard previous requests if update is aborted or
- * (test-only).
- *
- * TODO would perhaps be nice to have debugfs to dump out kernel
- * inuse and pending state of all clients..
- */
-
struct mdp5_smp {
struct drm_device *dev;
@@ -94,16 +28,8 @@ struct mdp5_smp {
int blk_cnt;
int blk_size;
-
- spinlock_t state_lock;
- mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
-
- struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
-static void update_smp_state(struct mdp5_smp *smp,
- u32 cid, mdp5_smp_state_t *assigned);
-
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
@@ -134,57 +60,38 @@ static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
return mdp5_cfg->smp.clients[pipe] + plane;
}
-/* step #1: update # of blocks pending for the client: */
+/* allocate blocks for the specified request: */
static int smp_request_block(struct mdp5_smp *smp,
+ struct mdp5_smp_state *state,
u32 cid, int nblks)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
- int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
+ void *cs = state->client_state[cid];
+ int i, avail, cnt = smp->blk_cnt;
uint8_t reserved;
- unsigned long flags;
- reserved = smp->reserved[cid];
+ /* we shouldn't be requesting blocks for an in-use client: */
+ WARN_ON(bitmap_weight(cs, cnt) > 0);
- spin_lock_irqsave(&smp->state_lock, flags);
+ reserved = smp->reserved[cid];
if (reserved) {
nblks = max(0, nblks - reserved);
DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
}
- avail = cnt - bitmap_weight(smp->state, cnt);
+ avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
- dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
- ret = -ENOSPC;
- goto fail;
+ return -ENOSPC;
}
- cur_nblks = bitmap_weight(ps->pending, cnt);
- if (nblks > cur_nblks) {
- /* grow the existing pending reservation: */
- for (i = cur_nblks; i < nblks; i++) {
- int blk = find_first_zero_bit(smp->state, cnt);
- set_bit(blk, ps->pending);
- set_bit(blk, smp->state);
- }
- } else {
- /* shrink the existing pending reservation: */
- for (i = cur_nblks; i > nblks; i--) {
- int blk = find_first_bit(ps->pending, cnt);
- clear_bit(blk, ps->pending);
-
- /* clear in global smp_state if not in configured
- * otherwise until _commit()
- */
- if (!test_bit(blk, ps->configured))
- clear_bit(blk, smp->state);
- }
+ for (i = 0; i < nblks; i++) {
+ int blk = find_first_zero_bit(state->state, cnt);
+ set_bit(blk, cs);
+ set_bit(blk, state->state);
}
-fail:
- spin_unlock_irqrestore(&smp->state_lock, flags);
return 0;
}
@@ -209,14 +116,15 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
- const struct mdp_format *format, u32 width, bool hdecim)
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
- struct drm_device *dev = mdp5_kms->dev;
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
- int i, hsub, nplanes, nlines, nblks, ret;
+ int i, hsub, nplanes, nlines;
u32 fmt = format->base.pixel_format;
+ uint32_t blkcfg = 0;
nplanes = drm_format_num_planes(fmt);
hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -239,7 +147,7 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
hsub = 1;
}
- for (i = 0, nblks = 0; i < nplanes; i++) {
+ for (i = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
cpp = drm_format_plane_cpp(fmt, i);
@@ -251,60 +159,72 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
if (rev == 0)
n = roundup_pow_of_two(n);
+ blkcfg |= (n << (8 * i));
+ }
+
+ return blkcfg;
+}
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ int n = blkcfg & 0xff;
+
+ if (!n)
+ continue;
+
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
- ret = smp_request_block(smp, pipe2client(pipe, i), n);
+ ret = smp_request_block(smp, state, cid, n);
if (ret) {
dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
- nblks += n;
+ blkcfg >>= 8;
}
- set_fifo_thresholds(smp, pipe, nblks);
+ state->assigned |= (1 << pipe);
return 0;
}
/* Release SMP blocks for all clients of the pipe */
-void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe)
{
int i;
- unsigned long flags;
int cnt = smp->blk_cnt;
for (i = 0; i < pipe2nclients(pipe); i++) {
- mdp5_smp_state_t assigned;
u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
-
- spin_lock_irqsave(&smp->state_lock, flags);
-
- /* clear hw assignment */
- bitmap_or(assigned, ps->inuse, ps->configured, cnt);
- update_smp_state(smp, CID_UNUSED, &assigned);
-
- /* free to global pool */
- bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
- bitmap_andnot(smp->state, smp->state, assigned, cnt);
+ void *cs = state->client_state[cid];
- /* clear client's infor */
- bitmap_zero(ps->pending, cnt);
- bitmap_zero(ps->configured, cnt);
- bitmap_zero(ps->inuse, cnt);
+ /* update global state: */
+ bitmap_andnot(state->state, state->state, cs, cnt);
- spin_unlock_irqrestore(&smp->state_lock, flags);
+ /* clear client's state */
+ bitmap_zero(cs, cnt);
}
- set_fifo_thresholds(smp, pipe, 0);
+ state->released |= (1 << pipe);
}
-static void update_smp_state(struct mdp5_smp *smp,
+/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
+ * happen after scanout completes.
+ */
+static unsigned update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
+ unsigned nblks = 0;
u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
@@ -330,62 +250,88 @@ static void update_smp_state(struct mdp5_smp *smp,
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+
+ nblks++;
}
+
+ return nblks;
}
-/* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
- int cnt = smp->blk_cnt;
- mdp5_smp_state_t assigned;
- int i;
+ enum mdp5_pipe pipe;
- for (i = 0; i < pipe2nclients(pipe); i++) {
- u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
+ unsigned i, nblks = 0;
- /*
- * if vblank has not happened since last smp_configure
- * skip the configure for now
- */
- if (!bitmap_equal(ps->inuse, ps->configured, cnt))
- continue;
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ void *cs = state->client_state[cid];
- bitmap_copy(ps->configured, ps->pending, cnt);
- bitmap_or(assigned, ps->inuse, ps->configured, cnt);
- update_smp_state(smp, cid, &assigned);
+ nblks += update_smp_state(smp, cid, cs);
+
+ DBG("assign %s:%u, %u blks",
+ pipe2name(pipe), i, nblks);
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
}
+
+ state->assigned = 0;
}
-/* step #3: after vblank, copy configured -> inuse: */
-void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
- int cnt = smp->blk_cnt;
- mdp5_smp_state_t released;
- int i;
-
- for (i = 0; i < pipe2nclients(pipe); i++) {
- u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ enum mdp5_pipe pipe;
- /*
- * Figure out if there are any blocks we where previously
- * using, which can be released and made available to other
- * clients:
- */
- if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
- unsigned long flags;
+ for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
+ DBG("release %s", pipe2name(pipe));
+ set_fifo_thresholds(smp, pipe, 0);
+ }
- spin_lock_irqsave(&smp->state_lock, flags);
- /* clear released blocks: */
- bitmap_andnot(smp->state, smp->state, released, cnt);
- spin_unlock_irqrestore(&smp->state_lock, flags);
+ state->released = 0;
+}
- update_smp_state(smp, CID_UNUSED, &released);
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct mdp5_hw_pipe_state *hwpstate;
+ struct mdp5_smp_state *state;
+ int total = 0, i, j;
+
+ drm_printf(p, "name\tinuse\tplane\n");
+ drm_printf(p, "----\t-----\t-----\n");
+
+ if (drm_can_sleep())
+ drm_modeset_lock(&mdp5_kms->state_lock, NULL);
+
+ /* grab these *after* we hold the state_lock */
+ hwpstate = &mdp5_kms->state->hwpipe;
+ state = &mdp5_kms->state->smp;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ for (j = 0; j < pipe2nclients(pipe); j++) {
+ u32 cid = pipe2client(pipe, j);
+ void *cs = state->client_state[cid];
+ int inuse = bitmap_weight(cs, smp->blk_cnt);
+
+ drm_printf(p, "%s:%d\t%d\t%s\n",
+ pipe2name(pipe), j, inuse,
+ plane ? plane->name : NULL);
+
+ total += inuse;
}
-
- bitmap_copy(ps->inuse, ps->configured, cnt);
}
+
+ drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
+ drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
+ bitmap_weight(state->state, smp->blk_cnt));
+
+ if (drm_can_sleep())
+ drm_modeset_unlock(&mdp5_kms->state_lock);
}
void mdp5_smp_destroy(struct mdp5_smp *smp)
@@ -393,8 +339,9 @@ void mdp5_smp_destroy(struct mdp5_smp *smp)
kfree(smp);
}
-struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
+ struct mdp5_smp_state *state = &mdp5_kms->state->smp;
struct mdp5_smp *smp = NULL;
int ret;
@@ -404,14 +351,13 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo
goto fail;
}
- smp->dev = dev;
+ smp->dev = mdp5_kms->dev;
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
/* statically tied MMBs cannot be re-allocated: */
- bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+ bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
- spin_lock_init(&smp->state_lock);
return smp;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 20b87e800ea3..b41d0448fbe8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -19,12 +19,53 @@
#ifndef __MDP5_SMP_H__
#define __MDP5_SMP_H__
+#include <drm/drm_print.h>
+
#include "msm_drv.h"
-struct mdp5_client_smp_state {
- mdp5_smp_state_t inuse;
- mdp5_smp_state_t configured;
- mdp5_smp_state_t pending;
+/*
+ * SMP - Shared Memory Pool:
+ *
+ * SMP blocks are shared between all the clients, where each plane in
+ * a scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ *
+ * Atomic SMP State:
+ *
+ * On atomic updates that modify SMP configuration, the state is cloned
+ * (copied) and modified. For test-only, or in cases where atomic
+ * update fails (or if we hit ww_mutex deadlock/backoff condition) the
+ * new state is simply thrown away.
+ *
+ * Because the SMP registers are not double buffered, updates are a
+ * two step process:
+ *
+ * 1) in _prepare_commit() we configure things (via read-modify-write)
+ * for the newly assigned pipes, so we don't take away blocks
+ * assigned to pipes that are still scanning out
+ * 2) in _complete_commit(), after vblank/etc, we clear things for the
+ * released clients, since at that point old pipes are no longer
+ * scanning out.
+ */
+struct mdp5_smp_state {
+ /* global state of what blocks are in use: */
+ mdp5_smp_state_t state;
+
+ /* per client state of what blocks they are using: */
+ mdp5_smp_state_t client_state[MAX_CLIENTS];
+
+ /* assigned pipes (hw updated at _prepare_commit()): */
+ unsigned long assigned;
+
+ /* released pipes (hw updated at _complete_commit()): */
+ unsigned long released;
};
struct mdp5_kms;
@@ -36,13 +77,22 @@ struct mdp5_smp;
* which is then used to call the other mdp5_smp_*(handler, ...) functions.
*/
-struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
+ const struct mdp5_smp_block *cfg);
void mdp5_smp_destroy(struct mdp5_smp *smp);
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
- const struct mdp_format *format, u32 width, bool hdecim);
-void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
-void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
-void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
+
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim);
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg);
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe);
+
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
#endif /* __MDP5_SMP_H__ */
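Pulling the header comment together, here is a sketch of where the two halves run in the mdp5 commit path. The bodies are reduced to the SMP calls only and the function names are illustrative; the real mdp5_prepare_commit()/mdp5_complete_commit() also handle power and the hw pipes, as the mdp5_kms.c hunks earlier in this patch show:

    static void example_prepare_commit(struct msm_kms *kms,
                    struct drm_atomic_state *s)
    {
            struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

            /* step 1: before FLUSH, program SMP_ALLOC_* for newly assigned
             * pipes without disturbing blocks still being scanned out */
            if (mdp5_kms->smp)
                    mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
    }

    static void example_complete_commit(struct msm_kms *kms,
                    struct drm_atomic_state *s)
    {
            struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

            /* step 2: after vblank, it is now safe to drop blocks owned by
             * clients that were released in this update */
            if (mdp5_kms->smp)
                    mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
    }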
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 452e3518f98b..8994c365e218 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 73bae382eac3..30b5d23e53b4 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -141,7 +141,7 @@ static void complete_commit(struct msm_commit *c, bool async)
kms->funcs->complete_commit(kms, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
commit_destroy(c);
}
@@ -217,8 +217,9 @@ int msm_atomic_commit(struct drm_device *dev,
if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);
- plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ drm_atomic_set_fence_for_plane(plane_state, fence);
}
}
@@ -240,6 +241,10 @@ int msm_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(state, true);
+ /* swap driver private state while still holding state_lock */
+ if (to_kms_state(state)->state)
+ priv->kms->funcs->swap_state(priv->kms, state);
+
/*
* Everything below can be run asynchronously without the need to grab
* any modeset locks at all under one conditions: It must be guaranteed
@@ -256,6 +261,7 @@ int msm_atomic_commit(struct drm_device *dev,
* current layout.
*/
+ drm_atomic_state_get(state);
if (nonblock) {
queue_work(priv->atomic_wq, &c->work);
return 0;
@@ -269,3 +275,30 @@ error:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
}
+
+struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+ kfree(state);
+ return NULL;
+ }
+
+ return &state->base;
+}
+
+void msm_atomic_state_clear(struct drm_atomic_state *s)
+{
+ struct msm_kms_state *state = to_kms_state(s);
+ drm_atomic_state_default_clear(&state->base);
+ kfree(state->state);
+ state->state = NULL;
+}
+
+void msm_atomic_state_free(struct drm_atomic_state *state)
+{
+ kfree(to_kms_state(state)->state);
+ drm_atomic_state_default_release(state);
+ kfree(state);
+}
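These three hooks rely on a subclassed atomic state object. Its definition lives in msm_kms.h, which this excerpt does not show, so the sketch below is an assumption of what to_kms_state() resolves to: a standard container_of() downcast carrying a driver-private pointer that mdp5 points at its struct mdp5_state.

    /* Assumed shape of the subclass used by to_kms_state() above: */
    struct msm_kms_state {
            struct drm_atomic_state base;

            /* driver-private global state; for mdp5 this is the duplicated
             * struct mdp5_state that ->swap_state() installs at commit time */
            void *state;
    };
    #define to_kms_state(x) container_of(x, struct msm_kms_state, base)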
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 663f2b6ef091..c1b40f5adb60 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -18,6 +18,8 @@
#ifdef CONFIG_DEBUG_FS
#include "msm_drv.h"
#include "msm_gpu.h"
+#include "msm_kms.h"
+#include "msm_debugfs.h"
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
@@ -141,6 +143,7 @@ int msm_debugfs_late_init(struct drm_device *dev)
int msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
int ret;
ret = drm_debugfs_create_files(msm_debugfs_list,
@@ -152,15 +155,25 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
- return 0;
+ if (priv->kms->funcs->debugfs_init)
+ ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
+
+ return ret;
}
void msm_debugfs_cleanup(struct drm_minor *minor)
{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
drm_debugfs_remove_files(msm_debugfs_list,
ARRAY_SIZE(msm_debugfs_list), minor);
- if (!minor->dev->dev_private)
+ if (!priv)
return;
+
+ if (priv->kms->funcs->debugfs_cleanup)
+ priv->kms->funcs->debugfs_cleanup(priv->kms, minor);
+
msm_rd_debugfs_cleanup(minor);
msm_perf_debugfs_cleanup(minor);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index fb5c0b0a7594..e29bb66f55b1 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_of.h>
+
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
@@ -44,17 +46,21 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.output_poll_changed = msm_fb_output_poll_changed,
.atomic_check = msm_atomic_check,
.atomic_commit = msm_atomic_commit,
+ .atomic_state_alloc = msm_atomic_state_alloc,
+ .atomic_state_clear = msm_atomic_state_clear,
+ .atomic_state_free = msm_atomic_state_free,
};
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+int msm_register_address_space(struct drm_device *dev,
+ struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_mmus++;
+ int idx = priv->num_aspaces++;
- if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;
- priv->mmus[idx] = mmu;
+ priv->aspace[idx] = aspace;
return idx;
}
@@ -77,6 +83,10 @@ static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
+bool dumpstate = false;
+MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
+module_param(dumpstate, bool, 0600);
+
/*
* Util/helpers:
*/
@@ -228,7 +238,7 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->atomic_wq);
destroy_workqueue(priv->atomic_wq);
- if (kms)
+ if (kms && kms->funcs)
kms->funcs->destroy(kms);
if (gpu) {
@@ -766,9 +776,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -903,10 +911,8 @@ static int add_components_mdp(struct device *mdp_dev,
* remote-endpoint isn't a component that we need to add
*/
if (of_device_is_compatible(np, "qcom,mdp4") &&
- ep.port == 0) {
- of_node_put(ep_node);
+ ep.port == 0)
continue;
- }
/*
* It's okay if some of the ports don't have a remote endpoint
@@ -914,15 +920,12 @@ static int add_components_mdp(struct device *mdp_dev,
* any external interface.
*/
intf = of_graph_get_remote_port_parent(ep_node);
- if (!intf) {
- of_node_put(ep_node);
+ if (!intf)
continue;
- }
-
- component_match_add(master_dev, matchptr, compare_of, intf);
+ drm_of_component_match_add(master_dev, matchptr, compare_of,
+ intf);
of_node_put(intf);
- of_node_put(ep_node);
}
return 0;
@@ -962,8 +965,8 @@ static int add_display_components(struct device *dev,
put_device(mdp_dev);
/* add the MDP component itself */
- component_match_add(dev, matchptr, compare_of,
- mdp_dev->of_node);
+ drm_of_component_match_add(dev, matchptr, compare_of,
+ mdp_dev->of_node);
} else {
/* MDP4 */
mdp_dev = dev;
@@ -996,7 +999,7 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- component_match_add(dev, matchptr, compare_of, np);
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
@@ -1035,7 +1038,13 @@ static int msm_pdev_probe(struct platform_device *pdev)
if (ret)
return ret;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ /* on all devices that I am aware of, iommu's which can map
+ * any address the cpu can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d0da52f2a806..ed4dad3ca133 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -52,6 +52,8 @@ struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
struct msm_fence_cb;
+struct msm_gem_address_space;
+struct msm_gem_vma;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -121,12 +123,16 @@ struct msm_drm_private {
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
- /* registered MMUs: */
- unsigned int num_mmus;
- struct msm_mmu *mmus[NUM_DOMAINS];
+ /* Registered address spaces.. currently this is fixed per # of
+ * iommu's. Ie. one for display block and one for gpu block.
+ * Eventually, to do per-process gpu pagetables, we'll want one
+ * of these per-process.
+ */
+ unsigned int num_aspaces;
+ struct msm_gem_address_space *aspace[NUM_DOMAINS];
unsigned int num_planes;
- struct drm_plane *planes[8];
+ struct drm_plane *planes[16];
unsigned int num_crtcs;
struct drm_crtc *crtcs[8];
@@ -173,8 +179,22 @@ int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
+struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
+void msm_atomic_state_clear(struct drm_atomic_state *state);
+void msm_atomic_state_free(struct drm_atomic_state *state);
+
+int msm_register_address_space(struct drm_device *dev,
+ struct msm_gem_address_space *aspace);
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name);
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
@@ -189,9 +209,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+ uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -217,7 +237,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence);
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
@@ -303,8 +323,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
-#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
static inline int align_pitch(int width, int bpp)
{
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 95cf8fe72ee5..9acf544e7a8f 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -88,11 +88,11 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
- uint32_t iova;
+ uint64_t iova;
for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
- DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ffd4a338ca12..bffe93498512 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -39,6 +39,7 @@ struct msm_fbdev {
static struct fb_ops msm_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
@@ -49,12 +50,6 @@ static struct fb_ops msm_fb_ops = {
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_mmap = msm_fbdev_mmap,
-
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
@@ -81,7 +76,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- uint32_t paddr;
+ uint64_t paddr;
int ret, size;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a9b9b1c95a2e..3f299c537b77 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,7 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
fctx->dev = dev;
fctx->name = name;
- fctx->context = fence_context_alloc(1);
+ fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
struct msm_fence {
struct msm_fence_context *fctx;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct msm_fence *to_msm_fence(struct fence *fence)
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
return container_of(fence, struct msm_fence, base);
}
-static const char *msm_fence_get_driver_name(struct fence *fence)
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
return "msm";
}
-static const char *msm_fence_get_timeline_name(struct fence *fence)
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct fence *fence)
+static bool msm_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool msm_fence_signaled(struct fence *fence)
+static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return fence_completed(f->fctx, f->base.seqno);
}
-static void msm_fence_release(struct fence *fence)
+static void msm_fence_release(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops msm_fence_ops = {
+static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = msm_fence_release,
};
-struct fence *
+struct dma_fence *
msm_fence_alloc(struct msm_fence_context *fctx)
{
struct msm_fence *f;
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx)
f->fctx = fctx;
- fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
- fctx->context, ++fctx->last_fence);
+ dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+ fctx->context, ++fctx->last_fence);
return &f->base;
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index ceb5b3d314b4..56061aa1959d 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
-struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
#endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..cd06cfd94687 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -296,12 +296,8 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
- msm_obj->domain[id].iova = 0;
- }
+ msm_gem_unmap_vma(priv->aspace[id],
+ &msm_obj->domain[id], msm_obj->sgt);
}
}
@@ -313,7 +309,7 @@ put_iova(struct drm_gem_object *obj)
* the refcnt counter needs to be atomic_t.
*/
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+ uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
@@ -326,16 +322,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
return PTR_ERR(pages);
if (iommu_present(&platform_bus_type)) {
- struct msm_mmu *mmu = priv->mmus[id];
- uint32_t offset;
-
- if (WARN_ON(!mmu))
- return -EINVAL;
-
- offset = (uint32_t)mmap_offset(obj);
- ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
+ ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
+ msm_obj->sgt, obj->size >> PAGE_SHIFT);
} else {
msm_obj->domain[id].iova = physaddr(obj);
}
@@ -348,7 +336,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret;
@@ -370,7 +358,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_obj->domain[id].iova);
@@ -521,7 +509,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -540,7 +528,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = reservation_object_get_excl(msm_obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -553,7 +541,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(msm_obj->resv));
if (fence->context != fctx->context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -563,7 +551,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +604,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
#ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
struct seq_file *m)
{
- if (!fence_is_signaled(fence))
+ if (!dma_fence_is_signaled(fence))
seq_printf(m, "\t%9s: %s %s seq %u\n", type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
@@ -631,9 +619,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct dma_fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
+ unsigned id;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -650,10 +640,15 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size, madv);
+ off, msm_obj->vaddr);
+
+ for (id = 0; id < priv->num_aspaces; id++)
+ seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+ seq_printf(m, " %zu%s\n", obj->size, madv);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -761,7 +756,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
- unsigned sz;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -783,16 +777,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- sz = sizeof(*msm_obj);
- if (use_vram)
- sz += sizeof(struct drm_mm_node);
-
- msm_obj = kzalloc(sz, GFP_KERNEL);
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
if (use_vram)
- msm_obj->vram_node = (void *)&msm_obj[1];
+ msm_obj->vram_node = &msm_obj->domain[0].node;
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b2f13cfe945e..7d529516b332 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -24,6 +24,20 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+struct msm_gem_address_space {
+ const char *name;
+ /* NOTE: mm managed at the page level, size is in # of pages
+ * and position mm_node->start is in # of pages:
+ */
+ struct drm_mm mm;
+ struct msm_mmu *mmu;
+};
+
+struct msm_gem_vma {
+ struct drm_mm_node node;
+ uint64_t iova;
+};
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -61,10 +75,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct {
- // XXX
- uint32_t iova;
- } domain[NUM_DOMAINS];
+ struct msm_gem_vma domain[NUM_DOMAINS];
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
@@ -104,7 +115,7 @@ struct msm_gem_submit {
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
- struct fence *fence;
+ struct dma_fence *fence;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
unsigned int nr_cmds;
@@ -112,13 +123,13 @@ struct msm_gem_submit {
struct {
uint32_t type;
uint32_t size; /* in dwords */
- uint32_t iova;
+ uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
} *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
- uint32_t iova;
+ uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 283d2841ba58..192b2d3a79cb 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
- unregister_shrinker(&priv->shrinker);
+
+ if (priv->shrinker.nr_deferred) {
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+ unregister_shrinker(&priv->shrinker);
+ }
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37a65f3..166e84e4f0d4 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
- fence_put(submit->fence);
+ dma_fence_put(submit->fence);
list_del(&submit->node);
put_pid(submit->pid);
kfree(submit);
@@ -241,7 +241,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
@@ -266,7 +266,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+ struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -312,7 +312,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
- uint32_t iova, off;
+ uint32_t off;
+ uint64_t iova;
bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -380,7 +381,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct fence *in_fence = NULL;
+ struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
int out_fence_fd = -1;
unsigned i;
@@ -439,7 +440,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if (in_fence->context != gpu->fctx->context) {
- ret = fence_wait(in_fence, true);
+ ret = dma_fence_wait(in_fence, true);
if (ret)
goto out;
}
@@ -461,7 +462,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
void __user *userptr =
u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
- uint32_t iova;
+ uint64_t iova;
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
@@ -542,7 +543,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (in_fence)
- fence_put(in_fence);
+ dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 000000000000..a311d26ccb21
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+void
+msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt)
+{
+ if (!vma->iova)
+ return;
+
+ if (aspace->mmu) {
+ unsigned size = vma->node.size << PAGE_SHIFT;
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
+ }
+
+ drm_mm_remove_node(&vma->node);
+
+ vma->iova = 0;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+ int ret;
+
+ if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+ return 0;
+
+ ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
+ 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ return ret;
+
+ vma->iova = vma->node.start << PAGE_SHIFT;
+
+ if (aspace->mmu) {
+ unsigned size = npages << PAGE_SHIFT;
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, IOMMU_READ | IOMMU_WRITE);
+ }
+
+ return ret;
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+ drm_mm_takedown(&aspace->mm);
+ if (aspace->mmu)
+ aspace->mmu->funcs->destroy(aspace->mmu);
+ kfree(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name)
+{
+ struct msm_gem_address_space *aspace;
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ aspace->name = name;
+ aspace->mmu = msm_iommu_new(dev, domain);
+
+ drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
+ (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+
+ return aspace;
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb09838b5ae..b28527a65d09 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -91,21 +91,20 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
- clk_prepare(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
+ if (gpu->grp_clks[0] && gpu->fast_rate)
+ clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
- if (rate_clk && gpu->fast_rate)
- clk_set_rate(rate_clk, gpu->fast_rate);
+ /* Set the RBBM timer rate to 19.2Mhz */
+ if (gpu->grp_clks[2])
+ clk_set_rate(gpu->grp_clks[2], 19200000);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ clk_prepare(gpu->grp_clks[i]);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
@@ -114,24 +113,22 @@ static int enable_clk(struct msm_gpu *gpu)
static int disable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
-
- if (rate_clk && gpu->slow_rate)
- clk_set_rate(rate_clk, gpu->slow_rate);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
+ if (gpu->grp_clks[0] && gpu->slow_rate)
+ clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
+
+ if (gpu->grp_clks[2])
+ clk_set_rate(gpu->grp_clks[2], 0);
+
return 0;
}
@@ -476,7 +473,7 @@ static void retire_submits(struct msm_gpu *gpu)
submit = list_first_entry(&gpu->submit_list,
struct msm_gem_submit, node);
- if (fence_is_signaled(submit->fence)) {
+ if (dma_fence_is_signaled(submit->fence)) {
retire_submit(gpu, submit);
} else {
break;
@@ -528,7 +525,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
/* can't happen yet.. but when we add 2d support we'll have
* to deal w/ cross-ring synchronization:
@@ -563,8 +560,8 @@ static irqreturn_t irq_handler(int irq, void *data)
}
static const char *clk_names[] = {
- "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
- "alt_mem_iface_clk",
+ "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk",
+ "mem_iface_clk", "alt_mem_iface_clk",
};
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -656,12 +653,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
*/
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
+ /* TODO 32b vs 64b address space.. */
+ iommu->geometry.aperture_start = SZ_16M;
+ iommu->geometry.aperture_end = 0xffffffff;
+
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(gpu->mmu)) {
- ret = PTR_ERR(gpu->mmu);
+ gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu, "gpu");
+ if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->mmu = NULL;
+ gpu->aspace = NULL;
iommu_domain_free(iommu);
goto fail;
}
@@ -669,7 +671,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_mmu(drm, gpu->mmu);
+ gpu->id = msm_register_address_space(drm, gpu->aspace);
/* Create ringbuffer: */
@@ -705,8 +707,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->mmu)
- gpu->mmu->funcs->destroy(gpu->mmu);
+ if (gpu->aspace)
+ msm_gem_address_space_destroy(gpu->aspace);
if (gpu->fctx)
msm_fence_context_free(gpu->fctx);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index d61d98a6e047..c4c39d3272c7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -50,7 +50,7 @@ struct msm_gpu_funcs {
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void (*flush)(struct msm_gpu *gpu);
- void (*idle)(struct msm_gpu *gpu);
+ bool (*idle)(struct msm_gpu *gpu);
irqreturn_t (*irq)(struct msm_gpu *irq);
uint32_t (*last_fence)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
@@ -80,7 +80,7 @@ struct msm_gpu {
/* ringbuffer: */
struct msm_ringbuffer *rb;
- uint32_t rb_iova;
+ uint64_t rb_iova;
/* list of GEM active objects: */
struct list_head active_list;
@@ -98,7 +98,7 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int id;
/* Power Control: */
@@ -154,6 +154,45 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
return msm_readl(gpu->mmio + (reg << 2));
}
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+ uint32_t val = gpu_read(gpu, reg);
+
+ val &= ~mask;
+ gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ /*
+ * Why not a readq here? Two reasons: 1) many of the LO registers are
+ * not quad word aligned and 2) the GPU hardware designers have a bit
+ * of a history of putting registers where they fit, especially in
+ * spins. The longer a GPU family goes the higher the chance that
+ * we'll get burned. We could do a series of validity checks if we
+ * wanted to, but really is a readq() that much better? Nah.
+ */
+
+ /*
+ * For some lo/hi registers (like perfcounters), the hi value is latched
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that
+ */
+ val = (u64) msm_readl(gpu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+ /* Why not a writeq here? Read the screed above */
+ msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+ msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
+
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 3a294d0da3a0..61aaaa1de6eb 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -45,13 +45,13 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
iommu_detach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ unsigned long da = iova;
unsigned int i, j;
int ret;
@@ -62,7 +62,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
+ VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -84,13 +84,13 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ unsigned long da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
+ VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 40e41e5cdbc6..e470f4cf8f76 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -40,6 +40,8 @@ struct msm_kms_funcs {
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* swap global atomic state: */
+ void (*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state);
/* modeset, bracketing atomic_commit(): */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
@@ -56,6 +58,11 @@ struct msm_kms_funcs {
bool is_cmd_mode);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
+#ifdef CONFIG_DEBUG_FS
+ /* debugfs: */
+ int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
+ void (*debugfs_cleanup)(struct msm_kms *kms, struct drm_minor *minor);
+#endif
};
struct msm_kms {
@@ -65,6 +72,18 @@ struct msm_kms {
int irq;
};
+/**
+ * Subclass of drm_atomic_state, to allow kms backend to have driver
+ * private global state. The kms backend can do whatever it wants
+ * with the ->state ptr. On ->atomic_state_clear() the ->state ptr
+ * is kfree'd and set back to NULL.
+ */
+struct msm_kms_state {
+ struct drm_atomic_state base;
+ void *state;
+};
+#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
+
static inline void msm_kms_init(struct msm_kms *kms,
const struct msm_kms_funcs *funcs)
{
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index b8ca9a0e9170..f85c879e68d2 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -23,9 +23,9 @@
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
- int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len);
void (*destroy)(struct msm_mmu *mmu);
};
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 8487f461f05f..6607456dc626 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -289,7 +289,7 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
- uint32_t iova, uint32_t size)
+ uint64_t iova, uint32_t size)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf;
@@ -306,7 +306,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
}
rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, size }, 8);
+ (uint32_t[3]){ iova, size, iova >> 32 }, 12);
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr_locked(&obj->base);
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2527bf4ca5d9..fde6e3656636 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -22,6 +22,7 @@ nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
nouveau-y += nouveau_drm.o
nouveau-y += nouveau_hwmon.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
nouveau-y += nouveau_nvif.o
nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
nouveau-y += nouveau_usif.o # userspace <-> nvif
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0cb7a18cde26..59d1d1c5de5f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -702,7 +702,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
- drm_vblank_pre_modeset(dev, nv_crtc->index);
+ drm_crtc_vblank_off(crtc);
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
NVBlankScreen(dev, nv_crtc->index, true);
@@ -734,7 +734,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
#endif
funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- drm_vblank_post_modeset(dev, nv_crtc->index);
+ drm_crtc_vblank_on(crtc);
}
static void nv_crtc_destroy(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index ec444eac6258..a79514d440b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -33,7 +33,7 @@
#include "nouveau_connector.h"
#include "nouveau_display.h"
#include "nvreg.h"
-
+#include "disp.h"
struct nouveau_plane {
struct drm_plane base;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index d15c296b5f33..ae49dfd1f97b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -34,6 +34,8 @@ struct nv50_disp_mthd_v1 {
#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22
#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23
#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24
+#define NV50_DISP_MTHD_V1_SOR_DP_MST_LINK 0x25
+#define NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI 0x26
#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30
__u8 method;
__u16 hasht;
@@ -90,6 +92,21 @@ struct nv50_disp_sor_dp_pwr_v0 {
__u8 pad02[6];
};
+struct nv50_disp_sor_dp_mst_link_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 pad02[6];
+};
+
+struct nv50_disp_sor_dp_mst_vcpi_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u8 start_slot;
+ __u8 num_slots;
+ __u16 pbn;
+ __u16 aligned_pbn;
+};
+
struct nv50_disp_pior_pwr_v0 {
__u8 version;
__u8 state;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e6e9537537cf..82235f30277c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -52,7 +52,7 @@
#define GM107_DISP /* cl5070.h */ 0x00009470
#define GM200_DISP /* cl5070.h */ 0x00009570
#define GP100_DISP /* cl5070.h */ 0x00009770
-#define GP104_DISP /* cl5070.h */ 0x00009870
+#define GP102_DISP /* cl5070.h */ 0x00009870
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -90,7 +90,7 @@
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
-#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
+#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8d815967767f..9e58b305b020 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -66,6 +66,35 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
+struct nvif_mclass {
+ s32 oclass;
+ int version;
+};
+
+#define nvif_mclass(o,m) ({ \
+ struct nvif_object *object = (o); \
+ struct nvif_sclass *sclass; \
+ const typeof(m[0]) *mclass = (m); \
+ int ret = -ENODEV; \
+ int cnt, i, j; \
+ \
+ cnt = nvif_object_sclass_get(object, &sclass); \
+ if (cnt >= 0) { \
+ for (i = 0; ret < 0 && mclass[i].oclass; i++) { \
+ for (j = 0; j < cnt; j++) { \
+ if (mclass[i].oclass == sclass[j].oclass && \
+ mclass[i].version >= sclass[j].minver && \
+ mclass[i].version <= sclass[j].maxver) { \
+ ret = i; \
+ break; \
+ } \
+ } \
+ } \
+ nvif_object_sclass_put(&sclass); \
+ } \
+ ret; \
+})
+
/*XXX*/
#include <core/object.h>
#define nvxx_object(a) ({ \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index d3d26a1e215d..b93f4c1a95e5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -8,5 +8,5 @@ int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index e82049667ce4..970ae753968a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -33,5 +33,5 @@ int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
index 934b0ae5521d..2ff64a20c0ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_BOOST_H__
#define __NVBIOS_BOOST_H__
-u16 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
+u32 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
struct nvbios_boostE {
u8 pstate;
@@ -8,10 +8,10 @@ struct nvbios_boostE {
u32 max;
};
-u16 nvbios_boostEe(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
-u16 nvbios_boostEp(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
+u32 nvbios_boostEe(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
+u32 nvbios_boostEp(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
struct nvbios_boostE *);
-u16 nvbios_boostEm(struct nvkm_bios *, u8, u8 *, u8 *, u8 *, u8 *,
+u32 nvbios_boostEm(struct nvkm_bios *, u8, u8 *, u8 *, u8 *, u8 *,
struct nvbios_boostE *);
struct nvbios_boostS {
@@ -21,7 +21,7 @@ struct nvbios_boostS {
u32 max;
};
-u16 nvbios_boostSe(struct nvkm_bios *, int, u16, u8 *, u8 *, u8, u8);
-u16 nvbios_boostSp(struct nvkm_bios *, int, u16, u8 *, u8 *, u8, u8,
+u32 nvbios_boostSe(struct nvkm_bios *, int, u32, u8 *, u8 *, u8, u8);
+u32 nvbios_boostSp(struct nvkm_bios *, int, u32, u8 *, u8 *, u8, u8,
struct nvbios_boostS *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
index 2f0e0c8e83be..76fe7d50a1ce 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_CSTEP_H__
#define __NVBIOS_CSTEP_H__
-u16 nvbios_cstepTe(struct nvkm_bios *,
+u32 nvbios_cstepTe(struct nvkm_bios *,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
struct nvbios_cstepE {
@@ -8,10 +8,10 @@ struct nvbios_cstepE {
u8 index;
};
-u16 nvbios_cstepEe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
-u16 nvbios_cstepEp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+u32 nvbios_cstepEe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_cstepEp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepE *);
-u16 nvbios_cstepEm(struct nvkm_bios *, u8 pstate, u8 *ver, u8 *hdr,
+u32 nvbios_cstepEm(struct nvkm_bios *, u8 pstate, u8 *ver, u8 *hdr,
struct nvbios_cstepE *);
struct nvbios_cstepX {
@@ -20,7 +20,7 @@ struct nvbios_cstepX {
u8 voltage;
};
-u16 nvbios_cstepXe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
-u16 nvbios_cstepXp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+u32 nvbios_cstepXe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_cstepXp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepX *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
index 693ea7d9ec43..a7513e8406a3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
@@ -2,5 +2,5 @@
#define __NVBIOS_FAN_H__
#include <subdev/bios/therm.h>
-u16 nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan);
+u32 nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index a47d46dda704..b7a54e605469 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -6,6 +6,7 @@ enum dcb_gpio_func_name {
DCB_GPIO_TVDAC1 = 0x2d,
DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_LOGO_LED_PWM = 0x84,
DCB_GPIO_UNUSED = 0xff,
DCB_GPIO_VID0 = 0x04,
DCB_GPIO_VID1 = 0x05,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index 9cb97477248b..e933d3eede70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,10 +1,16 @@
#ifndef __NVBIOS_ICCSENSE_H__
#define __NVBIOS_ICCSENSE_H__
+struct pwr_rail_resistor_t {
+ u8 mohm;
+ bool enabled;
+};
+
struct pwr_rail_t {
u8 mode;
u8 extdev_id;
- u8 resistor_mohm;
- u8 rail;
+ u8 resistor_count;
+ struct pwr_rail_resistor_t resistors[3];
+ u16 config;
};
struct nvbios_iccsense {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
index d3bd250103d5..478b1c0d2089 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_PERF_H__
#define __NVBIOS_PERF_H__
-u16 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
+u32 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
struct nvbios_perfE {
@@ -17,9 +17,9 @@ struct nvbios_perfE {
u8 pcie_width;
};
-u16 nvbios_perf_entry(struct nvkm_bios *, int idx,
+u32 nvbios_perf_entry(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_perfEp(struct nvkm_bios *, int idx,
+u32 nvbios_perfEp(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
struct nvbios_perfS {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
index 339a826aa176..38188d4c9ab5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
@@ -2,10 +2,10 @@
#define __NVBIOS_TIMING_H__
#include <subdev/bios/ramcfg.h>
-u16 nvbios_timingTe(struct nvkm_bios *,
+u32 nvbios_timingTe(struct nvkm_bios *,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u16 nvbios_timingEe(struct nvkm_bios *, int idx,
+u32 nvbios_timingEe(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_timingEp(struct nvkm_bios *, int idx,
+u32 nvbios_timingEp(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index 6633c6db9281..bea31cdd1dd1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,21 +1,24 @@
#ifndef __NVBIOS_VMAP_H__
#define __NVBIOS_VMAP_H__
struct nvbios_vmap {
+ u8 max0;
+ u8 max1;
+ u8 max2;
};
-u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *);
struct nvbios_vmap_entry {
- u8 unk0;
+ u8 mode;
u8 link;
u32 min;
u32 max;
s32 arg[6];
};
-u16 nvbios_vmap_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
-u16 nvbios_vmap_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
+u32 nvbios_vmap_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
+u32 nvbios_vmap_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
struct nvbios_vmap_entry *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index b0df610cec2b..f0baa2c7de09 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -13,16 +13,17 @@ struct nvbios_volt {
u32 base;
/* GPIO mode */
- u8 vidmask;
- s16 step;
+ bool ranged;
+ u8 vidmask;
+ s16 step;
/* PWM mode */
u32 pwm_freq;
u32 pwm_range;
};
-u16 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_volt_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_volt_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_volt *);
struct nvbios_volt_entry {
@@ -30,7 +31,7 @@ struct nvbios_volt_entry {
u8 vid;
};
-u16 nvbios_volt_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
-u16 nvbios_volt_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
+u32 nvbios_volt_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
+u32 nvbios_volt_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
struct nvbios_volt_entry *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
new file mode 100644
index 000000000000..87f804fc3a88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -0,0 +1,24 @@
+#ifndef __NVBIOS_VPSTATE_H__
+#define __NVBIOS_VPSTATE_H__
+struct nvbios_vpstate_header {
+ u32 offset;
+
+ u8 version;
+ u8 hlen;
+ u8 ecount;
+ u8 elen;
+ u8 scount;
+ u8 slen;
+
+ u8 base_id;
+ u8 boost_id;
+ u8 tdp_id;
+};
+struct nvbios_vpstate_entry {
+ u8 pstate;
+ u16 clock_mhz;
+};
+int nvbios_vpstate_parse(struct nvkm_bios *, struct nvbios_vpstate_header *);
+int nvbios_vpstate_entry(struct nvkm_bios *, struct nvbios_vpstate_header *,
+ u8 idx, struct nvbios_vpstate_entry *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index fb54417bc458..e5275f742977 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -6,6 +6,10 @@
struct nvbios_pll;
struct nvkm_pll_vals;
+#define NVKM_CLK_CSTATE_DEFAULT -1 /* POSTed default */
+#define NVKM_CLK_CSTATE_BASE -2 /* pstate base */
+#define NVKM_CLK_CSTATE_HIGHEST -3 /* highest possible */
+
enum nv_clk_src {
nv_clk_src_crystal,
nv_clk_src_href,
@@ -52,6 +56,7 @@ struct nvkm_cstate {
struct list_head head;
u8 voltage;
u32 domain[nv_clk_src_max];
+ u8 id;
};
struct nvkm_pstate {
@@ -67,7 +72,8 @@ struct nvkm_pstate {
struct nvkm_domain {
enum nv_clk_src name;
u8 bios; /* 0xff for none */
-#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_VPSTATE 0x02
u8 flags;
const char *mname;
int mdiv;
@@ -93,10 +99,16 @@ struct nvkm_clk {
int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
int astate; /* perfmon adjustment (base) */
- int tstate; /* thermal adjustment (max-) */
int dstate; /* display adjustment (min+) */
+ u8 temp;
bool allow_reclock;
+#define NVKM_CLK_BOOST_NONE 0x0
+#define NVKM_CLK_BOOST_BIOS 0x1
+#define NVKM_CLK_BOOST_FULL 0x2
+ u8 boost_mode;
+ u32 base_khz;
+ u32 boost_khz;
/*XXX: die, these are here *only* to support the completely
* bat-shit insane what-was-nouveau_hw.c code
@@ -110,7 +122,7 @@ int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
-int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 3a410275fa71..794e432578b2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -93,8 +93,9 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
@@ -156,4 +157,6 @@ struct nvkm_ram_func {
int (*prog)(struct nvkm_ram *);
void (*tidy)(struct nvkm_ram *);
};
+
+extern const u8 gf100_pte_storage_type_map[256];
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index e61923d5e49c..f37538eb1fe5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -35,6 +35,8 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index b765f4ffcde6..08ef9983c643 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -15,12 +15,28 @@ struct nvkm_volt {
u32 max_uv;
u32 min_uv;
+
+ /*
+ * These are fully functional map entries creating a sw ceiling for
+ * the voltage. These all can describe different kind of curves, so
+ * that for any given temperature a different one can return the lowest
+ * value of all three.
+ */
+ u8 max0_id;
+ u8 max1_id;
+ u8 max2_id;
+
+ int speedo;
};
+int nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temperature);
+int nvkm_volt_map_min(struct nvkm_volt *volt, u8 id);
int nvkm_volt_get(struct nvkm_volt *);
-int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
+ int condition);
int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dc57b628e074..193573d191e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
if (!parent_adev)
return false;
- return acpi_has_method(parent_adev->handle, "_PR3");
+ return parent_adev->power.flags.power_resources &&
+ acpi_has_method(parent_adev->handle, "_PR3");
}
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f5101be806cb..5e2c5685b4dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -232,6 +232,7 @@ nouveau_backlight_init(struct drm_device *dev)
case NV_DEVICE_INFO_V0_TESLA:
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
return nv50_backlight_init(connector);
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index a1570b109434..23ffe8571a99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -333,6 +333,9 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
+ return nvif_rd32(device, 0x001800) & 0x0000000f;
+ else
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 0067586eb015..18eb061ccafb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -31,10 +31,8 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
-#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
-#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
#define ROMPTR(d,x) ({ \
struct nouveau_drm *drm = nouveau_drm((d)); \
ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
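
ROM16()/ROM32() switch from pointer casts to get_unaligned_le16()/get_unaligned_le32() because VBIOS image offsets are byte-packed, so the old casts performed unaligned loads and assumed a little-endian host. A self-contained sketch of the byte-wise little-endian reads such helpers reduce to (rom_le16()/rom_le32() are illustrative names, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* Read little-endian values from an arbitrarily aligned byte buffer. */
static uint16_t rom_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static uint32_t rom_le32(const uint8_t *p)
{
	return (uint32_t)rom_le16(p) | ((uint32_t)rom_le16(p + 2) << 16);
}

int main(void)
{
	/* Offset 1 is deliberately misaligned for a u16/u32 load. */
	uint8_t vbios[] = { 0xff, 0x34, 0x12, 0x78, 0x56 };

	printf("le16 at +1: 0x%04x\n", rom_le16(&vbios[1])); /* 0x1234     */
	printf("le32 at +1: 0x%08x\n", rom_le32(&vbios[1])); /* 0x56781234 */
	return 0;
}
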
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 343b8659472c..e0c0007689e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (tile) {
spin_lock(&drm->tile.lock);
- tile->fence = (struct nouveau_fence *)fence_get(fence);
+ tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
@@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct fence *fence = reservation_object_get_excl(bo->resv);
+ struct dma_fence *fence = reservation_object_get_excl(bo->resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1561,6 +1561,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c1084088f9e4..947c200655b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -30,6 +30,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
@@ -47,6 +48,301 @@
#include <nvif/cl0046.h>
#include <nvif/event.h>
+struct drm_display_mode *
+nouveau_conn_native_mode(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper = connector->helper_private;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *largest = NULL;
+ int high_w = 0, high_h = 0, high_v = 0;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ if (helper->mode_valid(connector, mode) != MODE_OK ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+
+ /* Use preferred mode if there is one. */
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ NV_DEBUG(drm, "native mode from preferred\n");
+ return drm_mode_duplicate(dev, mode);
+ }
+
+ /* Otherwise, take the resolution with the largest width, then
+ * height, then vertical refresh
+ */
+ if (mode->hdisplay < high_w)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+ mode->vrefresh < high_v)
+ continue;
+
+ high_w = mode->hdisplay;
+ high_h = mode->vdisplay;
+ high_v = mode->vrefresh;
+ largest = mode;
+ }
+
+ NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
+ high_w, high_h, high_v);
+ return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
+
+int
+nouveau_conn_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, u64 *val)
+{
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ struct nouveau_display *disp = nouveau_display(connector->dev);
+ struct drm_device *dev = connector->dev;
+
+ if (property == dev->mode_config.scaling_mode_property)
+ *val = asyc->scaler.mode;
+ else if (property == disp->underscan_property)
+ *val = asyc->scaler.underscan.mode;
+ else if (property == disp->underscan_hborder_property)
+ *val = asyc->scaler.underscan.hborder;
+ else if (property == disp->underscan_vborder_property)
+ *val = asyc->scaler.underscan.vborder;
+ else if (property == disp->dithering_mode)
+ *val = asyc->dither.mode;
+ else if (property == disp->dithering_depth)
+ *val = asyc->dither.depth;
+ else if (property == disp->vibrant_hue_property)
+ *val = asyc->procamp.vibrant_hue;
+ else if (property == disp->color_vibrance_property)
+ *val = asyc->procamp.color_vibrance;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+nouveau_conn_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ struct nouveau_display *disp = nouveau_display(dev);
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (val) {
+ case DRM_MODE_SCALE_NONE:
+ /* We allow 'None' for EDID modes, even on a fixed
+ * panel (some exist with support for lower refresh
+ * rates, which people might want to use for power-
+ * saving purposes).
+ *
+ * Non-EDID modes will force the use of GPU scaling
+ * to the native mode regardless of this setting.
+ */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* ... except prior to G80, where the code
+ * doesn't support such things.
+ */
+ if (disp->disp.oclass < NV50_DISP)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (asyc->scaler.mode != val) {
+ asyc->scaler.mode = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_property) {
+ if (asyc->scaler.underscan.mode != val) {
+ asyc->scaler.underscan.mode = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_hborder_property) {
+ if (asyc->scaler.underscan.hborder != val) {
+ asyc->scaler.underscan.hborder = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_vborder_property) {
+ if (asyc->scaler.underscan.vborder != val) {
+ asyc->scaler.underscan.vborder = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->dithering_mode) {
+ if (asyc->dither.mode != val) {
+ asyc->dither.mode = val;
+ asyc->set.dither = true;
+ }
+ } else
+ if (property == disp->dithering_depth) {
+ if (asyc->dither.depth != val) {
+ asyc->dither.depth = val;
+ asyc->set.dither = true;
+ }
+ } else
+ if (property == disp->vibrant_hue_property) {
+ if (asyc->procamp.vibrant_hue != val) {
+ asyc->procamp.vibrant_hue = val;
+ asyc->set.procamp = true;
+ }
+ } else
+ if (property == disp->color_vibrance_property) {
+ if (asyc->procamp.color_vibrance != val) {
+ asyc->procamp.color_vibrance = val;
+ asyc->set.procamp = true;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nouveau_conn_atomic_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ __drm_atomic_helper_connector_destroy_state(&asyc->state);
+ kfree(asyc);
+}
+
+struct drm_connector_state *
+nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
+ struct nouveau_conn_atom *asyc;
+ if (!(asyc = kmalloc(sizeof(*asyc), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_connector_duplicate_state(connector, &asyc->state);
+ asyc->dither = armc->dither;
+ asyc->scaler = armc->scaler;
+ asyc->procamp = armc->procamp;
+ asyc->set.mask = 0;
+ return &asyc->state;
+}
+
+void
+nouveau_conn_reset(struct drm_connector *connector)
+{
+ struct nouveau_conn_atom *asyc;
+
+ if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
+ return;
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ __drm_atomic_helper_connector_reset(connector, &asyc->state);
+ asyc->dither.mode = DITHERING_MODE_AUTO;
+ asyc->dither.depth = DITHERING_DEPTH_AUTO;
+ asyc->scaler.mode = DRM_MODE_SCALE_NONE;
+ asyc->scaler.underscan.mode = UNDERSCAN_OFF;
+ asyc->procamp.color_vibrance = 150;
+ asyc->procamp.vibrant_hue = 90;
+
+ if (nouveau_display(connector->dev)->disp.oclass < NV50_DISP) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ /* See note in nouveau_conn_atomic_set_property(). */
+ asyc->scaler.mode = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void
+nouveau_conn_attach_properties(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
+ struct nouveau_display *disp = nouveau_display(dev);
+
+ /* Init DVI-I specific properties. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ dvi_i_subconnector_property, 0);
+
+ /* Add overscan compensation options to digital outputs. */
+ if (disp->underscan_property &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)) {
+ drm_object_attach_property(&connector->base,
+ disp->underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&connector->base,
+ disp->underscan_hborder_property, 0);
+ drm_object_attach_property(&connector->base,
+ disp->underscan_vborder_property, 0);
+ }
+
+ /* Add hue and saturation options. */
+ if (disp->vibrant_hue_property)
+ drm_object_attach_property(&connector->base,
+ disp->vibrant_hue_property,
+ armc->procamp.vibrant_hue);
+ if (disp->color_vibrance_property)
+ drm_object_attach_property(&connector->base,
+ disp->color_vibrance_property,
+ armc->procamp.color_vibrance);
+
+ /* Scaling mode property. */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_TV:
+ break;
+ case DRM_MODE_CONNECTOR_VGA:
+ if (disp->disp.oclass < NV50_DISP)
+ break; /* Can only scale on DFPs. */
+ /* Fall-through. */
+ default:
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ scaling_mode_property,
+ armc->scaler.mode);
+ break;
+ }
+
+ /* Dithering properties. */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_TV:
+ case DRM_MODE_CONNECTOR_VGA:
+ break;
+ default:
+ if (disp->dithering_mode) {
+ drm_object_attach_property(&connector->base,
+ disp->dithering_mode,
+ armc->dither.mode);
+ }
+ if (disp->dithering_depth) {
+ drm_object_attach_property(&connector->base,
+ disp->dithering_depth,
+ armc->dither.depth);
+ }
+ break;
+ }
+}
+
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
@@ -151,7 +447,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
int ret = nouveau_dp_detect(nv_encoder);
- if (ret == 0)
+ if (ret == NOUVEAU_DP_MST)
+ return NULL;
+ if (ret == NOUVEAU_DP_SST)
break;
} else
if ((vga_switcheroo_handler_flags() &
@@ -465,199 +763,39 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
- struct nouveau_display *disp = nouveau_display(connector->dev);
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
- struct drm_device *dev = connector->dev;
- struct nouveau_crtc *nv_crtc;
int ret;
- nv_crtc = NULL;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
-
- /* Scaling mode */
- if (property == dev->mode_config.scaling_mode_property) {
- bool modeset = false;
-
- switch (value) {
- case DRM_MODE_SCALE_NONE:
- /* We allow 'None' for EDID modes, even on a fixed
- * panel (some exist with support for lower refresh
- * rates, which people might want to use for power
- * saving purposes).
- *
- * Non-EDID modes will force the use of GPU scaling
- * to the native mode regardless of this setting.
- */
- switch (nv_connector->type) {
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_eDP:
- /* ... except prior to G80, where the code
- * doesn't support such things.
- */
- if (disp->disp.oclass < NV50_DISP)
- return -EINVAL;
- break;
- default:
- break;
- }
- break;
- case DRM_MODE_SCALE_FULLSCREEN:
- case DRM_MODE_SCALE_CENTER:
- case DRM_MODE_SCALE_ASPECT:
- break;
- default:
- return -EINVAL;
- }
-
- /* Changing between GPU and panel scaling requires a full
- * modeset
- */
- if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
- (value == DRM_MODE_SCALE_NONE))
- modeset = true;
- nv_connector->scaling_mode = value;
-
- if (!nv_crtc)
- return 0;
-
- if (modeset || !nv_crtc->set_scale) {
- ret = drm_crtc_helper_set_mode(&nv_crtc->base,
- &nv_crtc->base.mode,
- nv_crtc->base.x,
- nv_crtc->base.y, NULL);
- if (!ret)
- return -EINVAL;
- } else {
- ret = nv_crtc->set_scale(nv_crtc, true);
- if (ret)
- return ret;
- }
-
- return 0;
- }
-
- /* Underscan */
- if (property == disp->underscan_property) {
- if (nv_connector->underscan != value) {
- nv_connector->underscan = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_hborder_property) {
- if (nv_connector->underscan_hborder != value) {
- nv_connector->underscan_hborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_vborder_property) {
- if (nv_connector->underscan_vborder != value) {
- nv_connector->underscan_vborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
+ if (connector->dev->mode_config.funcs->atomic_commit)
+ return drm_atomic_helper_connector_set_property(connector, property, value);
- /* Dithering */
- if (property == disp->dithering_mode) {
- nv_connector->dithering_mode = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
-
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (property == disp->dithering_depth) {
- nv_connector->dithering_depth = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
-
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (nv_crtc && nv_crtc->set_color_vibrance) {
- /* Hue */
- if (property == disp->vibrant_hue_property) {
- nv_crtc->vibrant_hue = value - 90;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
- /* Saturation */
- if (property == disp->color_vibrance_property) {
- nv_crtc->color_vibrance = value - 100;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
+ ret = connector->funcs->atomic_set_property(&nv_connector->base,
+ &asyc->state,
+ property, value);
+ if (ret) {
+ if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
+ return get_slave_funcs(encoder)->set_property(
+ encoder, connector, property, value);
+ return ret;
}
- if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
- return get_slave_funcs(encoder)->set_property(
- encoder, connector, property, value);
-
- return -EINVAL;
-}
-
-static struct drm_display_mode *
-nouveau_connector_native_mode(struct drm_connector *connector)
-{
- const struct drm_connector_helper_funcs *helper = connector->helper_private;
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *largest = NULL;
- int high_w = 0, high_h = 0, high_v = 0;
+ nv_connector->scaling_mode = asyc->scaler.mode;
+ nv_connector->dithering_mode = asyc->dither.mode;
- list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
- if (helper->mode_valid(connector, mode) != MODE_OK ||
- (mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
-
- /* Use preferred mode if there is one.. */
- if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- NV_DEBUG(drm, "native mode from preferred\n");
- return drm_mode_duplicate(dev, mode);
- }
-
- /* Otherwise, take the resolution with the largest width, then
- * height, then vertical refresh
- */
- if (mode->hdisplay < high_w)
- continue;
-
- if (mode->hdisplay == high_w && mode->vdisplay < high_h)
- continue;
-
- if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
- mode->vrefresh < high_v)
- continue;
-
- high_w = mode->hdisplay;
- high_h = mode->vdisplay;
- high_v = mode->vrefresh;
- largest = mode;
+ if (connector->encoder && connector->encoder->crtc) {
+ ret = drm_crtc_helper_set_mode(connector->encoder->crtc,
+ &connector->encoder->crtc->mode,
+ connector->encoder->crtc->x,
+ connector->encoder->crtc->y,
+ NULL);
+ if (!ret)
+ return -EINVAL;
}
- NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
- high_w, high_h, high_v);
- return largest ? drm_mode_duplicate(dev, largest) : NULL;
+ return 0;
}
struct moderec {
@@ -805,8 +943,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
* the list of modes.
*/
if (!nv_connector->native_mode)
- nv_connector->native_mode =
- nouveau_connector_native_mode(connector);
+ nv_connector->native_mode = nouveau_conn_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
@@ -934,56 +1071,42 @@ nouveau_connector_helper_funcs = {
.best_encoder = nouveau_connector_best_encoder,
};
+static int
+nouveau_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (connector->dev->mode_config.funcs->atomic_commit)
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ return drm_helper_connector_dpms(connector, mode);
+}
+
static const struct drm_connector_funcs
nouveau_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = nouveau_connector_dpms,
+ .reset = nouveau_conn_reset,
.detect = nouveau_connector_detect,
- .destroy = nouveau_connector_destroy,
+ .force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
+ .destroy = nouveau_connector_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
};
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = nouveau_connector_dpms,
+ .reset = nouveau_conn_reset,
.detect = nouveau_connector_detect_lvds,
- .destroy = nouveau_connector_destroy,
+ .force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
-};
-
-static int
-nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
-{
- struct nouveau_encoder *nv_encoder = NULL;
-
- if (connector->encoder)
- nv_encoder = nouveau_encoder(connector->encoder);
- if (nv_encoder && nv_encoder->dcb &&
- nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- if (mode == DRM_MODE_DPMS_ON) {
- u8 data = DP_SET_POWER_D0;
- nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
- usleep_range(1000, 2000);
- } else {
- u8 data = DP_SET_POWER_D3;
- nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
- }
- }
-
- return drm_helper_connector_dpms(connector, mode);
-}
-
-static const struct drm_connector_funcs
-nouveau_connector_funcs_dp = {
- .dpms = nouveau_connector_dp_dpms,
- .detect = nouveau_connector_detect,
.destroy = nouveau_connector_destroy,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
};
static int
@@ -995,19 +1118,20 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
struct nouveau_drm *drm = nouveau_drm(connector->dev);
const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
+ struct nouveau_encoder *nv_encoder;
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
} else {
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
-
- mutex_lock(&drm->dev->mode_config.mutex);
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- mutex_unlock(&drm->dev->mode_config.mutex);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
+ if (!plugged)
+ nv50_mstm_remove(nv_encoder->dp.mstm);
+ }
drm_helper_hpd_irq_event(connector->dev);
}
@@ -1188,7 +1312,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
return ERR_PTR(ret);
}
- funcs = &nouveau_connector_funcs_dp;
+ funcs = &nouveau_connector_funcs;
break;
default:
funcs = &nouveau_connector_funcs;
@@ -1202,38 +1326,10 @@ nouveau_connector_create(struct drm_device *dev, int index)
drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
- /* Init DVI-I specific properties */
- if (nv_connector->type == DCB_CONNECTOR_DVI_I)
- drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
+ connector->funcs->reset(connector);
+ nouveau_conn_attach_properties(connector);
- /* Add overscan compensation options to digital outputs */
- if (disp->underscan_property &&
- (type == DRM_MODE_CONNECTOR_DVID ||
- type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_HDMIA ||
- type == DRM_MODE_CONNECTOR_DisplayPort)) {
- drm_object_attach_property(&connector->base,
- disp->underscan_property,
- UNDERSCAN_OFF);
- drm_object_attach_property(&connector->base,
- disp->underscan_hborder_property,
- 0);
- drm_object_attach_property(&connector->base,
- disp->underscan_vborder_property,
- 0);
- }
-
- /* Add hue and saturation options */
- if (disp->vibrant_hue_property)
- drm_object_attach_property(&connector->base,
- disp->vibrant_hue_property,
- 90);
- if (disp->color_vibrance_property)
- drm_object_attach_property(&connector->base,
- disp->color_vibrance_property,
- 150);
-
- /* default scaling mode */
+ /* Default scaling mode */
switch (nv_connector->type) {
case DCB_CONNECTOR_LVDS:
case DCB_CONNECTOR_LVDS_SPWG:
@@ -1250,23 +1346,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
- /* scaling mode property */
- switch (nv_connector->type) {
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- break;
- case DCB_CONNECTOR_VGA:
- if (disp->disp.oclass < NV50_DISP)
- break; /* can only scale on DFPs */
- /* fall-through */
- default:
- drm_object_attach_property(&connector->base, dev->mode_config.
- scaling_mode_property,
- nv_connector->scaling_mode);
- break;
- }
-
/* dithering properties */
switch (nv_connector->type) {
case DCB_CONNECTOR_TV_0:
@@ -1275,20 +1354,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
case DCB_CONNECTOR_VGA:
break;
default:
- if (disp->dithering_mode) {
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_object_attach_property(&connector->base,
- disp->dithering_mode,
- nv_connector->
- dithering_mode);
- }
- if (disp->dithering_depth) {
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_object_attach_property(&connector->base,
- disp->dithering_depth,
- nv_connector->
- dithering_depth);
- }
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
break;
}
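
The rewritten connector code keeps per-connector settings (dither, scaler, procamp) in a nouveau_conn_atom that embeds drm_connector_state as its first member, and the reset/duplicate/destroy hooks recover the wrapper with container_of. A compilable userspace sketch of that embed-and-recover pattern (base_state, conn_atom and the helpers are stand-ins, not the DRM types):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_state {        /* stands in for drm_connector_state */
	int refcount;
};

struct conn_atom {         /* stands in for nouveau_conn_atom   */
	struct base_state state;
	int dither_mode;
	int scaler_mode;
};

/* Duplicate: allocate the wrapper, copy base plus driver fields. */
static struct base_state *conn_duplicate(struct base_state *old)
{
	struct conn_atom *armc = container_of(old, struct conn_atom, state);
	struct conn_atom *asyc = malloc(sizeof(*asyc));

	if (!asyc)
		return NULL;
	memcpy(asyc, armc, sizeof(*asyc));
	return &asyc->state;   /* callers only ever see the base pointer */
}

static void conn_destroy(struct base_state *state)
{
	free(container_of(state, struct conn_atom, state));
}

int main(void)
{
	struct conn_atom first = { .state = { 1 }, .dither_mode = 3, .scaler_mode = 1 };
	struct base_state *copy = conn_duplicate(&first.state);

	printf("dither in copy: %d\n",
	       container_of(copy, struct conn_atom, state)->dither_mode);
	conn_destroy(copy);
	return 0;
}
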
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 7446ee66ea04..096983c42a1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -35,30 +35,6 @@
struct nvkm_i2c_port;
-enum nouveau_underscan_type {
- UNDERSCAN_OFF,
- UNDERSCAN_ON,
- UNDERSCAN_AUTO,
-};
-
-/* the enum values specifically defined here match nv50/nvd0 hw values, and
- * the code relies on this
- */
-enum nouveau_dithering_mode {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
- DITHERING_MODE_AUTO
-};
-
-enum nouveau_dithering_depth {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
- DITHERING_DEPTH_AUTO
-};
-
struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
@@ -70,12 +46,7 @@ struct nouveau_connector {
struct drm_dp_aux aux;
int dithering_mode;
- int dithering_depth;
int scaling_mode;
- bool scaling_full;
- enum nouveau_underscan_type underscan;
- u32 underscan_hborder;
- u32 underscan_vborder;
struct nouveau_encoder *detected_encoder;
struct edid *edid;
@@ -109,5 +80,74 @@ nouveau_connector_create(struct drm_device *, int index);
extern int nouveau_tv_disable;
extern int nouveau_ignorelid;
extern int nouveau_duallink;
+extern int nouveau_hdmimhz;
+
+#include <drm/drm_crtc.h>
+#define nouveau_conn_atom(p) \
+ container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+ struct drm_connector_state state;
+
+ struct {
+ /* The enum values specifically defined here match nv50/gf119
+ * hw values, and the code relies on this.
+ */
+ enum {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+ } mode;
+ enum {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+ } depth;
+ } dither;
+
+ struct {
+ int mode; /* DRM_MODE_SCALE_* */
+ struct {
+ enum {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+ } mode;
+ u32 hborder;
+ u32 vborder;
+ } underscan;
+ bool full;
+ } scaler;
+
+ struct {
+ int color_vibrance;
+ int vibrant_hue;
+ } procamp;
+
+ union {
+ struct {
+ bool dither:1;
+ bool scaler:1;
+ bool procamp:1;
+ };
+ u8 mask;
+ } set;
+};
+void nouveau_conn_attach_properties(struct drm_connector *);
+void nouveau_conn_reset(struct drm_connector *);
+struct drm_connector_state *
+nouveau_conn_atomic_duplicate_state(struct drm_connector *);
+void nouveau_conn_atomic_destroy_state(struct drm_connector *,
+ struct drm_connector_state *);
+int nouveau_conn_atomic_set_property(struct drm_connector *,
+ struct drm_connector_state *,
+ struct drm_property *, u64);
+int nouveau_conn_atomic_get_property(struct drm_connector *,
+ const struct drm_connector_state *,
+ struct drm_property *, u64 *);
+struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
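
The set member of nouveau_conn_atom overlays one-bit dirty flags with a u8 mask, so a single set.mask = 0 store clears every flag after a state duplication. A small sketch of that union-of-bitfields idiom (the exact bit layout is implementation-defined, which the kernel code accepts here; the names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct change_set {
	union {
		struct {
			bool dither:1;
			bool scaler:1;
			bool procamp:1;
		};
		uint8_t mask;
	};
};

int main(void)
{
	struct change_set set = { .mask = 0 };

	set.scaler = true;
	set.procamp = true;
	printf("anything set? %s (mask=0x%02x)\n",
	       set.mask ? "yes" : "no", set.mask);

	set.mask = 0;            /* clear all flags in one store */
	printf("after reset: 0x%02x\n", set.mask);
	return 0;
}
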
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 863f10b8d818..050fcf30a0d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -38,8 +38,6 @@ struct nouveau_crtc {
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
- int color_vibrance;
- int vibrant_hue;
int sharpness;
int last_dpms;
@@ -54,7 +52,6 @@ struct nouveau_crtc {
struct {
struct nouveau_bo *nvbo;
- bool visible;
uint32_t offset;
void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
void (*set_pos)(struct nouveau_crtc *, int x, int y);
@@ -70,10 +67,6 @@ struct nouveau_crtc {
int depth;
} lut;
- int (*set_dither)(struct nouveau_crtc *crtc, bool update);
- int (*set_scale)(struct nouveau_crtc *crtc, bool update);
- int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
-
void (*save)(struct drm_crtc *crtc);
void (*restore)(struct drm_crtc *crtc);
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index afbf557b23d4..cef08da1da4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -24,7 +24,10 @@
*
*/
+#include <acpi/video.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <nvif/class.h>
@@ -92,7 +95,7 @@ calc(int blanks, int blanke, int total, int line)
return line;
}
-int
+static int
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
@@ -158,9 +161,13 @@ nouveau_display_vblstamp(struct drm_device *dev, unsigned int pipe,
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (nouveau_crtc(crtc)->index == pipe) {
+ struct drm_display_mode *mode;
+ if (dev->mode_config.funcs->atomic_commit)
+ mode = &crtc->state->adjusted_mode;
+ else
+ mode = &crtc->hwmode;
return drm_calc_vbltimestamp_from_scanoutpos(dev,
- pipe, max_error, time, flags,
- &crtc->hwmode);
+ pipe, max_error, time, flags, mode);
}
}
@@ -217,10 +224,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct nouveau_display *disp = nouveau_display(drm_fb->dev);
-
- if (disp->fb_dtor)
- disp->fb_dtor(drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
@@ -245,57 +248,45 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev,
- struct nouveau_framebuffer *nv_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct nouveau_bo *nvbo)
+nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct nouveau_bo *nvbo,
+ struct nouveau_framebuffer **pfb)
{
- struct nouveau_display *disp = nouveau_display(dev);
- struct drm_framebuffer *fb = &nv_fb->base;
+ struct nouveau_framebuffer *fb;
int ret;
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
- nv_fb->nvbo = nvbo;
-
- ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret)
- return ret;
+ if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+ return -ENOMEM;
- if (disp->fb_ctor) {
- ret = disp->fb_ctor(fb);
- if (ret)
- disp->fb_dtor(fb);
- }
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->nvbo = nvbo;
+ ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ if (ret)
+ kfree(fb);
return ret;
}
-static struct drm_framebuffer *
+struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_framebuffer *fb;
+ struct nouveau_bo *nvbo;
struct drm_gem_object *gem;
- int ret = -ENOMEM;
+ int ret;
gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
+ nvbo = nouveau_gem_object(gem);
- nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!nouveau_fb)
- goto err_unref;
-
- ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
- if (ret)
- goto err;
+ ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+ if (ret == 0)
+ return &fb->base;
- return &nouveau_fb->base;
-
-err:
- kfree(nouveau_fb);
-err_unref:
drm_gem_object_unreference_unlocked(gem);
return ERR_PTR(ret);
}
@@ -358,6 +349,55 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
} \
} while(0)
+static void
+nouveau_display_hpd_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
+
+ pm_runtime_get_sync(drm->dev->dev);
+
+ drm_helper_hpd_irq_event(drm->dev);
+
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
+ * to the acpi subsys to move it there from drivers/acpi/acpi_video.c.
+ * This should be dropped once that is merged.
+ */
+#ifndef ACPI_VIDEO_NOTIFY_PROBE
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#endif
+
+static int
+nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
+ struct acpi_bus_event *info = data;
+
+ if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
+ if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
+ /*
+ * This may be the only indication we receive of a
+ * connector hotplug on a runtime-suspended GPU, so
+ * schedule hpd_work to check.
+ */
+ schedule_work(&drm->hpd_work);
+
+ /* acpi-video should not generate keypresses for this */
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
int
nouveau_display_init(struct drm_device *dev)
{
@@ -385,16 +425,19 @@ nouveau_display_init(struct drm_device *dev)
}
void
-nouveau_display_fini(struct drm_device *dev)
+nouveau_display_fini(struct drm_device *dev, bool suspend)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- int head;
+ struct drm_crtc *crtc;
+
+ if (!suspend)
+ drm_crtc_force_disable_all(dev);
/* Make sure that drm and hw vblank irqs get properly disabled. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_off(dev, head);
+ drm_for_each_crtc(crtc, dev)
+ drm_crtc_vblank_off(crtc);
/* disable flip completion events */
nvif_notify_put(&drm->flip);
@@ -495,7 +538,7 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
- GP104_DISP,
+ GP102_DISP,
GP100_DISP,
GM200_DISP,
GM107_DISP,
@@ -530,6 +573,8 @@ nouveau_display_create(struct drm_device *dev)
if (ret)
goto disp_create_err;
+ drm_mode_config_reset(dev);
+
if (dev->mode_config.num_crtc) {
ret = nouveau_display_vblank_init(dev);
if (ret)
@@ -537,6 +582,12 @@ nouveau_display_create(struct drm_device *dev)
}
nouveau_backlight_init(dev);
+ INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
+#ifdef CONFIG_ACPI
+ drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
+ register_acpi_notifier(&drm->acpi_nb);
+#endif
+
return 0;
vblank_err:
@@ -552,11 +603,13 @@ nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
+#endif
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
- drm_crtc_force_disable_all(dev);
drm_mode_config_cleanup(dev);
if (disp->dtor)
@@ -568,12 +621,138 @@ nouveau_display_destroy(struct drm_device *dev)
kfree(disp);
}
+static int
+nouveau_atomic_disable_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ struct drm_connector_state *connector_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ int ret;
+
+ if (!(crtc = connector->state->crtc))
+ return 0;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state->active = false;
+
+ drm_for_each_plane_mask(plane, connector->dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_atomic_disable(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector *connector;
+ int ret;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ drm_for_each_connector(connector, dev) {
+ ret = nouveau_atomic_disable_connector(state, connector);
+ if (ret)
+ break;
+ }
+
+ if (ret == 0)
+ ret = drm_atomic_commit(state);
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+static struct drm_atomic_state *
+nouveau_atomic_suspend(struct drm_device *dev)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret < 0) {
+ state = ERR_PTR(ret);
+ goto unlock;
+ }
+
+ state = drm_atomic_helper_duplicate_state(dev, &ctx);
+ if (IS_ERR(state))
+ goto unlock;
+
+ ret = nouveau_atomic_disable(dev, &ctx);
+ if (ret < 0) {
+ drm_atomic_state_put(state);
+ state = ERR_PTR(ret);
+ goto unlock;
+ }
+
+unlock:
+ if (PTR_ERR(state) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return state;
+}
+
int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
+ struct nouveau_display *disp = nouveau_display(dev);
struct drm_crtc *crtc;
- nouveau_display_fini(dev);
+ if (dev->mode_config.funcs->atomic_commit) {
+ if (!runtime) {
+ disp->suspend = nouveau_atomic_suspend(dev);
+ if (IS_ERR(disp->suspend)) {
+ int ret = PTR_ERR(disp->suspend);
+ disp->suspend = NULL;
+ return ret;
+ }
+ }
+
+ nouveau_display_fini(dev, true);
+ return 0;
+ }
+
+ nouveau_display_fini(dev, true);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -600,9 +779,19 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
+ struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
- int ret, head;
+ int ret;
+
+ if (dev->mode_config.funcs->atomic_commit) {
+ nouveau_display_init(dev);
+ if (disp->suspend) {
+ drm_atomic_helper_resume(dev, disp->suspend);
+ disp->suspend = NULL;
+ }
+ return;
+ }
/* re-pin fb/cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -647,10 +836,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
drm_helper_resume_force_mode(dev);
- /* Make sure that drm and hw vblank irqs get resumed if needed. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_on(dev, head);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -692,10 +877,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
- BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- else
- BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
@@ -724,6 +906,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan;
struct nouveau_cli *cli;
struct nouveau_fence *fence;
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ int head = nouveau_crtc(crtc)->index;
int ret;
chan = drm->channel;
@@ -770,32 +954,23 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
drm_crtc_vblank_get(crtc);
/* Emit a page flip */
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
+ if (swap_interval) {
+ ret = RING_SPACE(chan, 8);
if (ret)
goto fail_unreserve;
- } else {
- struct nv04_display *dispnv04 = nv04_display(dev);
- int head = nouveau_crtc(crtc)->index;
-
- if (swap_interval) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- goto fail_unreserve;
-
- BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
- OUT_RING (chan, head);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
- OUT_RING (chan, 0);
- }
- nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+ OUT_RING (chan, head);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+ OUT_RING (chan, 0);
}
+ nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
goto fail_unreserve;
@@ -843,16 +1018,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- drm_crtc_arm_vblank_event(s->crtc, s->event);
- } else {
- drm_crtc_send_vblank_event(s->crtc, s->event);
-
- /* Give up ownership of vblank for page-flipped crtc */
- drm_crtc_vblank_put(s->crtc);
- }
- }
- else {
+ drm_crtc_arm_vblank_event(s->crtc, s->event);
+ } else {
/* Give up ownership of vblank for page-flipped crtc */
drm_crtc_vblank_put(s->crtc);
}
@@ -874,12 +1041,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
- state.offset + state.crtc->y *
- state.pitch + state.crtc->x *
- state.bpp / 8);
- }
+ nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+ state.offset + state.crtc->y *
+ state.pitch + state.crtc->x *
+ state.bpp / 8);
}
return NVIF_NOTIFY_KEEP;
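
For atomic-capable hardware, system suspend now snapshots the full display state with drm_atomic_helper_duplicate_state(), disables everything, and replays the snapshot through drm_atomic_helper_resume() on resume, instead of re-pinning each crtc's framebuffer by hand. A compact stand-in for that save/disable/restore shape (display_state, snapshot(), disable_all() and restore() are illustrative, not the DRM helpers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct display_state {          /* stands in for drm_atomic_state */
	int crtc_active[2];
};

static struct display_state current_state = { { 1, 0 } };

static struct display_state *snapshot(void)
{
	struct display_state *s = malloc(sizeof(*s));

	if (s)
		memcpy(s, &current_state, sizeof(*s));
	return s;
}

static void disable_all(void)
{
	memset(&current_state, 0, sizeof(current_state));
}

static void restore(struct display_state *s)
{
	memcpy(&current_state, s, sizeof(current_state));
	free(s);
}

int main(void)
{
	struct display_state *saved = snapshot();   /* suspend path */

	disable_all();
	printf("suspended: crtc0=%d\n", current_state.crtc_active[0]);

	restore(saved);                             /* resume path  */
	printf("resumed:   crtc0=%d\n", current_state.crtc_active[0]);
	return 0;
}
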
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 0420ee861ea4..330fe0fc5c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -22,8 +22,9 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
return container_of(fb, struct nouveau_framebuffer, base);
}
-int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
- const struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
+int nouveau_framebuffer_new(struct drm_device *,
+ const struct drm_mode_fb_cmd2 *,
+ struct nouveau_bo *, struct nouveau_framebuffer **);
struct nouveau_page_flip_state {
struct list_head head;
@@ -39,9 +40,6 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
- int (*fb_ctor)(struct drm_framebuffer *);
- void (*fb_dtor)(struct drm_framebuffer *);
-
struct nvif_object disp;
struct drm_property *dithering_mode;
@@ -52,6 +50,8 @@ struct nouveau_display {
/* not really hue and saturation: */
struct drm_property *vibrant_hue_property;
struct drm_property *color_vibrance_property;
+
+ struct drm_atomic_state *suspend;
};
static inline struct nouveau_display *
@@ -63,7 +63,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev, bool suspend);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
@@ -103,4 +103,7 @@ nouveau_backlight_exit(struct drm_device *dev) {
}
#endif
+struct drm_framebuffer *
+nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
+ const struct drm_mode_fb_cmd2 *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 87d52d36f4fc..0d052e1660f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,6 +30,13 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include <nvif/class.h>
+#include <nvif/cl5070.h>
+
+MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)");
+static int nouveau_mst = 1;
+module_param_named(mst, nouveau_mst, int, 0400);
+
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
{
@@ -55,14 +62,14 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c_aux *aux;
- u8 *dpcd = nv_encoder->dp.dpcd;
+ u8 dpcd[8];
int ret;
aux = nv_encoder->aux;
if (!aux)
return -ENODEV;
- ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
+ ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
if (ret)
return ret;
@@ -84,5 +91,11 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
nouveau_dp_probe_oui(dev, aux, dpcd);
- return 0;
+
+ ret = nv50_mstm_detect(nv_encoder->dp.mstm, dpcd, nouveau_mst);
+ if (ret == 1)
+ return NOUVEAU_DP_MST;
+ if (ret == 0)
+ return NOUVEAU_DP_SST;
+ return ret;
}
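
nouveau_dp_detect() now reports whether the sink is driven as a multi-stream or single-stream device (NOUVEAU_DP_MST/NOUVEAU_DP_SST) instead of returning 0 for every successful probe, and nouveau_connector_ddc_detect() branches on that. A minimal sketch of the same tri-state convention (dp_status, probe_sink() and the caller are illustrative, not the driver functions):

#include <stdio.h>

enum dp_status {
	DP_ERROR = -1,
	DP_SST   = 0,   /* single-stream: probe the connector normally  */
	DP_MST   = 1,   /* multi-stream: topology manager owns the link */
};

/* Pretend sink probe: decide from a DPCD-style capability byte. */
static enum dp_status probe_sink(unsigned char mstm_cap, int allow_mst)
{
	if ((mstm_cap & 0x01) && allow_mst)
		return DP_MST;
	return DP_SST;
}

int main(void)
{
	enum dp_status ret = probe_sink(0x01, 1);

	if (ret == DP_MST)
		printf("MST sink: defer to the topology manager\n");
	else if (ret == DP_SST)
		printf("SST sink: read EDID over AUX as usual\n");
	else
		printf("probe failed\n");
	return 0;
}
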
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88a015..064a925ed69a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -47,6 +47,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_vga.h"
+#include "nouveau_led.h"
#include "nouveau_hwmon.h"
#include "nouveau_acpi.h"
#include "nouveau_bios.h"
@@ -475,6 +476,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_hwmon_init(dev);
nouveau_accel_init(drm);
nouveau_fbcon_init(dev);
+ nouveau_led_init(dev);
if (nouveau_runtime_pm != 0) {
pm_runtime_use_autosuspend(dev->dev);
@@ -510,13 +512,14 @@ nouveau_drm_unload(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
+ nouveau_led_fini(dev);
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
nouveau_hwmon_fini(dev);
nouveau_debugfs_fini(drm);
if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev);
+ nouveau_display_fini(dev, false);
nouveau_display_destroy(dev);
nouveau_bios_takedown(dev);
@@ -561,6 +564,8 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
struct nouveau_cli *cli;
int ret;
+ nouveau_led_suspend(dev);
+
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending console...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -649,6 +654,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
nouveau_fbcon_set_suspend(dev, 0);
}
+ nouveau_led_resume(dev);
+
return 0;
}
@@ -692,7 +699,12 @@ nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- return nouveau_do_resume(drm_dev, false);
+ ret = nouveau_do_resume(drm_dev, false);
+
+ /* Monitors may have been connected / disconnected during suspend */
+ schedule_work(&nouveau_drm(drm_dev)->hpd_work);
+
+ return ret;
}
static int
@@ -766,6 +778,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+
+ /* Monitors may have been connected / disconnected during suspend */
+ schedule_work(&nouveau_drm(drm_dev)->hpd_work);
+
return ret;
}
@@ -1030,6 +1046,7 @@ static void nouveau_display_options(void)
DRM_DEBUG_DRIVER("... modeset : %d\n", nouveau_modeset);
DRM_DEBUG_DRIVER("... runpm : %d\n", nouveau_runtime_pm);
DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
+ DRM_DEBUG_DRIVER("... hdmimhz : %d\n", nouveau_hdmimhz);
}
static const struct dev_pm_ops nouveau_pm_ops = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a0212cd48..9730c0ef6c6a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -37,6 +37,8 @@
* - implemented limited ABI16/NVIF interop
*/
+#include <linux/notifier.h>
+
#include <nvif/client.h>
#include <nvif/device.h>
#include <nvif/ioctl.h>
@@ -161,11 +163,18 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
+ struct work_struct hpd_work;
+#ifdef CONFIG_ACPI
+ struct notifier_block acpi_nb;
+#endif
/* power management */
struct nouveau_hwmon *hwmon;
struct nouveau_debugfs *debugfs;
+ /* led management */
+ struct nouveau_led *led;
+
/* display power reference */
bool have_disp_power_ref;
@@ -201,6 +210,10 @@ void nouveau_drm_device_remove(struct drm_device *dev);
if (unlikely(drm_debug & DRM_UT_DRIVER)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
+#define NV_ATOMIC(drm,f,a...) do { \
+ if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ NV_PRINTK(info, &(drm)->client, f, ##a); \
+} while(0)
extern int nouveau_modeset;
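
NV_ATOMIC follows the existing NV_DEBUG shape: a do { } while (0) macro that only prints when the corresponding drm_debug bit is set, so it behaves as a single statement under if/else. A tiny standalone sketch of that guarded-logging idiom (debug_flags, UT_ATOMIC and LOG_ATOMIC() are made-up names for illustration):

#include <stdio.h>

#define UT_ATOMIC 0x10
static unsigned int debug_flags = UT_ATOMIC;

/* do { } while (0) keeps the macro a single statement under if/else. */
#define LOG_ATOMIC(fmt, ...) do {                                  \
	if (debug_flags & UT_ATOMIC)                               \
		fprintf(stderr, "[atomic] " fmt, ##__VA_ARGS__);   \
} while (0)

int main(void)
{
	int enabled = 1;

	if (enabled)
		LOG_ATOMIC("commit on crtc %d\n", 0);
	else
		printf("atomic debugging off\n");
	return 0;
}
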
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index ee6a6d3fc80f..198e5f27682f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -30,6 +30,7 @@
#include <subdev/bios/dcb.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_dp_mst_helper.h>
#include "dispnv04/disp.h"
#define NV_DPMS_CLEARED 0x80
@@ -57,15 +58,16 @@ struct nouveau_encoder {
union {
struct {
- u8 dpcd[8];
+ struct nv50_mstm *mstm;
int link_nr;
int link_bw;
- u32 datarate;
} dp;
};
void (*enc_save)(struct drm_encoder *encoder);
void (*enc_restore)(struct drm_encoder *encoder);
+ void (*update)(struct nouveau_encoder *, u8 head,
+ struct drm_display_mode *, u8 proto, u8 depth);
};
struct nouveau_encoder *
@@ -90,9 +92,17 @@ get_slave_funcs(struct drm_encoder *enc)
}
/* nouveau_dp.c */
+enum nouveau_dp_status {
+ NOUVEAU_DP_SST,
+ NOUVEAU_DP_MST,
+};
+
int nouveau_dp_detect(struct nouveau_encoder *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+int nv50_mstm_detect(struct nv50_mstm *, u8 dpcd[8], int allow);
+void nv50_mstm_remove(struct nv50_mstm *);
+void nv50_mstm_service(struct nv50_mstm *);
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9f5692726c16..2f2a3dcd4ad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -58,7 +58,7 @@ static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -90,7 +90,7 @@ static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -122,7 +122,7 @@ static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -154,7 +154,7 @@ static int
nouveau_fbcon_sync(struct fb_info *info)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -181,7 +181,7 @@ static int
nouveau_fbcon_open(struct fb_info *info, int user)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
int ret = pm_runtime_get_sync(drm->dev->dev);
if (ret < 0 && ret != -EACCES)
return ret;
@@ -192,42 +192,30 @@ static int
nouveau_fbcon_release(struct fb_info *info, int user)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
pm_runtime_put(drm->dev->dev);
return 0;
}
static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
.fb_release = nouveau_fbcon_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nouveau_fbcon_fillrect,
.fb_copyarea = nouveau_fbcon_copyarea,
.fb_imageblit = nouveau_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
.fb_release = nouveau_fbcon_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
void
@@ -333,16 +321,15 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
{
struct nouveau_fbdev *fbcon =
container_of(helper, struct nouveau_fbdev, helper);
- struct drm_device *dev = fbcon->dev;
+ struct drm_device *dev = fbcon->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
- int size, ret;
+ int ret;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -353,16 +340,17 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = roundup(size, PAGE_SIZE);
-
- ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, &nvbo);
+ ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height,
+ 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(drm, "failed to allocate framebuffer\n");
goto out;
}
+ ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
+ if (ret)
+ goto out_unref;
+
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
@@ -377,8 +365,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(nvbo, drm->client.vm,
- &fbcon->nouveau_fb.vma);
+ ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -394,13 +381,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->par = fbcon;
- nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
-
- nouveau_fb = &fbcon->nouveau_fb;
- fb = &nouveau_fb->base;
-
/* setup helper */
- fbcon->helper.fb = fb;
+ fbcon->helper.fb = &fb->base;
strcpy(info->fix.id, "nouveaufb");
if (!chan)
@@ -411,14 +393,14 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.base +
- nvbo->bo.mem.bus.offset;
- info->fix.smem_len = size;
+ info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
+ fb->nvbo->bo.mem.bus.offset;
+ info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
- info->screen_size = size;
+ info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
+ info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->base.pitches[0], fb->base.depth);
drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -429,20 +411,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* To allow resizing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
- nouveau_fb->base.width, nouveau_fb->base.height,
- nvbo->bo.offset, nvbo);
+ fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unlock:
if (chan)
- nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
- nouveau_bo_unmap(nvbo);
+ nouveau_bo_vma_del(fb->nvbo, &fb->vma);
+ nouveau_bo_unmap(fb->nvbo);
out_unpin:
- nouveau_bo_unpin(nvbo);
+ nouveau_bo_unpin(fb->nvbo);
out_unref:
- nouveau_bo_ref(NULL, &nvbo);
+ nouveau_bo_ref(NULL, &fb->nvbo);
out:
return ret;
}
@@ -458,28 +439,26 @@ nouveau_fbcon_output_poll_changed(struct drm_device *dev)
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
+ struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_release_fbi(&fbcon->helper);
+ drm_fb_helper_fini(&fbcon->helper);
if (nouveau_fb->nvbo) {
- nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+ nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
- nouveau_fb->nvbo = NULL;
+ drm_framebuffer_unreference(&nouveau_fb->base);
}
- drm_fb_helper_fini(&fbcon->helper);
- drm_framebuffer_unregister_private(&nouveau_fb->base);
- drm_framebuffer_cleanup(&nouveau_fb->base);
+
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -522,7 +501,6 @@ nouveau_fbcon_init(struct drm_device *dev)
if (!fbcon)
return -ENOMEM;
- fbcon->dev = dev;
drm->fbcon = fbcon;
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
@@ -545,7 +523,8 @@ nouveau_fbcon_init(struct drm_device *dev)
preferred_bpp = 32;
/* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
+ if (!dev->mode_config.funcs->atomic_commit)
+ drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
if (ret)
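
The hunks above drop the nouveau_framebuffer that used to be embedded in struct nouveau_fbdev: the framebuffer is now allocated by nouveau_framebuffer_new() and reached back through fbcon->helper.fb, with nouveau_fbcon_destroy() downcasting it via nouveau_framebuffer(). A minimal sketch of that container_of()-based downcast, using assumed example names rather than the driver's own types:

#include <linux/kernel.h>
#include <drm/drm_framebuffer.h>

struct nouveau_bo;

struct example_framebuffer {
	struct drm_framebuffer base;	/* embedded DRM framebuffer */
	struct nouveau_bo *nvbo;	/* backing buffer object (assumed field) */
};

static inline struct example_framebuffer *
example_framebuffer(struct drm_framebuffer *fb)
{
	return container_of(fb, struct example_framebuffer, base);
}
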
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index ca77ad001978..e2bca729721e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -33,8 +33,6 @@
struct nouveau_fbdev {
struct drm_fb_helper helper;
- struct nouveau_framebuffer nouveau_fb;
- struct drm_device *dev;
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4bb9ab892ae1..f2f348f0160c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,7 +28,7 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
#include <nvif/cl826e.h>
#include <nvif/notify.h>
@@ -38,11 +38,11 @@
#include "nouveau_dma.h"
#include "nouveau_fence.h"
-static const struct fence_ops nouveau_fence_ops_uevent;
-static const struct fence_ops nouveau_fence_ops_legacy;
+static const struct dma_fence_ops nouveau_fence_ops_uevent;
+static const struct dma_fence_ops nouveau_fence_ops_legacy;
static inline struct nouveau_fence *
-from_fence(struct fence *fence)
+from_fence(struct dma_fence *fence)
{
return container_of(fence, struct nouveau_fence, base);
}
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence)
{
int drop = 0;
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
rcu_assign_pointer(fence->channel, NULL);
- if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+ if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
drop = 1;
}
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return drop;
}
static struct nouveau_fence *
-nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
struct nouveau_fence_priv *priv = (void*)drm->fence;
if (fence->ops != &nouveau_fence_ops_legacy &&
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_fence_work {
struct work_struct work;
- struct fence_cb cb;
+ struct dma_fence_cb cb;
void (*func)(void *);
void *data;
};
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork)
kfree(work);
}
-static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
}
void
-nouveau_fence_work(struct fence *fence,
+nouveau_fence_work(struct dma_fence *fence,
void (*func)(void *), void *data)
{
struct nouveau_fence_work *work;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto err;
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence,
work->func = func;
work->data = data;
- if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+ if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
goto err_free;
return;
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
- fence_init(&fence->base, &nouveau_fence_ops_uevent,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
+ &fctx->lock, fctx->context, ++fctx->sequence);
else
- fence_init(&fence->base, &nouveau_fence_ops_legacy,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
+ &fctx->lock, fctx->context, ++fctx->sequence);
kref_get(&fctx->fence_ref);
- trace_fence_emit(&fence->base);
+ trace_dma_fence_emit(&fence->base);
ret = fctx->emit(fence);
if (!ret) {
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
if (nouveau_fence_update(chan, fctx))
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
struct nouveau_channel *chan;
unsigned long flags;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
spin_lock_irqsave(&fctx->lock, flags);
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
nvif_notify_put(&fctx->notify);
spin_unlock_irqrestore(&fctx->lock, flags);
}
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
static long
-nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
+nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
struct nouveau_fence *fence = from_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
if (!lazy)
return nouveau_fence_wait_busy(fence, intr);
- ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+ ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
if (ret < 0)
return ret;
else if (!ret)
@@ -391,7 +391,7 @@ int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct fence *fence;
+ struct dma_fence *fence;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
struct nouveau_fence *f;
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
return ret;
}
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
}
return ret;
@@ -456,7 +456,7 @@ void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
if (*pfence)
- fence_put(&(*pfence)->base);
+ dma_fence_put(&(*pfence)->base);
*pfence = NULL;
}
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
return ret;
}
-static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
}
-static const char *nouveau_fence_get_timeline_name(struct fence *f)
+static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
* result. The drm node should still be there, so we can derive the index from
* the fence context.
*/
-static bool nouveau_fence_is_signaled(struct fence *f)
+static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f)
return ret;
}
-static bool nouveau_fence_no_signaling(struct fence *f)
+static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f)
WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
/*
- * This needs uevents to work correctly, but fence_add_callback relies on
+ * This needs uevents to work correctly, but dma_fence_add_callback relies on
* being able to enable signaling. It will still get signaled eventually,
* just not right away.
*/
if (nouveau_fence_is_signaled(f)) {
list_del(&fence->head);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return false;
}
return true;
}
-static void nouveau_fence_release(struct fence *f)
+static void nouveau_fence_release(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static const struct fence_ops nouveau_fence_ops_legacy = {
+static const struct dma_fence_ops nouveau_fence_ops_legacy = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_no_signaling,
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = {
.release = nouveau_fence_release
};
-static bool nouveau_fence_enable_signaling(struct fence *f)
+static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f)
ret = nouveau_fence_no_signaling(f);
if (ret)
- set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+ set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
else if (!--fctx->notify_ref)
nvif_notify_put(&fctx->notify);
return ret;
}
-static const struct fence_ops nouveau_fence_ops_uevent = {
+static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
- .wait = fence_default_wait,
- .release = NULL
+ .wait = dma_fence_default_wait,
+ .release = nouveau_fence_release
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7115ad..ccdce1b4eec4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,14 +1,14 @@
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <nvif/notify.h>
struct nouveau_drm;
struct nouveau_bo;
struct nouveau_fence {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
@@ -92,7 +92,6 @@ struct nv84_fence_chan {
struct nouveau_fence_chan base;
struct nvkm_vma vma;
struct nvkm_vma vma_gart;
- struct nvkm_vma dispc_vma[4];
};
struct nv84_fence_priv {
@@ -102,7 +101,6 @@ struct nv84_fence_priv {
u32 *suspend;
};
-u64 nv84_fence_crtc(struct nouveau_channel *, int);
int nv84_fence_context_new(struct nouveau_channel *);
#endif
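
From this point the series switches nouveau from the old struct fence API to the renamed dma-fence one: the header moves from <linux/fence.h> to <linux/dma-fence.h> and every fence_* symbol gains a dma_ prefix (dma_fence_init, dma_fence_signal_locked, dma_fence_put, DMA_FENCE_FLAG_*, and so on). A minimal, self-contained sketch of the renamed API as it is used after this change; the example_* names are assumptions, not driver code:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static const char *example_fence_get_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *example_fence_get_timeline_name(struct dma_fence *f)
{
	return "example-timeline";
}

static bool example_fence_enable_signaling(struct dma_fence *f)
{
	/* nothing to arm in this sketch; signaling is always possible */
	return true;
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name	= example_fence_get_driver_name,
	.get_timeline_name	= example_fence_get_timeline_name,
	.enable_signaling	= example_fence_enable_signaling,
	.wait			= dma_fence_default_wait,
};

/*
 * The caller allocates one context per timeline with
 * dma_fence_context_alloc(1) and hands out increasing seqnos on it.
 */
static struct dma_fence *
example_fence_create(spinlock_t *lock, u64 context, unsigned int seqno)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	dma_fence_init(f, &example_fence_ops, lock, context, seqno);
	return f;	/* later: dma_fence_signal(f); dma_fence_put(f); */
}

The per-timeline context allocation is what the nvXX_fence_create() hunks below switch over to dma_fence_context_alloc().
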
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 72e2399bce39..201b52b750dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
@@ -369,7 +369,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int trycnt = 0;
- int ret, i;
+ int ret = -EINVAL, i;
struct nouveau_bo *res_bo = NULL;
LIST_HEAD(gart_list);
LIST_HEAD(vram_list);
@@ -861,6 +861,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
+ long lret;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
@@ -868,19 +869,15 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- if (no_wait)
- ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
- else {
- long lret;
+ lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
+ no_wait ? 0 : 30 * HZ);
+ if (!lret)
+ ret = -EBUSY;
+ else if (lret > 0)
+ ret = 0;
+ else
+ ret = lret;
- lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
- if (!lret)
- ret = -EBUSY;
- else if (lret > 0)
- ret = 0;
- else
- ret = lret;
- }
nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);
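
The rewritten NOUVEAU_GEM_CPU_PREP handler above folds the no_wait and waiting paths into a single reservation_object_wait_timeout_rcu() call (timeout 0 for a poll, 30*HZ otherwise) and then translates the long result into an errno. A sketch of that translation on its own, outside any driver context:

#include <linux/errno.h>

/* <0: error, 0: timed out (or polled busy), >0: signaled within the timeout */
static int example_wait_result_to_errno(long lret)
{
	if (lret < 0)
		return lret;	/* e.g. -ERESTARTSYS from an interrupted wait */
	if (lret == 0)
		return -EBUSY;
	return 0;
}
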
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
new file mode 100644
index 000000000000..3e2f1b6cd4df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Martin Peres <martin.peres@free.fr>
+ */
+
+#include <linux/leds.h>
+
+#include "nouveau_led.h"
+#include <nvkm/subdev/gpio.h>
+
+static enum led_brightness
+nouveau_led_get_brightness(struct led_classdev *led)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+ u32 div, duty;
+
+ div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
+ duty = nvif_rd32(device, 0x61c884) & 0x00ffffff;
+
+ if (div > 0)
+ return duty * LED_FULL / div;
+ else
+ return 0;
+}
+
+static void
+nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+
+ u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
+ u32 freq = 100; /* this is what nvidia uses and it should be good enough */
+ u32 div, duty;
+
+ div = input_clk / freq;
+ duty = value * div / LED_FULL;
+
+ /* for now, it is safe to poke these registers directly because:
+ * - A: nvidia never attaches the logo LED to any PWM controller
+ * other than PDISPLAY.SOR[1].PWM.
+ * - B: nouveau does not touch these registers anywhere else
+ */
+ nvif_wr32(device, 0x61c880, div);
+ nvif_wr32(device, 0x61c884, 0xc0000000 | duty);
+}
+
+
+int
+nouveau_led_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct dcb_gpio_func logo_led;
+ int ret;
+
+ if (!gpio)
+ return 0;
+
+ /* check that there is a GPIO controlling the logo LED */
+ if (nvkm_gpio_find(gpio, 0, DCB_GPIO_LOGO_LED_PWM, 0xff, &logo_led))
+ return 0;
+
+ drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL);
+ if (!drm->led)
+ return -ENOMEM;
+ drm->led->dev = dev;
+
+ drm->led->led.name = "nvidia-logo";
+ drm->led->led.max_brightness = 255;
+ drm->led->led.brightness_get = nouveau_led_get_brightness;
+ drm->led->led.brightness_set = nouveau_led_set_brightness;
+
+ ret = led_classdev_register(dev->dev, &drm->led->led);
+ if (ret) {
+ kfree(drm->led);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_led_suspend(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_suspend(&drm->led->led);
+}
+
+void
+nouveau_led_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_resume(&drm->led->led);
+}
+
+void
+nouveau_led_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led) {
+ led_classdev_unregister(&drm->led->led);
+ kfree(drm->led);
+ drm->led = NULL;
+ }
+}
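
nouveau_led_set_brightness() above derives the PWM divider from the 27 MHz crystal and the 100 Hz target frequency, then scales the requested brightness into a duty value. A small worked sketch of the same arithmetic (constants copied from the code above, function name assumed):

#include <linux/types.h>

#define EXAMPLE_INPUT_CLK	27000000u	/* 27 MHz crystal feeding the PWM */
#define EXAMPLE_PWM_FREQ	100u		/* target PWM frequency, Hz */
#define EXAMPLE_LED_FULL	255u		/* LED_FULL from <linux/leds.h> */

/*
 * div  = 27000000 / 100         = 270000
 * duty = brightness * div / 255, e.g. 127 * 270000 / 255 = 134470
 */
static u32 example_led_duty(u32 brightness)
{
	u32 div = EXAMPLE_INPUT_CLK / EXAMPLE_PWM_FREQ;

	return brightness * div / EXAMPLE_LED_FULL;
}
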
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
new file mode 100644
index 000000000000..187ecdb82002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@free.fr>
+ */
+
+#ifndef __NOUVEAU_LED_H__
+#define __NOUVEAU_LED_H__
+
+#include "nouveau_drv.h"
+
+struct led_classdev;
+
+struct nouveau_led {
+ struct drm_device *dev;
+
+ struct led_classdev led;
+};
+
+static inline struct nouveau_led *
+nouveau_led(struct drm_device *dev)
+{
+ return nouveau_drm(dev)->led;
+}
+
+/* nouveau_led.c */
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
+int nouveau_led_init(struct drm_device *dev);
+void nouveau_led_suspend(struct drm_device *dev);
+void nouveau_led_resume(struct drm_device *dev);
+void nouveau_led_fini(struct drm_device *dev);
+#else
+static inline int nouveau_led_init(struct drm_device *dev) { return 0; };
+static inline void nouveau_led_suspend(struct drm_device *dev) { };
+static inline void nouveau_led_resume(struct drm_device *dev) { };
+static inline void nouveau_led_fini(struct drm_device *dev) { };
+#endif
+
+#endif
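
Because the !CONFIG_LEDS_CLASS branch above provides empty inline stubs, callers can invoke the LED hooks unconditionally. The actual call sites live elsewhere in the series and are not part of these hunks, so the following is only an illustrative sketch of how a load/unload path might use them:

#include "nouveau_led.h"

static int example_load(struct drm_device *dev)
{
	int ret;

	ret = nouveau_led_init(dev);	/* compiles to "return 0" without LEDS_CLASS */
	if (ret)
		return ret;
	/* ... remaining device bring-up ... */
	return 0;
}

static void example_unload(struct drm_device *dev)
{
	nouveau_led_fini(dev);		/* empty stub without LEDS_CLASS */
}
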
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index da8fd5ff9d0f..6a2b187e3c3b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,7 +30,7 @@ int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -50,7 +50,7 @@ int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -77,7 +77,7 @@ int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t fg;
uint32_t bg;
@@ -133,7 +133,7 @@ int
nv04_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
+ struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
struct nvif_device *device = &drm->device;
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1915b7b82a59..fa8f2375c398 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
priv->base.contexts = 15;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4e3de34ff6f4..2998bde29211 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -57,16 +57,13 @@ void
nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
- int i;
nouveau_fence_context_del(&fctx->base);
- for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
- nvif_object_fini(&fctx->head[i]);
nvif_object_fini(&fctx->sema);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
}
-int
+static int
nv10_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx;
@@ -107,7 +104,7 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index a87259f3983a..b7a508585304 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -7,7 +7,6 @@
struct nv10_fence_chan {
struct nouveau_fence_chan base;
struct nvif_object sema;
- struct nvif_object head[4];
};
struct nv10_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 7d5e562a55c5..79bc01111351 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv17_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7d0edcbcfca7..7a1aa9161982 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -25,10 +25,12 @@
#include <linux/dma-mapping.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -38,6 +40,7 @@
#include <nvif/cl507c.h>
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>
+#include <nvif/event.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
@@ -46,6 +49,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
#define EVO_DMA_NR 9
@@ -61,6 +65,227 @@
#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
+#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
+#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
+
+/******************************************************************************
+ * Atomic state
+ *****************************************************************************/
+#define nv50_atom(p) container_of((p), struct nv50_atom, state)
+
+struct nv50_atom {
+ struct drm_atomic_state state;
+
+ struct list_head outp;
+ bool lock_core;
+ bool flush_disable;
+};
+
+struct nv50_outp_atom {
+ struct list_head head;
+
+ struct drm_encoder *encoder;
+ bool flush_disable;
+
+ union {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } set;
+};
+
+#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
+
+struct nv50_head_atom {
+ struct drm_crtc_state state;
+
+ struct {
+ u16 iW;
+ u16 iH;
+ u16 oW;
+ u16 oH;
+ } view;
+
+ struct nv50_head_mode {
+ bool interlace;
+ u32 clock;
+ struct {
+ u16 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ } h;
+ struct {
+ u32 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ u16 blank2s;
+ u16 blank2e;
+ u16 blankus;
+ } v;
+ } mode;
+
+ struct {
+ u32 handle;
+ u64 offset:40;
+ } lut;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 block:4;
+ u32 pitch:20;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } core;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 layout:1;
+ u8 format:1;
+ } curs;
+
+ struct {
+ u8 depth;
+ u8 cpp;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } base;
+
+ struct {
+ u8 cpp;
+ } ovly;
+
+ struct {
+ bool enable:1;
+ u8 bits:2;
+ u8 mode:4;
+ } dither;
+
+ struct {
+ struct {
+ u16 cos:12;
+ u16 sin:12;
+ } sat;
+ } procamp;
+
+ union {
+ struct {
+ bool core:1;
+ bool curs:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool core:1;
+ bool curs:1;
+ bool view:1;
+ bool mode:1;
+ bool base:1;
+ bool ovly:1;
+ bool dither:1;
+ bool procamp:1;
+ };
+ u16 mask;
+ } set;
+};
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(statec))
+ return (void *)statec;
+ return nv50_head_atom(statec);
+}
+
+#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
+
+struct nv50_wndw_atom {
+ struct drm_plane_state state;
+ u8 interval;
+
+ struct drm_rect clip;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ bool awaken:1;
+ } ntfy;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ u32 acquire;
+ u32 release;
+ } sema;
+
+ struct {
+ u8 enable:2;
+ } lut;
+
+ struct {
+ u8 mode:2;
+ u8 interval:4;
+
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 block:4;
+ u32 pitch:20;
+ u16 w;
+ u16 h;
+
+ u32 handle;
+ u64 offset;
+ } image;
+
+ struct {
+ u16 x;
+ u16 y;
+ } point;
+
+ union {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool image:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool image:1;
+ bool lut:1;
+ bool point:1;
+ };
+ u8 mask;
+ } set;
+};
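
Both nv50_head_atom and nv50_wndw_atom track what changed with paired clr/set unions: an anonymous struct of one-bit bools overlaid with a small mask, so individual flags can be set by name while the whole group can be tested or cleared through the mask. A stripped-down sketch of the pattern (types and names assumed; the exact bit layout is compiler-defined, which is acceptable for state that never leaves the driver):

#include <linux/types.h>

union example_dirty {
	struct {
		bool ntfy:1;
		bool sema:1;
		bool image:1;
	};
	u8 mask;
};

/*
 * Usage:
 *	union example_dirty set = {};
 *	set.image = true;		// mark one field by name
 *	if (set.mask)			// cheap "anything dirty?" test
 *		set.mask = 0;		// clear every flag at once
 */
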
/******************************************************************************
* EVO channel
@@ -133,34 +358,6 @@ nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
}
/******************************************************************************
- * Cursor Immediate
- *****************************************************************************/
-
-struct nv50_curs {
- struct nv50_pioc base;
-};
-
-static int
-nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
- int head, struct nv50_curs *curs)
-{
- struct nv50_disp_cursor_v0 args = {
- .head = head,
- };
- static const s32 oclass[] = {
- GK104_DISP_CURSOR,
- GF110_DISP_CURSOR,
- GT214_DISP_CURSOR,
- G82_DISP_CURSOR,
- NV50_DISP_CURSOR,
- 0
- };
-
- return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
- &curs->base);
-}
-
-/******************************************************************************
* Overlay Immediate
*****************************************************************************/
@@ -192,6 +389,11 @@ nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
* DMA EVO channel
*****************************************************************************/
+struct nv50_dmac_ctxdma {
+ struct list_head head;
+ struct nvif_object object;
+};
+
struct nv50_dmac {
struct nv50_chan base;
dma_addr_t handle;
@@ -199,6 +401,7 @@ struct nv50_dmac {
struct nvif_object sync;
struct nvif_object vram;
+ struct list_head ctxdma;
/* Protects against concurrent pushbuf access to this channel, lock is
* grabbed by evo_wait (if the pushbuf reservation is successful) and
@@ -207,9 +410,82 @@ struct nv50_dmac {
};
static void
+nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
+{
+ nvif_object_fini(&ctxdma->object);
+ list_del(&ctxdma->head);
+ kfree(ctxdma);
+}
+
+static struct nv50_dmac_ctxdma *
+nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+ struct nv50_dmac_ctxdma *ctxdma;
+ const u8 kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ const u32 handle = 0xfb000000 | kind;
+ struct {
+ struct nv_dma_v0 base;
+ union {
+ struct nv50_dma_v0 nv50;
+ struct gf100_dma_v0 gf100;
+ struct gf119_dma_v0 gf119;
+ };
+ } args = {};
+ u32 argc = sizeof(args.base);
+ int ret;
+
+ list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
+ if (ctxdma->object.handle == handle)
+ return ctxdma;
+ }
+
+ if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ list_add(&ctxdma->head, &dmac->ctxdma);
+
+ args.base.target = NV_DMA_V0_TARGET_VRAM;
+ args.base.access = NV_DMA_V0_ACCESS_RDWR;
+ args.base.start = 0;
+ args.base.limit = drm->device.info.ram_user - 1;
+
+ if (drm->device.info.chipset < 0x80) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xc0) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ args.nv50.kind = kind;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xd0) {
+ args.gf100.kind = kind;
+ argc += sizeof(args.gf100);
+ } else {
+ args.gf119.page = GF119_DMA_V0_PAGE_LP;
+ args.gf119.kind = kind;
+ argc += sizeof(args.gf119);
+ }
+
+ ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
+ &args, argc, &ctxdma->object);
+ if (ret) {
+ nv50_dmac_ctxdma_del(ctxdma);
+ return ERR_PTR(ret);
+ }
+
+ return ctxdma;
+}
+
+static void
nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
struct nvif_device *device = dmac->base.device;
+ struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;
+
+ list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
+ nv50_dmac_ctxdma_del(ctxdma);
+ }
nvif_object_fini(&dmac->vram);
nvif_object_fini(&dmac->sync);
@@ -278,6 +554,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
+ INIT_LIST_HEAD(&dmac->ctxdma);
return ret;
}
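
nv50_dmac_ctxdma_new() above caches one DMA context object per memory kind on the channel's ctxdma list, keyed by the 0xfb000000 | kind handle, and only allocates a new object when no cached one matches; nv50_dmac_destroy() tears the whole list down again. A generic sketch of that find-or-create-by-handle pattern (all names here are illustrative):

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_ctxobj {
	struct list_head head;
	u32 handle;
};

static struct example_ctxobj *
example_ctxobj_get(struct list_head *cache, u32 handle)
{
	struct example_ctxobj *obj;

	list_for_each_entry(obj, cache, head) {
		if (obj->handle == handle)
			return obj;		/* reuse the cached object */
	}

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	obj->handle = handle;
	list_add(&obj->head, cache);		/* cache it for later lookups */
	return obj;
}
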
@@ -297,7 +574,7 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
.pushbuf = 0xb0007d00,
};
static const s32 oclass[] = {
- GP104_DISP_CORE_CHANNEL_DMA,
+ GP102_DISP_CORE_CHANNEL_DMA,
GP100_DISP_CORE_CHANNEL_DMA,
GM200_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
@@ -381,34 +658,23 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
struct nv50_head {
struct nouveau_crtc base;
- struct nouveau_bo *image;
- struct nv50_curs curs;
- struct nv50_sync sync;
struct nv50_ovly ovly;
struct nv50_oimm oimm;
};
#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
-#define nv50_curs(c) (&nv50_head(c)->curs)
-#define nv50_sync(c) (&nv50_head(c)->sync)
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
#define nv50_vers(c) nv50_chan(c)->user.oclass
-struct nv50_fbdma {
- struct list_head head;
- struct nvif_object core;
- struct nvif_object base[4];
-};
-
struct nv50_disp {
struct nvif_object *disp;
struct nv50_mast mast;
- struct list_head fbdma;
-
struct nouveau_bo *sync;
+
+ struct mutex mutex;
};
static struct nv50_disp *
@@ -419,12 +685,6 @@ nv50_disp(struct drm_device *dev)
#define nv50_mast(d) (&nv50_disp(d)->mast)
-static struct drm_crtc *
-nv50_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
@@ -463,812 +723,1460 @@ evo_kick(u32 *push, void *evoc)
mutex_unlock(&dmac->lock);
}
-#if 1
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
-#else
#define evo_mthd(p,m,s) do { \
const u32 _m = (m), _s = (s); \
- printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
+ if (drm_debug & DRM_UT_KMS) \
+ printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
*((p)++) = ((_s << 18) | _m); \
} while(0)
+
#define evo_data(p,d) do { \
const u32 _d = (d); \
- printk(KERN_ERR "\t%08x\n", _d); \
+ if (drm_debug & DRM_UT_KMS) \
+ printk(KERN_ERR "\t%08x\n", _d); \
*((p)++) = _d; \
} while(0)
-#endif
-static bool
-evo_sync_wait(void *data)
+/******************************************************************************
+ * Plane
+ *****************************************************************************/
+#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
+
+struct nv50_wndw {
+ const struct nv50_wndw_func *func;
+ struct nv50_dmac *dmac;
+
+ struct drm_plane plane;
+
+ struct nvif_notify notify;
+ u16 ntfy;
+ u16 sema;
+ u32 data;
+};
+
+struct nv50_wndw_func {
+ void *(*dtor)(struct nv50_wndw *);
+ int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw);
+
+ void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*sema_clr)(struct nv50_wndw *);
+ void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*ntfy_clr)(struct nv50_wndw *);
+ int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*image_clr)(struct nv50_wndw *);
+ void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+ u32 (*update)(struct nv50_wndw *, u32 interlock);
+};
+
+static int
+nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
- return true;
- usleep_range(1, 2);
- return false;
+ if (asyw->set.ntfy)
+ return wndw->func->ntfy_wait_begun(wndw, asyw);
+ return 0;
}
-static int
-evo_sync(struct drm_device *dev)
+static u32
+nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
+ struct nv50_wndw_atom *asyw)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
- struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_mast *mast = nv50_mast(dev);
- u32 *push = evo_wait(mast, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, mast);
- if (nvif_msec(device, 2000,
- if (evo_sync_wait(disp->sync))
- break;
- ) >= 0)
- return 0;
+ if (asyw->clr.sema && (!asyw->set.sema || flush))
+ wndw->func->sema_clr(wndw);
+ if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
+ wndw->func->ntfy_clr(wndw);
+ if (asyw->clr.image && (!asyw->set.image || flush))
+ wndw->func->image_clr(wndw);
+
+ return flush ? wndw->func->update(wndw, interlock) : 0;
+}
+
+static u32
+nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
+ struct nv50_wndw_atom *asyw)
+{
+ if (interlock) {
+ asyw->image.mode = 0;
+ asyw->image.interval = 1;
}
- return -EBUSY;
+ if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
+ if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
+ if (asyw->set.image) wndw->func->image_set(wndw, asyw);
+ if (asyw->set.lut ) wndw->func->lut (wndw, asyw);
+ if (asyw->set.point) wndw->func->point (wndw, asyw);
+
+ return wndw->func->update(wndw, interlock);
}
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nv50_display_crtc_sema(struct drm_device *dev, int crtc)
+static void
+nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- return nv50_disp(dev)->sync;
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
+ wndw->func->release(wndw, asyw, asyh);
+ asyw->ntfy.handle = 0;
+ asyw->sema.handle = 0;
}
-struct nv50_display_flip {
- struct nv50_disp *disp;
- struct nv50_sync *chan;
-};
-
-static bool
-nv50_display_flip_wait(void *data)
+static int
+nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_display_flip *flip = data;
- if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
- flip->chan->data)
- return true;
- usleep_range(1, 2);
- return false;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ int ret;
+
+ NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
+ asyw->clip.x1 = 0;
+ asyw->clip.y1 = 0;
+ asyw->clip.x2 = asyh->state.mode.hdisplay;
+ asyw->clip.y2 = asyh->state.mode.vdisplay;
+
+ asyw->image.w = fb->base.width;
+ asyw->image.h = fb->base.height;
+ asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ if (asyw->image.kind) {
+ asyw->image.layout = 0;
+ if (drm->device.info.chipset >= 0xc0)
+ asyw->image.block = fb->nvbo->tile_mode >> 4;
+ else
+ asyw->image.block = fb->nvbo->tile_mode;
+ asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
+ } else {
+ asyw->image.layout = 1;
+ asyw->image.block = 0;
+ asyw->image.pitch = fb->base.pitches[0];
+ }
+
+ ret = wndw->func->acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
+
+ if (asyw->set.image) {
+ if (!(asyw->image.mode = asyw->interval ? 0 : 1))
+ asyw->image.interval = asyw->interval;
+ else
+ asyw->image.interval = 0;
+ }
+
+ return 0;
}
-void
-nv50_display_flip_stop(struct drm_crtc *crtc)
+static int
+nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
- struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
- struct nv50_display_flip flip = {
- .disp = nv50_disp(crtc->dev),
- .chan = nv50_sync(crtc),
- };
- u32 *push;
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_head_atom *harm = NULL, *asyh = NULL;
+ bool varm = false, asyv = false, asym = false;
+ int ret;
- push = evo_wait(flip.chan, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, flip.chan);
+ NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
+ if (asyw->state.crtc) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+ asym = drm_atomic_crtc_needs_modeset(&asyh->state);
+ asyv = asyh->state.active;
}
- nvif_msec(device, 2000,
- if (nv50_display_flip_wait(&flip))
- break;
- );
+ if (armw->state.crtc) {
+ harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
+ if (IS_ERR(harm))
+ return PTR_ERR(harm);
+ varm = harm->state.crtc->state->active;
+ }
+
+ if (asyv) {
+ asyw->point.x = asyw->state.crtc_x;
+ asyw->point.y = asyw->state.crtc_y;
+ if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
+ asyw->set.point = true;
+
+ if (!varm || asym || armw->state.fb != asyw->state.fb) {
+ ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
+ }
+ } else
+ if (varm) {
+ nv50_wndw_atomic_check_release(wndw, asyw, harm);
+ } else {
+ return 0;
+ }
+
+ if (!asyv || asym) {
+ asyw->clr.ntfy = armw->ntfy.handle != 0;
+ asyw->clr.sema = armw->sema.handle != 0;
+ if (wndw->func->image_clr)
+ asyw->clr.image = armw->image.handle != 0;
+ asyw->set.lut = wndw->func->lut && asyv;
+ }
+
+ return 0;
}
-int
-nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
+static void
+nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_head *head = nv50_head(crtc);
- struct nv50_sync *sync = nv50_sync(crtc);
- u32 *push;
- int ret;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
- if (crtc->primary->fb->width != fb->width ||
- crtc->primary->fb->height != fb->height)
- return -EINVAL;
+ NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
+ if (!old_state->fb)
+ return;
+
+ nouveau_bo_unpin(fb->nvbo);
+}
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
- if (chan == NULL)
- evo_sync(crtc->dev);
+static int
+nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
+{
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_head_atom *asyh;
+ struct nv50_dmac_ctxdma *ctxdma;
+ int ret;
- push = evo_wait(sync, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
+ NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+ if (!asyw->state.fb)
+ return 0;
- if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ if (ret)
+ return ret;
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, sync->addr ^ 0x10);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, sync->data + 1);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
- OUT_RING (chan, sync->addr);
- OUT_RING (chan, sync->data);
- } else
- if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
- u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
- ret = RING_SPACE(chan, 12);
- if (ret)
- return ret;
+ ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(fb->nvbo);
+ return PTR_ERR(ctxdma);
+ }
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram.handle);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr ^ 0x10));
- OUT_RING (chan, lower_32_bits(addr ^ 0x10));
- OUT_RING (chan, sync->data + 1);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, sync->data);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
- } else
- if (chan) {
- u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
+ asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
+ asyw->image.handle = ctxdma->object.handle;
+ asyw->image.offset = fb->nvbo->bo.offset;
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr ^ 0x10));
- OUT_RING (chan, lower_32_bits(addr ^ 0x10));
- OUT_RING (chan, sync->data + 1);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, sync->data);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- }
-
- if (chan) {
- sync->addr ^= 0x10;
- sync->data++;
- FIRE_RING (chan);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, sync->addr);
- evo_data(push, sync->data++);
- evo_data(push, sync->data);
- evo_data(push, sync->base.sync.handle);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_handle);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) {
- evo_mthd(push, 0x0800, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- } else {
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
+ if (wndw->func->prepare) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+
+ wndw->func->prepare(wndw, asyh, asyw);
}
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, sync);
- nouveau_bo_ref(nv_fb->nvbo, &head->image);
+ return 0;
+}
+
+static const struct drm_plane_helper_funcs
+nv50_wndw_helper = {
+ .prepare_fb = nv50_wndw_prepare_fb,
+ .cleanup_fb = nv50_wndw_cleanup_fb,
+ .atomic_check = nv50_wndw_atomic_check,
+};
+
+static void
+nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ __drm_atomic_helper_plane_destroy_state(&asyw->state);
+ dma_fence_put(asyw->state.fence);
+ kfree(asyw);
+}
+
+static struct drm_plane_state *
+nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw_atom *asyw;
+ if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
+ asyw->state.fence = NULL;
+ asyw->interval = 1;
+ asyw->sema = armw->sema;
+ asyw->ntfy = armw->ntfy;
+ asyw->image = armw->image;
+ asyw->point = armw->point;
+ asyw->lut = armw->lut;
+ asyw->clr.mask = 0;
+ asyw->set.mask = 0;
+ return &asyw->state;
+}
+
+static void
+nv50_wndw_reset(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *asyw;
+
+ if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
+ return;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+ plane->state = &asyw->state;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_ROTATE_0;
+}
+
+static void
+nv50_wndw_destroy(struct drm_plane *plane)
+{
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ void *data;
+ nvif_notify_fini(&wndw->notify);
+ data = wndw->func->dtor(wndw);
+ drm_plane_cleanup(&wndw->plane);
+ kfree(data);
+}
+
+static const struct drm_plane_funcs
+nv50_wndw = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = nv50_wndw_destroy,
+ .reset = nv50_wndw_reset,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+};
+
+static void
+nv50_wndw_fini(struct nv50_wndw *wndw)
+{
+ nvif_notify_put(&wndw->notify);
+}
+
+static void
+nv50_wndw_init(struct nv50_wndw *wndw)
+{
+ nvif_notify_get(&wndw->notify);
+}
+
+static int
+nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
+ enum drm_plane_type type, const char *name, int index,
+ struct nv50_dmac *dmac, const u32 *format, int nformat,
+ struct nv50_wndw *wndw)
+{
+ int ret;
+
+ wndw->func = func;
+ wndw->dmac = dmac;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw, format,
+ nformat, type, "%s-%d", name, index);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
return 0;
}
/******************************************************************************
- * CRTC
+ * Cursor plane
*****************************************************************************/
+#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)
+
+struct nv50_curs {
+ struct nv50_wndw wndw;
+ struct nvif_object chan;
+};
+
+static u32
+nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_wr32(&curs->chan, 0x0080, 0x00000000);
+ return 0;
+}
+
+static void
+nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
+}
+
+static void
+nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw)
+{
+ asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
+ asyh->curs.offset = asyw->image.offset;
+ asyh->set.curs = asyh->curs.visible;
+}
+
+static void
+nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->curs.visible = false;
+}
+
static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
+nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
+ int ret;
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.primary->fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
+ ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ asyh->curs.visible = asyw->state.visible;
+ if (ret || !asyh->curs.visible)
+ return ret;
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
+ switch (asyw->state.fb->width) {
+ case 32: asyh->curs.layout = 0; break;
+ case 64: asyh->curs.layout = 1; break;
+ default:
+ return -EINVAL;
}
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
- evo_data(push, mode);
- } else
- if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
- evo_data(push, mode);
- } else {
- evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
- evo_data(push, mode);
- }
+ if (asyw->state.fb->width != asyw->state.fb->height)
+ return -EINVAL;
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ switch (asyw->state.fb->pixel_format) {
+ case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
return 0;
}
+static void *
+nv50_curs_dtor(struct nv50_wndw *wndw)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_object_fini(&curs->chan);
+ return curs;
+}
+
+static const u32
+nv50_curs_format[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const struct nv50_wndw_func
+nv50_curs = {
+ .dtor = nv50_curs_dtor,
+ .acquire = nv50_curs_acquire,
+ .release = nv50_curs_release,
+ .prepare = nv50_curs_prepare,
+ .point = nv50_curs_point,
+ .update = nv50_curs_update,
+};
+
static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
+ struct nv50_curs **pcurs)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
+ static const struct nvif_mclass curses[] = {
+ { GK104_DISP_CURSOR, 0 },
+ { GF110_DISP_CURSOR, 0 },
+ { GT214_DISP_CURSOR, 0 },
+ { G82_DISP_CURSOR, 0 },
+ { NV50_DISP_CURSOR, 0 },
+ {}
+ };
+ struct nv50_disp_cursor_v0 args = {
+ .head = head->base.index,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_curs *curs;
+ int cid, ret;
+
+ cid = nvif_mclass(disp->disp, curses);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported cursor immediate class\n");
+ return cid;
+ }
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode) {
- mode = nv_connector->scaling_mode;
- if (nv_connector->scaling_full) /* non-EDID LVDS/eDP mode */
- mode = DRM_MODE_SCALE_FULLSCREEN;
+ if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
+ "curs", head->base.index, &disp->mast.base,
+ nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
+ &curs->wndw);
+ if (ret) {
+ kfree(curs);
+ return ret;
}
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
+ ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
+ sizeof(args), &curs->chan);
+ if (ret) {
+ NV_ERROR(drm, "curs%04x allocation failed: %d\n",
+ curses[cid].oclass, ret);
+ return ret;
+ }
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
+ return 0;
+}
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
+/******************************************************************************
+ * Primary plane
+ *****************************************************************************/
+#define nv50_base(p) container_of((p), struct nv50_base, wndw)
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
+struct nv50_base {
+ struct nv50_wndw wndw;
+ struct nv50_sync chan;
+ int id;
+};
+
+static int
+nv50_base_notify(struct nvif_notify *notify)
+{
+ return NVIF_NOTIFY_KEEP;
+}
+
+static void
+nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, asyw->lut.enable << 30);
+ evo_kick(push, &base->chan);
}
+}
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
+static void
+nv50_base_image_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 4))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
}
+}
- push = evo_wait(mast, 8);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- /*XXX: SCALE_CTRL_ACTIVE??? */
- evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+static void
+nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ const s32 oclass = base->chan.base.base.user.oclass;
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 10))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, (asyw->image.mode << 8) |
+ (asyw->image.interval << 4));
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, asyw->image.handle);
+ if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, asyw->image.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 20) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, (asyw->image.kind << 16) |
+ (asyw->image.format << 8));
+ } else
+ if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, asyw->image.offset >> 8);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
- evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 20) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, asyw->image.format << 8);
} else {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, asyw->image.offset >> 8);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 24) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, asyw->image.format << 8);
}
+ evo_kick(push, &base->chan);
+ }
+}
- evo_kick(push, mast);
+static void
+nv50_base_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x00a4, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
+ }
+}
- if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_flip_next(crtc, crtc->primary->fb,
- NULL, 1);
- }
+static void
+nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 3))) {
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
+ evo_data(push, asyw->ntfy.handle);
+ evo_kick(push, &base->chan);
}
+}
- return 0;
+static void
+nv50_base_sema_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
+ }
}
-static int
-nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec)
+static void
+nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_base *base = nv50_base(wndw);
u32 *push;
+ if ((push = evo_wait(&base->chan, 5))) {
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, asyw->sema.offset);
+ evo_data(push, asyw->sema.acquire);
+ evo_data(push, asyw->sema.release);
+ evo_data(push, asyw->sema.handle);
+ evo_kick(push, &base->chan);
+ }
+}
- push = evo_wait(mast, 8);
- if (!push)
- return -ENOMEM;
+static u32
+nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
- evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1);
- evo_data(push, usec);
- evo_kick(push, mast);
+ if (!(push = evo_wait(&base->chan, 2)))
+ return 0;
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, interlock);
+ evo_kick(push, &base->chan);
+
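+	/* Kick the base channel's update method and hand back this head's
+	 * interlock bit for the caller; note the per-head bit spacing
+	 * differs between pre-GF110 and GF110+ classes.
+	 */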
+ if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
+ return interlock ? 2 << (base->id * 8) : 0;
+ return interlock ? 2 << (base->id * 4) : 0;
+}
+
+static int
+nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
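+	/* Poll the flip notifier in the shared sync buffer until it reports
+	 * the flip has begun, returning -ETIMEDOUT otherwise.
+	 */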
+ if (nvif_msec(&drm->device, 2000ULL,
+ u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
+ if ((data & 0xc0000000) == 0x40000000)
+ break;
+ usleep_range(1, 2);
+ ) < 0)
+ return -ETIMEDOUT;
return 0;
}
+static void
+nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->base.cpp = 0;
+}
+
static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push, hue, vib;
- int adj;
+ const u32 format = asyw->state.fb->pixel_format;
+ const struct drm_format_info *info;
+ int ret;
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+ info = drm_format_info(format);
+ if (!info || !info->depth)
+ return -EINVAL;
- push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
- evo_data(push, (hue << 20) | (vib << 8));
- } else {
- evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (hue << 20) | (vib << 8));
- }
+ ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ asyh->base.depth = info->depth;
+ asyh->base.cpp = info->cpp[0];
+ asyh->base.x = asyw->state.src.x1 >> 16;
+ asyh->base.y = asyw->state.src.y1 >> 16;
+ asyh->base.w = asyw->state.fb->width;
+ asyh->base.h = asyw->state.fb->height;
+
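+	/* Map the DRM fourcc to the hardware surface format code. */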
+ switch (format) {
+ case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
+ case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
+ case DRM_FORMAT_XRGB1555 :
+ case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
+ case DRM_FORMAT_XRGB8888 :
+ case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
+ case DRM_FORMAT_XBGR8888 :
+ case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
+ asyw->lut.enable = 1;
+ asyw->set.image = true;
return 0;
}
+static void *
+nv50_base_dtor(struct nv50_wndw *wndw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ struct nv50_base *base = nv50_base(wndw);
+ nv50_dmac_destroy(&base->chan.base, disp->disp);
+ return base;
+}
+
+static const u32
+nv50_base_format[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
+
+static const struct nv50_wndw_func
+nv50_base = {
+ .dtor = nv50_base_dtor,
+ .acquire = nv50_base_acquire,
+ .release = nv50_base_release,
+ .sema_set = nv50_base_sema_set,
+ .sema_clr = nv50_base_sema_clr,
+ .ntfy_set = nv50_base_ntfy_set,
+ .ntfy_clr = nv50_base_ntfy_clr,
+ .ntfy_wait_begun = nv50_base_ntfy_wait_begun,
+ .image_set = nv50_base_image_set,
+ .image_clr = nv50_base_image_clr,
+ .lut = nv50_base_lut,
+ .update = nv50_base_update,
+};
+
static int
-nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
+nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
+ struct nv50_base **pbase)
+{
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_base *base;
+ int ret;
+
+ if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
+ return -ENOMEM;
+ base->id = head->base.index;
+ base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
+ base->wndw.sema = EVO_FLIP_SEM0(base->id);
+ base->wndw.data = 0x00000000;
+
+ ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
+ "base", base->id, &base->chan.base,
+ nv50_base_format, ARRAY_SIZE(nv50_base_format),
+ &base->wndw);
+ if (ret) {
+ kfree(base);
+ return ret;
+ }
+
+ ret = nv50_base_create(&drm->device, disp->disp, base->id,
+ disp->sync->bo.offset, &base->chan);
+ if (ret)
+ return ret;
+
+ return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
+ false,
+ NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
+ &(struct nvif_notify_uevent_req) {},
+ sizeof(struct nvif_notify_uevent_req),
+ sizeof(struct nvif_notify_uevent_rep),
+ &base->wndw.notify);
+}
+
+/******************************************************************************
+ * Head
+ *****************************************************************************/
+static void
+nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
+ else
+ evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
+ evo_data(push, (asyh->procamp.sat.sin << 20) |
+ (asyh->procamp.sat.cos << 8));
+ evo_kick(push, core);
+ }
+}
- push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
- evo_data(push, (y << 16) | x);
- if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->r_handle);
- }
- } else {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_handle);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- }
+static void
+nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
+ else
+ if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
+ else
+ evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
+ evo_data(push, (asyh->dither.mode << 3) |
+ (asyh->dither.bits << 1) |
+ asyh->dither.enable);
+ evo_kick(push, core);
+ }
+}
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
+static void
+nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 bounds = 0;
+ u32 *push;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= 0x00000500; break;
+ case 4: bounds |= 0x00000300; break;
+ case 2: bounds |= 0x00000100; break;
+ default:
+ WARN_ON(1);
+ break;
}
- evo_kick(push, mast);
+ bounds |= 0x00000001;
}
- nv_crtc->fb.handle = nvfb->r_handle;
- return 0;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
+ evo_data(push, bounds);
+ evo_kick(push, core);
+ }
}
static void
-nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- } else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
- evo_data(push, mast->base.vram.handle);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, mast->base.vram.handle);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 bounds = 0;
+ u32 *push;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= 0x00000500; break;
+ case 4: bounds |= 0x00000300; break;
+ case 2: bounds |= 0x00000100; break;
+ case 1: bounds |= 0x00000000; break;
+ default:
+ WARN_ON(1);
+ break;
}
- evo_kick(push, mast);
+ bounds |= 0x00000001;
+ }
+
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
+ evo_data(push, bounds);
+ evo_kick(push, core);
}
- nv_crtc->cursor.visible = true;
}
static void
-nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+nv50_head_curs_clr(struct nv50_head *head)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
evo_data(push, 0x05000000);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
evo_data(push, 0x05000000);
- evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
evo_data(push, 0x00000000);
} else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, mast);
+ evo_kick(push, core);
}
- nv_crtc->cursor.visible = false;
}
static void
-nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
-
- if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
- nv50_crtc_cursor_show(nv_crtc);
- else
- nv50_crtc_cursor_hide(nv_crtc);
-
- if (update) {
- u32 *push = evo_wait(mast, 2);
- if (push) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, mast);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 5))) {
+ if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ } else
+ if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
+ evo_data(push, asyh->curs.handle);
+ } else {
+ evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
+ evo_data(push, asyh->curs.handle);
}
+ evo_kick(push, core);
}
}
static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+nv50_head_core_clr(struct nv50_head *head)
{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, core);
+ }
}
static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
+nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
+ if ((push = evo_wait(core, 9))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 20 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.kind << 16 |
+ asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ } else
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 20 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ } else {
+ evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 24 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ }
+ evo_kick(push, core);
+ }
+}
- nv50_display_flip_stop(crtc);
-
- push = evo_wait(mast, 6);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+static void
+nv50_head_lut_clr(struct nv50_head *head)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
evo_data(push, 0x40000000);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
evo_data(push, 0x40000000);
- evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
evo_data(push, 0x00000000);
} else {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
evo_data(push, 0x00000000);
}
-
- evo_kick(push, mast);
+ evo_kick(push, core);
}
-
- nv50_crtc_cursor_show_hide(nv_crtc, false, false);
}
static void
-nv50_crtc_commit(struct drm_crtc *crtc)
+nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
-
- push = evo_wait(mast, 32);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ if ((push = evo_wait(core, 7))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
evo_data(push, 0xc0000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, asyh->lut.offset >> 8);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
evo_data(push, 0xc0000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
- evo_data(push, mast->base.vram.handle);
+ evo_data(push, asyh->lut.offset >> 8);
+ evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
+ evo_data(push, asyh->lut.handle);
} else {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, asyh->lut.offset >> 8);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, mast->base.vram.handle);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
+ evo_data(push, asyh->lut.handle);
+ }
+ evo_kick(push, core);
+ }
+}
+
+static void
+nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ struct nv50_head_mode *m = &asyh->mode;
+ u32 *push;
+ if ((push = evo_wait(core, 14))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
+ evo_data(push, 0x00800000 | m->clock);
+ evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
+ evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
+ evo_data(push, 0x00000000);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_data(push, asyh->mode.v.blankus);
+ evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
+ evo_data(push, 0x00000000); /* ??? */
evo_data(push, 0xffffff00);
+ evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
+ evo_data(push, m->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, m->clock * 1000);
}
+ evo_kick(push, core);
+ }
+}
- evo_kick(push, mast);
+static void
+nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 10))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
+ evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+ evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ } else {
+ evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
+ evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+ evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ }
+ evo_kick(push, core);
}
+}
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
- nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
+static void
+nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
+{
+ if (asyh->clr.core && (!asyh->set.core || y))
+ nv50_head_lut_clr(head);
+ if (asyh->clr.core && (!asyh->set.core || y))
+ nv50_head_core_clr(head);
+ if (asyh->clr.curs && (!asyh->set.curs || y))
+ nv50_head_curs_clr(head);
}
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- return true;
+ if (asyh->set.view ) nv50_head_view (head, asyh);
+ if (asyh->set.mode ) nv50_head_mode (head, asyh);
+ if (asyh->set.core ) nv50_head_lut_set (head, asyh);
+ if (asyh->set.core ) nv50_head_core_set(head, asyh);
+ if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
+ if (asyh->set.base ) nv50_head_base (head, asyh);
+ if (asyh->set.ovly ) nv50_head_ovly (head, asyh);
+ if (asyh->set.dither ) nv50_head_dither (head, asyh);
+ if (asyh->set.procamp) nv50_head_procamp (head, asyh);
}
-static int
-nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+static void
+nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
- struct nv50_head *head = nv50_head(crtc);
- int ret;
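+	/* Translate the connector's colour vibrance/hue properties into the
+	 * 12-bit saturation sin/cos terms pushed by nv50_head_procamp().
+	 */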
+ const int vib = asyc->procamp.color_vibrance - 100;
+ const int hue = asyc->procamp.vibrant_hue - 90;
+ const int adj = (vib > 0) ? 50 : 0;
+ asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
+ asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
+ asyh->set.procamp = true;
+}
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
- if (ret == 0) {
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(nvfb->nvbo, &head->image);
+static void
+nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
+{
+ struct drm_connector *connector = asyc->state.connector;
+ u32 mode = 0x00;
+
+ if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+ if (asyh->base.depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = asyc->dither.mode;
}
- return ret;
+ if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= asyc->dither.depth;
+ }
+
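+	/* The DITHERING_MODE/DEPTH values pack enable, depth and mode into a
+	 * single word; unpack them into the per-field atomic state.
+	 */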
+ asyh->dither.enable = mode;
+ asyh->dither.bits = mode >> 1;
+ asyh->dither.mode = mode >> 3;
+ asyh->set.dither = true;
}
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
+static void
+nv50_head_atomic_check_view(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
{
- struct nv50_mast *mast = nv50_mast(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1, vblankus = 0;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- /* XXX: Safe underestimate, even "0" works */
- vblankus = (vactive - mode->vdisplay - 2) * hactive;
- vblankus *= 1000;
- vblankus /= mode->clock;
+ struct drm_connector *connector = asyc->state.connector;
+ struct drm_display_mode *omode = &asyh->state.adjusted_mode;
+ struct drm_display_mode *umode = &asyh->state.mode;
+ int mode = asyc->scaler.mode;
+ struct edid *edid;
+
+ if (connector->edid_blob_ptr)
+ edid = (struct edid *)connector->edid_blob_ptr->data;
+ else
+ edid = NULL;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
+ if (!asyc->scaler.full) {
+ if (mode == DRM_MODE_SCALE_NONE)
+ omode = umode;
+ } else {
+ /* Non-EDID LVDS/eDP mode. */
+ mode = DRM_MODE_SCALE_FULLSCREEN;
}
- ret = nv50_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
+ asyh->view.iW = umode->hdisplay;
+ asyh->view.iH = umode->vdisplay;
+ asyh->view.oW = omode->hdisplay;
+ asyh->view.oH = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ asyh->view.oH *= 2;
- push = evo_wait(mast, 64);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x00800000 | mode->clock);
- evo_data(push, (ilace == 2) ? 2 : 0);
- evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
+	/* Add overscan compensation if necessary; this keeps the aspect
+	 * ratio the same as the backend mode unless overridden by the
+	 * user setting both the hborder and vborder properties.
+	 */
+ if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
+ (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
+ drm_detect_hdmi_monitor(edid)))) {
+ u32 bX = asyc->scaler.underscan.hborder;
+ u32 bY = asyc->scaler.underscan.vborder;
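+		/* Output aspect ratio (oH/oW) in 19-bit fixed point; used to
+		 * derive the new height when no explicit vertical border is
+		 * given.
+		 */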
+ u32 r = (asyh->view.oH << 19) / asyh->view.oW;
+
+ if (bX) {
+ asyh->view.oW -= (bX * 2);
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
} else {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
+ asyh->view.oW -= (asyh->view.oW >> 4) + 32;
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
}
+ }
- evo_kick(push, mast);
+ /* Handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation.
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
+ asyh->view.oH = min((u16)umode->vdisplay, asyh->view.oH);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (asyh->view.oH < asyh->view.oW) {
+ u32 r = (asyh->view.iW << 19) / asyh->view.iH;
+ asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
+ } else {
+ u32 r = (asyh->view.iH << 19) / asyh->view.iW;
+ asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+ }
+ break;
+ default:
+ break;
}
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nv50_crtc_set_dither(nv_crtc, false);
- nv50_crtc_set_scale(nv_crtc, false);
+ asyh->set.view = true;
+}
+
+static void
+nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hbackp = mode->htotal - mode->hsync_end;
+ u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ u32 hfrontp = mode->hsync_start - mode->hdisplay;
+ u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ struct nv50_head_mode *m = &asyh->mode;
+
+ m->h.active = mode->htotal;
+ m->h.synce = mode->hsync_end - mode->hsync_start - 1;
+ m->h.blanke = m->h.synce + hbackp;
+ m->h.blanks = mode->htotal - hfrontp - 1;
+
+ m->v.active = mode->vtotal * vscan / ilace;
+ m->v.synce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ m->v.blanke = m->v.synce + vbackp;
+ m->v.blanks = m->v.active - vfrontp - 1;
+
+ /*XXX: Safe underestimate, even "0" works */
+ m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+ m->v.blankus *= 1000;
+ m->v.blankus /= mode->clock;
- /* G94 only accepts this after setting scale */
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA)
- nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ m->v.blank2e = m->v.active + m->v.synce + vbackp;
+ m->v.blank2s = m->v.blank2e + (mode->vdisplay * vscan / ilace);
+ m->v.active = (m->v.active * 2) + 1;
+ m->interlace = true;
+ } else {
+ m->v.blank2e = 0;
+ m->v.blank2s = 1;
+ m->interlace = false;
+ }
+ m->clock = mode->clock;
- nv50_crtc_set_color_vibrance(nv_crtc, false);
- nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
- return 0;
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ asyh->set.mode = true;
}
static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ struct nouveau_conn_atom *asyc = NULL;
+ struct drm_connector_state *conns;
+ struct drm_connector *conn;
+ int i;
- if (!crtc->primary->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
+ NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
+ if (asyh->state.active) {
+ for_each_connector_in_state(asyh->state.state, conn, conns, i) {
+ if (conns->crtc == crtc) {
+ asyc = nouveau_conn_atom(conns);
+ break;
+ }
+ }
+
+ if (armh->state.active) {
+ if (asyc) {
+ if (asyh->state.mode_changed)
+ asyc->set.scaler = true;
+ if (armh->base.depth != asyh->base.depth)
+ asyc->set.dither = true;
+ }
+ } else {
+ asyc->set.mask = ~0;
+ asyh->set.mask = ~0;
+ }
+
+ if (asyh->state.mode_changed)
+ nv50_head_atomic_check_mode(head, asyh);
+
+ if (asyc) {
+ if (asyc->set.scaler)
+ nv50_head_atomic_check_view(armh, asyh, asyc);
+ if (asyc->set.dither)
+ nv50_head_atomic_check_dither(armh, asyh, asyc);
+ if (asyc->set.procamp)
+ nv50_head_atomic_check_procamp(armh, asyh, asyc);
+ }
+
+ if ((asyh->core.visible = (asyh->base.cpp != 0))) {
+ asyh->core.x = asyh->base.x;
+ asyh->core.y = asyh->base.y;
+ asyh->core.w = asyh->base.w;
+ asyh->core.h = asyh->base.h;
+ } else
+ if ((asyh->core.visible = asyh->curs.visible)) {
+ /*XXX: We need to either find some way of having the
+ * primary base layer appear black, while still
+ * being able to display the other layers, or we
+ * need to allocate a dummy black surface here.
+ */
+ asyh->core.x = 0;
+ asyh->core.y = 0;
+ asyh->core.w = asyh->state.mode.hdisplay;
+ asyh->core.h = asyh->state.mode.vdisplay;
+ }
+ asyh->core.handle = disp->mast.base.vram.handle;
+ asyh->core.offset = 0;
+ asyh->core.format = 0xcf;
+ asyh->core.kind = 0;
+ asyh->core.layout = 1;
+ asyh->core.block = 0;
+ asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
+ asyh->lut.handle = disp->mast.base.vram.handle;
+ asyh->lut.offset = head->base.lut.nvbo->bo.offset;
+ asyh->set.base = armh->base.cpp != asyh->base.cpp;
+ asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
+ } else {
+ asyh->core.visible = false;
+ asyh->curs.visible = false;
+ asyh->base.cpp = 0;
+ asyh->ovly.cpp = 0;
}
- ret = nv50_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
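+	/* On a full modeset, core and cursor are (re)programmed or disarmed
+	 * outright based on visibility; otherwise only the pieces that differ
+	 * from the currently armed state are flagged.
+	 */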
+ if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
+ if (asyh->core.visible) {
+ if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
+ asyh->set.core = true;
+ } else
+ if (armh->core.visible) {
+ asyh->clr.core = true;
+ }
- nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, true);
- nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
- return 0;
-}
+ if (asyh->curs.visible) {
+ if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
+ asyh->set.curs = true;
+ } else
+ if (armh->curs.visible) {
+ asyh->clr.curs = true;
+ }
+ } else {
+ asyh->clr.core = armh->core.visible;
+ asyh->clr.curs = armh->curs.visible;
+ asyh->set.core = asyh->core.visible;
+ asyh->set.curs = asyh->curs.visible;
+ }
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+ if (asyh->clr.mask || asyh->set.mask)
+ nv50_atom(asyh->state.state)->lock_core = true;
return 0;
}
static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
+nv50_head_lut_load(struct drm_crtc *crtc)
{
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1292,64 +2200,95 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
}
}
-static void
-nv50_crtc_disable(struct drm_crtc *crtc)
+static int
+nv50_head_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
{
- struct nv50_head *head = nv50_head(crtc);
- evo_sync(crtc->dev);
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(NULL, &head->image);
+ WARN_ON(1);
+ return 0;
}
+static const struct drm_crtc_helper_funcs
+nv50_head_help = {
+ .mode_set_base_atomic = nv50_head_mode_set_base_atomic,
+ .load_lut = nv50_head_lut_load,
+ .atomic_check = nv50_head_atomic_check,
+};
+
+/* This is identical to the version in the atomic helpers, except that
+ * it supports non-vblanked ("async") page flips.
+ */
static int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
+nv50_head_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event, u32 flags)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_gem_object *gem = NULL;
- struct nouveau_bo *nvbo = NULL;
+ struct drm_plane *plane = crtc->primary;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_state *crtc_state;
int ret = 0;
- if (handle) {
- if (width != 64 || height != 64)
- return -EINVAL;
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
- gem = drm_gem_object_lookup(file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+ crtc_state->event = event;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
}
- if (ret == 0) {
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(nvbo, &nv_crtc->cursor.nvbo);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ /* Make sure we don't accidentally do a full modeset. */
+ state->allow_modeset = false;
+ if (!crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
+ crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
}
- drm_gem_object_unreference_unlocked(gem);
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
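+	/* An interval of zero requests an immediate (non-vblanked) flip, as
+	 * described in the comment above this function.
+	 */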
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ nv50_wndw_atom(plane_state)->interval = 0;
+
+ ret = drm_atomic_nonblocking_commit(state);
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_put(state);
return ret;
-}
-static int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_curs *curs = nv50_curs(crtc);
- struct nv50_chan *chan = nv50_chan(curs);
- nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
- nvif_wr32(&chan->user, 0x0080, 0x00000000);
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
- nv_crtc->cursor_saved_x = x;
- nv_crtc->cursor_saved_y = y;
- return 0;
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
}
static int
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1361,47 +2300,71 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
nv_crtc->lut.b[i] = b[i];
}
- nv50_crtc_lut_load(crtc);
-
+ nv50_head_lut_load(crtc);
return 0;
}
static void
-nv50_crtc_cursor_restore(struct nouveau_crtc *nv_crtc, int x, int y)
+nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- nv50_crtc_cursor_move(&nv_crtc->base, x, y);
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ __drm_atomic_helper_crtc_destroy_state(&asyh->state);
+ kfree(asyh);
+}
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
+static struct drm_crtc_state *
+nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh;
+ if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
+ asyh->view = armh->view;
+ asyh->mode = armh->mode;
+ asyh->lut = armh->lut;
+ asyh->core = armh->core;
+ asyh->curs = armh->curs;
+ asyh->base = armh->base;
+ asyh->ovly = armh->ovly;
+ asyh->dither = armh->dither;
+ asyh->procamp = armh->procamp;
+ asyh->clr.mask = 0;
+ asyh->set.mask = 0;
+ return &asyh->state;
+}
+
+static void
+__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+ crtc->state = state;
+ crtc->state->crtc = crtc;
}
static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
+nv50_head_reset(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *asyh;
+
+ if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
+}
+
+static void
+nv50_head_destroy(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
- struct nv50_fbdma *fbdma;
-
- list_for_each_entry(fbdma, &disp->fbdma, head) {
- nvif_object_fini(&fbdma->base[nv_crtc->index]);
- }
nv50_dmac_destroy(&head->ovly.base, disp->disp);
nv50_pioc_destroy(&head->oimm.base);
- nv50_dmac_destroy(&head->sync.base, disp->disp);
- nv50_pioc_destroy(&head->curs.base);
-
- /*XXX: this shouldn't be necessary, but the core doesn't call
- * disconnect() during the cleanup paths
- */
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(NULL, &head->image);
-
- /*XXX: ditto */
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
@@ -1412,34 +2375,27 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
-static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
- .dpms = nv50_crtc_dpms,
- .prepare = nv50_crtc_prepare,
- .commit = nv50_crtc_commit,
- .mode_fixup = nv50_crtc_mode_fixup,
- .mode_set = nv50_crtc_mode_set,
- .mode_set_base = nv50_crtc_mode_set_base,
- .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
- .load_lut = nv50_crtc_lut_load,
- .disable = nv50_crtc_disable,
-};
-
-static const struct drm_crtc_funcs nv50_crtc_func = {
- .cursor_set = nv50_crtc_cursor_set,
- .cursor_move = nv50_crtc_cursor_move,
- .gamma_set = nv50_crtc_gamma_set,
- .set_config = nouveau_crtc_set_config,
- .destroy = nv50_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
+static const struct drm_crtc_funcs
+nv50_head_func = {
+ .reset = nv50_head_reset,
+ .gamma_set = nv50_head_gamma_set,
+ .destroy = nv50_head_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = nv50_head_page_flip,
+ .set_property = drm_atomic_helper_crtc_set_property,
+ .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_head_atomic_destroy_state,
};
static int
-nv50_crtc_create(struct drm_device *dev, int index)
+nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
+ struct nv50_base *base;
+ struct nv50_curs *curs;
struct drm_crtc *crtc;
int ret, i;
@@ -1448,21 +2404,25 @@ nv50_crtc_create(struct drm_device *dev, int index)
return -ENOMEM;
head->base.index = index;
- head->base.set_dither = nv50_crtc_set_dither;
- head->base.set_scale = nv50_crtc_set_scale;
- head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
- head->base.color_vibrance = 50;
- head->base.vibrant_hue = 0;
- head->base.cursor.set_pos = nv50_crtc_cursor_restore;
for (i = 0; i < 256; i++) {
head->base.lut.r[i] = i << 8;
head->base.lut.g[i] = i << 8;
head->base.lut.b[i] = i << 8;
}
+ ret = nv50_base_new(drm, head, &base);
+ if (ret == 0)
+ ret = nv50_curs_new(drm, head, &curs);
+ if (ret) {
+ kfree(head);
+ return ret;
+ }
+
crtc = &head->base.base;
- drm_crtc_init(dev, crtc, &nv50_crtc_func);
- drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+ drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
+ &curs->wndw.plane, &nv50_head_func,
+ "head-%d", head->base.index);
+ drm_crtc_helper_add(crtc, &nv50_head_help);
drm_mode_crtc_set_gamma_size(crtc, 256);
ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
@@ -1481,20 +2441,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (ret)
goto out;
- /* allocate cursor resources */
- ret = nv50_curs_create(device, disp->disp, index, &head->curs);
- if (ret)
- goto out;
-
- /* allocate page flip / sync resources */
- ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
- &head->sync);
- if (ret)
- goto out;
-
- head->sync.addr = EVO_FLIP_SEM0(index);
- head->sync.data = 0x00000000;
-
/* allocate overlay resources */
ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
if (ret)
@@ -1507,43 +2453,64 @@ nv50_crtc_create(struct drm_device *dev, int index)
out:
if (ret)
- nv50_crtc_destroy(crtc);
+ nv50_head_destroy(crtc);
return ret;
}
/******************************************************************************
- * Encoder helpers
+ * Output path helpers
*****************************************************************************/
-static bool
-nv50_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int
+nv50_outp_atomic_check_view(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct drm_display_mode *native_mode)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_connector *connector = conn_state->connector;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+
+ NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
+ asyc->scaler.full = false;
+ if (!native_mode)
+ return 0;
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- nv_connector->scaling_full = false;
- if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) {
- switch (nv_connector->type) {
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_eDP:
- /* force use of scaler for non-edid modes */
- if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
- return true;
- nv_connector->scaling_full = true;
+ if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* Force use of scaler for non-EDID modes. */
+ if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
break;
- default:
- return true;
- }
+ mode = native_mode;
+ asyc->scaler.full = true;
+ break;
+ default:
+ break;
}
+ } else {
+ mode = native_mode;
+ }
- drm_mode_copy(adjusted_mode, nv_connector->native_mode);
+ if (!drm_mode_equal(adjusted_mode, mode)) {
+ drm_mode_copy(adjusted_mode, mode);
+ crtc_state->mode_changed = true;
}
- return true;
+ return 0;
+}
+
+static int
+nv50_outp_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(conn_state->connector);
+ return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ nv_connector->native_mode);
}
/******************************************************************************
@@ -1574,21 +2541,39 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
}
static void
-nv50_dac_commit(struct drm_encoder *encoder)
+nv50_dac_disable(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0400 + (or * 0x080), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0180 + (or * 0x020), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
}
static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_dac_enable(struct drm_encoder *encoder)
{
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
u32 *push;
- nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
push = evo_wait(mast, 8);
if (push) {
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -1627,33 +2612,6 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nv_encoder->crtc = encoder->crtc;
}
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0400 + (or * 0x080), 1);
- evo_data(push, 0x00000000);
- } else {
- evo_mthd(push, 0x0180 + (or * 0x020), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
- }
- }
-
- nv_encoder->crtc = NULL;
-}
-
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
@@ -1681,6 +2639,15 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
return connector_status_connected;
}
+static const struct drm_encoder_helper_funcs
+nv50_dac_help = {
+ .dpms = nv50_dac_dpms,
+ .atomic_check = nv50_outp_atomic_check,
+ .enable = nv50_dac_enable,
+ .disable = nv50_dac_disable,
+ .detect = nv50_dac_detect
+};
+
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
@@ -1688,18 +2655,8 @@ nv50_dac_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
- .dpms = nv50_dac_dpms,
- .mode_fixup = nv50_encoder_mode_fixup,
- .prepare = nv50_dac_disconnect,
- .commit = nv50_dac_commit,
- .mode_set = nv50_dac_mode_set,
- .disable = nv50_dac_disconnect,
- .get_crtc = nv50_display_crtc_get,
- .detect = nv50_dac_detect
-};
-
-static const struct drm_encoder_funcs nv50_dac_func = {
+static const struct drm_encoder_funcs
+nv50_dac_func = {
.destroy = nv50_dac_destroy,
};
@@ -1726,8 +2683,9 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
+ "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_dac_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -1737,7 +2695,26 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
* Audio
*****************************************************************************/
static void
-nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hda_eld_v0 eld;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
+ };
+
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+}
+
+static void
+nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1768,30 +2745,30 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
sizeof(args.base) + drm_eld_size(args.data));
}
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
static void
-nv50_audio_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_hda_eld_v0 eld;
+ struct nv50_disp_sor_hdmi_pwr_v0 pwr;
} args = {
.base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
- (0x0100 << nv_crtc->index),
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
};
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-/******************************************************************************
- * HDMI
- *****************************************************************************/
static void
-nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1821,26 +2798,635 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
args.pwr.max_ac_packet = max_ac_packet / 32;
nvif_mthd(disp->disp, 0, &args, sizeof(args));
- nv50_audio_mode_set(encoder, mode);
+ nv50_audio_enable(encoder, mode);
+}
+
+/******************************************************************************
+ * MST
+ *****************************************************************************/
+#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
+#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
+#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
+
+struct nv50_mstm {
+ struct nouveau_encoder *outp;
+
+ struct drm_dp_mst_topology_mgr mgr;
+ struct nv50_msto *msto[4];
+
+ bool modified;
+};
+
+struct nv50_mstc {
+ struct nv50_mstm *mstm;
+ struct drm_dp_mst_port *port;
+ struct drm_connector connector;
+
+ struct drm_display_mode *native;
+ struct edid *edid;
+
+ int pbn;
+};
+
+struct nv50_msto {
+ struct drm_encoder encoder;
+
+ struct nv50_head *head;
+ struct nv50_mstc *mstc;
+ bool disabled;
+};
+
+static struct drm_dp_payload *
+nv50_msto_payload(struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+ int vcpi = mstc->port->vcpi.vcpi, i;
+
+ NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
+ NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
+ mstm->outp->base.base.name, i, payload->vcpi,
+ payload->start_slot, payload->num_slots);
+ }
+
+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
+ if (payload->vcpi == vcpi)
+ return payload;
+ }
+
+ return NULL;
}
static void
-nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+nv50_msto_cleanup(struct nv50_msto *msto)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
+ if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
+ drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
+ if (msto->disabled) {
+ msto->mstc = NULL;
+ msto->head = NULL;
+ msto->disabled = false;
+ }
+}
+
+static void
+nv50_msto_prepare(struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
struct {
struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_hdmi_pwr_v0 pwr;
+ struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
} args = {
.base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
- (0x0100 << nv_crtc->index),
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
+ .base.hasht = mstm->outp->dcb->hasht,
+ .base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
+ (0x0100 << msto->head->base.index),
};
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
+ if (mstc->port && mstc->port->vcpi.vcpi > 0) {
+ struct drm_dp_payload *payload = nv50_msto_payload(msto);
+ if (payload) {
+ args.vcpi.start_slot = payload->start_slot;
+ args.vcpi.num_slots = payload->num_slots;
+ args.vcpi.pbn = mstc->port->vcpi.pbn;
+ args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
+ }
+ }
+
+ NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
+ msto->encoder.name, msto->head->base.base.name,
+ args.vcpi.start_slot, args.vcpi.num_slots,
+ args.vcpi.pbn, args.vcpi.aligned_pbn);
+ nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
+}
+
+static int
+nv50_msto_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
+ struct nv50_mstm *mstm = mstc->mstm;
+ int bpp = conn_state->connector->display_info.bpc * 3;
+ int slots;
+
+ mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
+
+ slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
+ if (slots < 0)
+ return slots;
+
+ return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ mstc->native);
+}
+
+static void
+nv50_msto_enable(struct drm_encoder *encoder)
+{
+ struct nv50_head *head = nv50_head(encoder->crtc);
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = NULL;
+ struct nv50_mstm *mstm = NULL;
+ struct drm_connector *connector;
+ u8 proto, depth;
+ int slots;
+ bool r;
+
+ drm_for_each_connector(connector, encoder->dev) {
+ if (connector->state->best_encoder == &msto->encoder) {
+ mstc = nv50_mstc(connector);
+ mstm = mstc->mstm;
+ break;
+ }
+ }
+
+ if (WARN_ON(!mstc))
+ return;
+
+ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, &slots);
+ WARN_ON(!r);
+
+ if (mstm->outp->dcb->sorconf.link & 1)
+ proto = 0x8;
+ else
+ proto = 0x9;
+
+ switch (mstc->connector.display_info.bpc) {
+ case 6: depth = 0x2; break;
+ case 8: depth = 0x5; break;
+ case 10:
+ default: depth = 0x6; break;
+ }
+
+ mstm->outp->update(mstm->outp, head->base.index,
+ &head->base.base.state->adjusted_mode, proto, depth);
+
+ msto->head = head;
+ msto->mstc = mstc;
+ mstm->modified = true;
+}
+
+static void
+nv50_msto_disable(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ if (mstc->port)
+ drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
+
+ mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
+ mstm->modified = true;
+ msto->disabled = true;
+}
+
+static const struct drm_encoder_helper_funcs
+nv50_msto_help = {
+ .disable = nv50_msto_disable,
+ .enable = nv50_msto_enable,
+ .atomic_check = nv50_msto_atomic_check,
+};
+
+static void
+nv50_msto_destroy(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ drm_encoder_cleanup(&msto->encoder);
+ kfree(msto);
+}
+
+static const struct drm_encoder_funcs
+nv50_msto = {
+ .destroy = nv50_msto_destroy,
+};
+
+static int
+nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
+ struct nv50_msto **pmsto)
+{
+ struct nv50_msto *msto;
+ int ret;
+
+ if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
+ DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
+ if (ret) {
+ kfree(*pmsto);
+ *pmsto = NULL;
+ return ret;
+ }
+
+ drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
+ msto->encoder.possible_crtcs = heads;
+ return 0;
+}
+
+static struct drm_encoder *
+nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
+{
+ struct nv50_head *head = nv50_head(connector_state->crtc);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (mstc->port) {
+ struct nv50_mstm *mstm = mstc->mstm;
+ return &mstm->msto[head->base.index]->encoder;
+ }
+ return NULL;
+}
+
+static struct drm_encoder *
+nv50_mstc_best_encoder(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (mstc->port) {
+ struct nv50_mstm *mstm = mstc->mstm;
+ return &mstm->msto[0]->encoder;
+ }
+ return NULL;
+}
+
+static enum drm_mode_status
+nv50_mstc_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+nv50_mstc_get_modes(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ int ret = 0;
+
+ mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
+ drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
+ if (mstc->edid) {
+ ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
+ drm_edid_to_eld(&mstc->connector, mstc->edid);
+ }
+
+ if (!mstc->connector.display_info.bpc)
+ mstc->connector.display_info.bpc = 8;
+
+ if (mstc->native)
+ drm_mode_destroy(mstc->connector.dev, mstc->native);
+ mstc->native = nouveau_conn_native_mode(&mstc->connector);
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .best_encoder = nv50_mstc_best_encoder,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+};
+
+static enum drm_connector_status
+nv50_mstc_detect(struct drm_connector *connector, bool force)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (!mstc->port)
+ return connector_status_disconnected;
+ return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+}
+
+static void
+nv50_mstc_destroy(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ drm_connector_cleanup(&mstc->connector);
+ kfree(mstc);
+}
+
+static const struct drm_connector_funcs
+nv50_mstc = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = nouveau_conn_reset,
+ .detect = nv50_mstc_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .destroy = nv50_mstc_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
+};
+
+static int
+nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
+ const char *path, struct nv50_mstc **pmstc)
+{
+ struct drm_device *dev = mstm->outp->base.base.dev;
+ struct nv50_mstc *mstc;
+ int ret, i;
+
+ if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
+ return -ENOMEM;
+ mstc->mstm = mstm;
+ mstc->port = port;
+
+ ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ kfree(*pmstc);
+ *pmstc = NULL;
+ return ret;
+ }
+
+ drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
+
+ mstc->connector.funcs->reset(&mstc->connector);
+ nouveau_conn_attach_properties(&mstc->connector);
+
+ for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
+ drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
+ drm_mode_connector_set_path_property(&mstc->connector, path);
+ return 0;
+}
+
+static void
+nv50_mstm_cleanup(struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
+ ret = drm_dp_check_act_status(&mstm->mgr);
+
+ ret = drm_dp_update_payload_part2(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_cleanup(msto);
+ }
+ }
+
+ mstm->modified = false;
+}
+
+static void
+nv50_mstm_prepare(struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
+ ret = drm_dp_update_payload_part1(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_prepare(msto);
+ }
+ }
+}
+
+static void
+nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
+}
+
+static void
+nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+
+ drm_connector_unregister(&mstc->connector);
+
+ drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
+ mstc->port = NULL;
+ drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_unreference(&mstc->connector);
+}
+
+static void
+nv50_mstm_register_connector(struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+
+ drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
+ drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_register(connector);
+}
+
+static struct drm_connector *
+nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, const char *path)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ struct nv50_mstc *mstc;
+ int ret;
+
+ ret = nv50_mstc_new(mstm, port, path, &mstc);
+ if (ret) {
+ if (mstc)
+ mstc->connector.funcs->destroy(&mstc->connector);
+ return NULL;
+ }
+
+ return &mstc->connector;
+}
+
+static const struct drm_dp_mst_topology_cbs
+nv50_mstm = {
+ .add_connector = nv50_mstm_add_connector,
+ .register_connector = nv50_mstm_register_connector,
+ .destroy_connector = nv50_mstm_destroy_connector,
+ .hotplug = nv50_mstm_hotplug,
+};
+
+void
+nv50_mstm_service(struct nv50_mstm *mstm)
+{
+ struct drm_dp_aux *aux = mstm->mgr.aux;
+ bool handled = true;
+ int ret;
+ u8 esi[8] = {};
+
+ while (handled) {
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
+ if (ret != 8) {
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+ return;
+ }
+
+ drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
+ if (!handled)
+ break;
+
+ drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+ }
+}
+
+void
+nv50_mstm_remove(struct nv50_mstm *mstm)
+{
+ if (mstm)
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+}
+
+static int
+nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
+{
+ struct nouveau_encoder *outp = mstm->outp;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_dp_mst_link_v0 mst;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
+ .base.hasht = outp->dcb->hasht,
+ .base.hashm = outp->dcb->hashm,
+ .mst.state = state,
+ };
+ struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
+ struct nvif_object *disp = &drm->display->disp;
+ int ret;
+
+ if (dpcd >= 0x12) {
+ ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+ if (ret < 0)
+ return ret;
+
+ dpcd &= ~DP_MST_EN;
+ if (state)
+ dpcd |= DP_MST_EN;
+
+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
+ if (ret < 0)
+ return ret;
+ }
+
+ return nvif_mthd(disp, 0, &args, sizeof(args));
+}
+
+int
+nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
+{
+ int ret, state = 0;
+
+ if (!mstm)
+ return 0;
+
+ if (dpcd[0] >= 0x12) {
+ ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+ if (ret < 0)
+ return ret;
+
+ if (!(dpcd[1] & DP_MST_CAP))
+ dpcd[0] = 0x11;
+ else
+ state = allow;
+ }
+
+ ret = nv50_mstm_enable(mstm, dpcd[0], state);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+ if (ret)
+ return nv50_mstm_enable(mstm, dpcd[0], 0);
+
+ return mstm->mgr.mst_state;
+}
+
+static void
+nv50_mstm_fini(struct nv50_mstm *mstm)
+{
+ if (mstm && mstm->mgr.mst_state)
+ drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
+}
+
+static void
+nv50_mstm_init(struct nv50_mstm *mstm)
+{
+ if (mstm && mstm->mgr.mst_state)
+ drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+}
+
+static void
+nv50_mstm_del(struct nv50_mstm **pmstm)
+{
+ struct nv50_mstm *mstm = *pmstm;
+ if (mstm) {
+ kfree(*pmstm);
+ *pmstm = NULL;
+ }
+}
+
+static int
+nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
+ int conn_base_id, struct nv50_mstm **pmstm)
+{
+ const int max_payloads = hweight8(outp->dcb->heads);
+ struct drm_device *dev = outp->base.base.dev;
+ struct nv50_mstm *mstm;
+ int ret, i;
+ u8 dpcd;
+
+ /* This is a workaround for some monitors not functioning
+ * correctly in MST mode on initial module load. I think
+ * some bad interaction with the VBIOS may be responsible.
+ *
+ * A good ol' off and on again seems to work here ;)
+ */
+ ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
+ if (ret >= 0 && dpcd >= 0x12)
+ drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
+
+ if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
+ return -ENOMEM;
+ mstm->outp = outp;
+ mstm->mgr.cbs = &nv50_mstm;
+
+ ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev->dev, aux, aux_max,
+ max_payloads, conn_base_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < max_payloads; i++) {
+ ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
+ i, &mstm->msto[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
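
A rough sketch of the bandwidth bookkeeping behind nv50_msto_atomic_check() above, assuming the DP MST payload-bandwidth unit of 54/64 MB/s and the small margin applied by the DRM helper (the numbers are illustrative, not taken from this patch):

    /* Illustrative only: what drm_dp_calc_pbn_mode(clock, bpp) works out.
     * bpp = bpc * 3, so an 8 bpc stream at a 148500 kHz pixel clock needs
     *   data rate = 148500 kHz * 24 bit  ~= 3.56 Gbit/s ~= 445.5 MB/s
     *   PBN       ~= 445.5 / (54/64 MB/s) * ~1.006 ~= 530 units
     * drm_dp_find_vcpi_slots() then converts that PBN figure into MST
     * timeslots, and the check fails with a negative value if the link
     * cannot fit the stream. */
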
/******************************************************************************
@@ -1861,89 +3447,91 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
.base.hashm = nv_encoder->dcb->hashm,
.pwr.state = mode == DRM_MODE_DPMS_ON,
};
- struct {
- struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_dp_pwr_v0 pwr;
- } link = {
- .base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = nv_encoder->dcb->hashm,
- .pwr.state = mode == DRM_MODE_DPMS_ON,
- };
- struct drm_device *dev = encoder->dev;
- struct drm_encoder *partner;
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+}
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
+static void
+nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
+ struct drm_display_mode *mode, u8 proto, u8 depth)
+{
+ struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
+ u32 *push;
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- args.pwr.state = 1;
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
- nvif_mthd(disp->disp, 0, &link, sizeof(link));
+ if (!mode) {
+ nv_encoder->ctrl &= ~BIT(head);
+ if (!(nv_encoder->ctrl & 0x0000000f))
+ nv_encoder->ctrl = 0;
} else {
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ nv_encoder->ctrl |= proto << 8;
+ nv_encoder->ctrl |= BIT(head);
}
-}
-static void
-nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
-{
- struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
- u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
- if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ if ((push = evo_wait(core, 6))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ if (mode) {
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ nv_encoder->ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ nv_encoder->ctrl |= 0x00002000;
+ nv_encoder->ctrl |= depth << 16;
+ }
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
- evo_data(push, (nv_encoder->ctrl = temp));
} else {
+ if (mode) {
+ u32 magic = 0x31ec6000 | (head << 25);
+ u32 syncs = 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (head * 0x300), 2);
+ evo_data(push, syncs | (depth << 6));
+ evo_data(push, magic);
+ }
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, (nv_encoder->ctrl = temp));
}
- evo_kick(push, mast);
+ evo_data(push, nv_encoder->ctrl);
+ evo_kick(push, core);
}
}
static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
+nv50_sor_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
nv_encoder->crtc = NULL;
if (nv_crtc) {
- nv50_crtc_prepare(&nv_crtc->base);
- nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0);
- nv50_audio_disconnect(encoder, nv_crtc);
- nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc);
- }
-}
+ struct nvkm_i2c_aux *aux = nv_encoder->aux;
+ u8 pwr;
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
+ if (aux) {
+ int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
+ if (ret == 0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
+ }
+ }
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+ nv50_audio_disable(encoder, nv_crtc);
+ nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
+ }
}
static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
+nv50_sor_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_lvds_script_v0 lvds;
@@ -1954,13 +3542,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
.base.hashm = nv_encoder->dcb->hashm,
};
struct nv50_disp *disp = nv50_disp(encoder->dev);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
- u32 mask, ctrl;
- u8 owner = 1 << nv_crtc->index;
u8 proto = 0xf;
u8 depth = 0x0;
@@ -1985,7 +3570,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
proto = 0x2;
}
- nv50_hdmi_mode_set(&nv_encoder->base.base, mode);
+ nv50_hdmi_enable(&nv_encoder->base.base, mode);
break;
case DCB_OUTPUT_LVDS:
proto = 0x0;
@@ -2019,94 +3604,60 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ if (nv_connector->base.display_info.bpc == 6)
depth = 0x2;
- } else
- if (nv_connector->base.display_info.bpc == 8) {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ else
+ if (nv_connector->base.display_info.bpc == 8)
depth = 0x5;
- } else {
- nv_encoder->dp.datarate = mode->clock * 30 / 8;
+ else
depth = 0x6;
- }
if (nv_encoder->dcb->sorconf.link & 1)
proto = 0x8;
else
proto = 0x9;
- nv50_audio_mode_set(encoder, mode);
+
+ nv50_audio_enable(encoder, mode);
break;
default:
BUG_ON(1);
break;
}
- nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
-
- if (nv50_vers(mast) >= GF110_DISP) {
- u32 *push = evo_wait(mast, 3);
- if (push) {
- u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
- u32 syncs = 0x00000001;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs | (depth << 6));
- evo_data(push, magic);
- evo_kick(push, mast);
- }
-
- ctrl = proto << 8;
- mask = 0x00000f00;
- } else {
- ctrl = (depth << 16) | (proto << 8);
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- ctrl |= 0x00001000;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- ctrl |= 0x00002000;
- mask = 0x000f3f00;
- }
-
- nv50_sor_ctrl(nv_encoder, mask | owner, ctrl | owner);
+ nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
}
+static const struct drm_encoder_helper_funcs
+nv50_sor_help = {
+ .dpms = nv50_sor_dpms,
+ .atomic_check = nv50_outp_atomic_check,
+ .enable = nv50_sor_enable,
+ .disable = nv50_sor_disable,
+};
+
static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
- .dpms = nv50_sor_dpms,
- .mode_fixup = nv50_encoder_mode_fixup,
- .prepare = nv50_sor_disconnect,
- .commit = nv50_sor_commit,
- .mode_set = nv50_sor_mode_set,
- .disable = nv50_sor_disconnect,
- .get_crtc = nv50_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nv50_sor_func = {
+static const struct drm_encoder_funcs
+nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- int type;
+ int type, ret;
switch (dcbe->type) {
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
@@ -2122,7 +3673,16 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ nv_encoder->update = nv50_sor_update;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
+ "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_sor_help);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nvkm_i2c_aux *aux =
@@ -2131,6 +3691,15 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = &aux->i2c;
nv_encoder->aux = aux;
}
+
+ /*TODO: Use DP Info Table to check for support. */
+ if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
+ nv_connector->base.base.id,
+ &nv_encoder->dp.mstm);
+ if (ret)
+ return ret;
+ }
} else {
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
@@ -2138,20 +3707,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = &bus->i2c;
}
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
* PIOR
*****************************************************************************/
-
static void
nv50_pior_dpms(struct drm_encoder *encoder, int mode)
{
@@ -2172,30 +3733,48 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode)
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-static bool
-nv50_pior_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int
+nv50_pior_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- if (!nv50_encoder_mode_fixup(encoder, mode, adjusted_mode))
- return false;
- adjusted_mode->clock *= 2;
- return true;
+ int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
+ if (ret)
+ return ret;
+ crtc_state->adjusted_mode.clock *= 2;
+ return 0;
}
static void
-nv50_pior_commit(struct drm_encoder *encoder)
+nv50_pior_disable(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0700 + (or * 0x040), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
}
static void
-nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_pior_enable(struct drm_encoder *encoder)
{
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
u8 owner = 1 << nv_crtc->index;
u8 proto, depth;
u32 *push;
@@ -2218,8 +3797,6 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
break;
}
- nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
-
push = evo_wait(mast, 8);
if (push) {
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -2238,29 +3815,13 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nv_encoder->crtc = encoder->crtc;
}
-static void
-nv50_pior_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0700 + (or * 0x040), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
- }
- }
-
- nv_encoder->crtc = NULL;
-}
+static const struct drm_encoder_helper_funcs
+nv50_pior_help = {
+ .dpms = nv50_pior_dpms,
+ .atomic_check = nv50_pior_atomic_check,
+ .enable = nv50_pior_enable,
+ .disable = nv50_pior_disable,
+};
static void
nv50_pior_destroy(struct drm_encoder *encoder)
@@ -2269,17 +3830,8 @@ nv50_pior_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
- .dpms = nv50_pior_dpms,
- .mode_fixup = nv50_pior_mode_fixup,
- .prepare = nv50_pior_disconnect,
- .commit = nv50_pior_commit,
- .mode_set = nv50_pior_mode_set,
- .disable = nv50_pior_disconnect,
- .get_crtc = nv50_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nv50_pior_func = {
+static const struct drm_encoder_funcs
+nv50_pior_func = {
.destroy = nv50_pior_destroy,
};
@@ -2321,149 +3873,464 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
+ "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
- * Framebuffer
+ * Atomic
*****************************************************************************/
static void
-nv50_fbdma_fini(struct nv50_fbdma *fbdma)
+nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(fbdma->base); i++)
- nvif_object_fini(&fbdma->base[i]);
- nvif_object_fini(&fbdma->core);
- list_del(&fbdma->head);
- kfree(fbdma);
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_dmac *core = &disp->mast.base;
+ struct nv50_mstm *mstm;
+ struct drm_encoder *encoder;
+ u32 *push;
+
+ NV_ATOMIC(drm, "commit core %08x\n", interlock);
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ mstm = nouveau_encoder(encoder)->dp.mstm;
+ if (mstm && mstm->modified)
+ nv50_mstm_prepare(mstm);
+ }
+ }
+
+ if ((push = evo_wait(core, 5))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, interlock);
+ evo_data(push, 0x00000000);
+ nouveau_bo_wr32(disp->sync, 0, 0x00000000);
+ evo_kick(push, core);
+ if (nvif_msec(&drm->device, 2000ULL,
+ if (nouveau_bo_rd32(disp->sync, 0))
+ break;
+ usleep_range(1, 2);
+ ) < 0)
+ NV_ERROR(drm, "EVO timeout\n");
+ }
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ mstm = nouveau_encoder(encoder)->dp.mstm;
+ if (mstm && mstm->modified)
+ nv50_mstm_cleanup(mstm);
+ }
+ }
}
-static int
-nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind)
+static void
+nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_mast *mast = nv50_mast(dev);
- struct __attribute__ ((packed)) {
- struct nv_dma_v0 base;
- union {
- struct nv50_dma_v0 nv50;
- struct gf100_dma_v0 gf100;
- struct gf119_dma_v0 gf119;
- };
- } args = {};
- struct nv50_fbdma *fbdma;
- struct drm_crtc *crtc;
- u32 size = sizeof(args.base);
- int ret;
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+ u32 interlock_core = 0;
+ u32 interlock_chan = 0;
+ int i;
+
+ NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
+ drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
- list_for_each_entry(fbdma, &disp->fbdma, head) {
- if (fbdma->core.handle == name)
- return 0;
+ if (atom->lock_core)
+ mutex_lock(&disp->mutex);
+
+ /* Disable head(s). */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
+ asyh->clr.mask, asyh->set.mask);
+
+ if (asyh->clr.mask) {
+ nv50_head_flush_clr(head, asyh, atom->flush_disable);
+ interlock_core |= 1;
+ }
}
- fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL);
- if (!fbdma)
- return -ENOMEM;
- list_add(&fbdma->head, &disp->fbdma);
+ /* Disable plane(s). */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
- args.base.target = NV_DMA_V0_TARGET_VRAM;
- args.base.access = NV_DMA_V0_ACCESS_RDWR;
- args.base.start = offset;
- args.base.limit = offset + length - 1;
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
+ asyw->clr.mask, asyw->set.mask);
+ if (!asyw->clr.mask)
+ continue;
- if (drm->device.info.chipset < 0x80) {
- args.nv50.part = NV50_DMA_V0_PART_256;
- size += sizeof(args.nv50);
- } else
- if (drm->device.info.chipset < 0xc0) {
- args.nv50.part = NV50_DMA_V0_PART_256;
- args.nv50.kind = kind;
- size += sizeof(args.nv50);
- } else
- if (drm->device.info.chipset < 0xd0) {
- args.gf100.kind = kind;
- size += sizeof(args.gf100);
- } else {
- args.gf119.page = GF119_DMA_V0_PAGE_LP;
- args.gf119.kind = kind;
- size += sizeof(args.gf119);
+ interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
+ atom->flush_disable,
+ asyw);
+ }
+
+ /* Disable output path(s). */
+ list_for_each_entry(outp, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
+ outp->clr.mask, outp->set.mask);
+
+ if (outp->clr.mask) {
+ help->disable(encoder);
+ interlock_core |= 1;
+ if (outp->flush_disable) {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ interlock_core = 0;
+ interlock_chan = 0;
+ }
+ }
+ }
+
+ /* Flush disable. */
+ if (interlock_core) {
+ if (atom->flush_disable) {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ interlock_core = 0;
+ interlock_chan = 0;
+ }
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Update output path(s). */
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
+ outp->set.mask, outp->clr.mask);
+
+ if (outp->set.mask) {
+ help->enable(encoder);
+ interlock_core = 1;
+ }
+
+ list_del(&outp->head);
+ kfree(outp);
+ }
+
+ /* Update head(s). */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
struct nv50_head *head = nv50_head(crtc);
- int ret = nvif_object_init(&head->sync.base.base.user, name,
- NV_DMA_IN_MEMORY, &args, size,
- &fbdma->base[head->base.index]);
- if (ret) {
- nv50_fbdma_fini(fbdma);
- return ret;
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set(head, asyh);
+ interlock_core = 1;
}
}
- ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
- &args, size, &fbdma->core);
- if (ret) {
- nv50_fbdma_fini(fbdma);
+ /* Update plane(s). */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
+ asyw->set.mask, asyw->clr.mask);
+ if ( !asyw->set.mask &&
+ (!asyw->clr.mask || atom->flush_disable))
+ continue;
+
+ interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
+ }
+
+ /* Flush update. */
+ if (interlock_core) {
+ if (!interlock_chan && atom->state.legacy_cursor_update) {
+ u32 *push = evo_wait(&disp->mast, 2);
+ if (push) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &disp->mast);
+ }
+ } else {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ }
+ }
+
+ if (atom->lock_core)
+ mutex_unlock(&disp->mutex);
+
+ /* Wait for HW to signal completion. */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ int ret = nv50_wndw_wait_armed(wndw, asyw);
+ if (ret)
+ NV_ERROR(drm, "%s: timeout\n", plane->name);
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc->state->event) {
+ unsigned long flags;
+ /* Get correct count/ts if racing with vblank irq */
+ drm_accurate_vblank_count(crtc);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+ }
+
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_state_put(state);
+}
+
+static void
+nv50_disp_atomic_commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state =
+ container_of(work, typeof(*state), commit_work);
+ nv50_disp_atomic_commit_tail(state);
+}
+
+static int
+nv50_disp_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool nonblock)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ bool active = false;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ goto done;
+
+ INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ goto done;
+
+ if (!nonblock) {
+ ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ if (ret)
+ goto done;
+ }
+
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane_state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (asyw->set.image) {
+ asyw->ntfy.handle = wndw->dmac->sync.handle;
+ asyw->ntfy.offset = wndw->ntfy;
+ asyw->ntfy.awaken = false;
+ asyw->set.ntfy = true;
+ nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
+ wndw->ntfy ^= 0x10;
+ }
+ }
+
+ drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
+
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ nv50_disp_atomic_commit_tail(state);
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->state->enable) {
+ if (!drm->have_disp_power_ref) {
+ drm->have_disp_power_ref = true;
+ return ret;
+ }
+ active = true;
+ break;
+ }
+ }
+
+ if (!active && drm->have_disp_power_ref) {
+ pm_runtime_put_autosuspend(dev->dev);
+ drm->have_disp_power_ref = false;
+ }
+
+done:
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
+static struct nv50_outp_atom *
+nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
+{
+ struct nv50_outp_atom *outp;
+
+ list_for_each_entry(outp, &atom->outp, head) {
+ if (outp->encoder == encoder)
+ return outp;
+ }
+
+ outp = kzalloc(sizeof(*outp), GFP_KERNEL);
+ if (!outp)
+ return ERR_PTR(-ENOMEM);
+
+ list_add(&outp->head, &atom->outp);
+ outp->encoder = encoder;
+ return outp;
+}
+
+static int
+nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = connector->state->best_encoder;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector->state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
+ if (crtc->state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ outp->flush_disable = true;
+ atom->flush_disable = true;
+ }
+ outp->clr.ctrl = true;
+ atom->lock_core = true;
}
return 0;
}
-static void
-nv50_fb_dtor(struct drm_framebuffer *fb)
+static int
+nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
+ struct drm_connector_state *connector_state)
{
+ struct drm_encoder *encoder = connector_state->best_encoder;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector_state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
+ if (crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ outp->set.ctrl = true;
+ atom->lock_core = true;
+ }
+
+ return 0;
}
static int
-nv50_fb_ctor(struct drm_framebuffer *fb)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nouveau_drm *drm = nouveau_drm(fb->dev);
- struct nouveau_bo *nvbo = nv_fb->nvbo;
- struct nv50_disp *disp = nv50_disp(fb->dev);
- u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
- u8 tile = nvbo->tile_mode;
-
- if (drm->device.info.chipset >= 0xc0)
- tile >>= 4; /* yep.. */
-
- switch (fb->depth) {
- case 8: nv_fb->r_format = 0x1e00; break;
- case 15: nv_fb->r_format = 0xe900; break;
- case 16: nv_fb->r_format = 0xe800; break;
- case 24:
- case 32: nv_fb->r_format = 0xcf00; break;
- case 30: nv_fb->r_format = 0xd100; break;
- default:
- NV_ERROR(drm, "unknown depth %d\n", fb->depth);
- return -EINVAL;
+nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ int ret, i;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ ret = nv50_disp_outp_atomic_check_clr(atom, connector);
+ if (ret)
+ return ret;
+
+ ret = nv50_disp_outp_atomic_check_set(atom, connector_state);
+ if (ret)
+ return ret;
}
- if (disp->disp->oclass < G82_DISP) {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x00100000);
- nv_fb->r_format |= kind << 16;
- } else
- if (disp->disp->oclass < GF110_DISP) {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x00100000);
- } else {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x01000000);
+ return 0;
+}
+
+static void
+nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ list_del(&outp->head);
+ kfree(outp);
}
- nv_fb->r_handle = 0xffff0000 | kind;
- return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0,
- drm->device.info.ram_user, kind);
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+nv50_disp_atomic_state_free(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ drm_atomic_state_default_release(&atom->state);
+ kfree(atom);
}
+static struct drm_atomic_state *
+nv50_disp_atomic_state_alloc(struct drm_device *dev)
+{
+ struct nv50_atom *atom;
+ if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
+ drm_atomic_state_init(dev, &atom->state) < 0) {
+ kfree(atom);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&atom->outp);
+ return &atom->state;
+}
+
+static const struct drm_mode_config_funcs
+nv50_disp_func = {
+ .fb_create = nouveau_user_framebuffer_create,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
+ .atomic_check = nv50_disp_atomic_check,
+ .atomic_commit = nv50_disp_atomic_commit,
+ .atomic_state_alloc = nv50_disp_atomic_state_alloc,
+ .atomic_state_clear = nv50_disp_atomic_state_clear,
+ .atomic_state_free = nv50_disp_atomic_state_free,
+};
+
/******************************************************************************
* Init
*****************************************************************************/
@@ -2471,12 +4338,30 @@ nv50_fb_ctor(struct drm_framebuffer *fb)
void
nv50_display_fini(struct drm_device *dev)
{
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (plane->funcs != &nv50_wndw)
+ continue;
+ nv50_wndw_fini(wndw);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ nv_encoder = nouveau_encoder(encoder);
+ nv50_mstm_fini(nv_encoder->dp.mstm);
+ }
+ }
}
int
nv50_display_init(struct drm_device *dev)
{
- struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
struct drm_crtc *crtc;
u32 *push;
@@ -2484,16 +4369,35 @@ nv50_display_init(struct drm_device *dev)
if (!push)
return -EBUSY;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nv50_sync *sync = nv50_sync(crtc);
-
- nv50_crtc_lut_load(crtc);
- nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
- }
-
evo_mthd(push, 0x0088, 1);
evo_data(push, nv50_mast(dev)->base.sync.handle);
evo_kick(push, nv50_mast(dev));
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ const struct drm_encoder_helper_funcs *help;
+ struct nouveau_encoder *nv_encoder;
+
+ nv_encoder = nouveau_encoder(encoder);
+ help = encoder->helper_private;
+ if (help && help->dpms)
+ help->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ nv50_mstm_init(nv_encoder->dp.mstm);
+ }
+ }
+
+ drm_for_each_crtc(crtc, dev) {
+ nv50_head_lut_load(crtc);
+ }
+
+ drm_for_each_plane(plane, dev) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (plane->funcs != &nv50_wndw)
+ continue;
+ nv50_wndw_init(wndw);
+ }
+
return 0;
}
@@ -2501,11 +4405,6 @@ void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_fbdma *fbdma, *fbtmp;
-
- list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) {
- nv50_fbdma_fini(fbdma);
- }
nv50_dmac_destroy(&disp->mast.base, disp->disp);
@@ -2518,6 +4417,10 @@ nv50_display_destroy(struct drm_device *dev)
kfree(disp);
}
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
int
nv50_display_create(struct drm_device *dev)
{
@@ -2532,15 +4435,17 @@ nv50_display_create(struct drm_device *dev)
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
- INIT_LIST_HEAD(&disp->fbdma);
+
+ mutex_init(&disp->mutex);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
- nouveau_display(dev)->fb_ctor = nv50_fb_ctor;
- nouveau_display(dev)->fb_dtor = nv50_fb_dtor;
disp->disp = &nouveau_display(dev)->disp;
+ dev->mode_config.funcs = &nv50_disp_func;
+ if (nouveau_atomic)
+ dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2572,7 +4477,7 @@ nv50_display_create(struct drm_device *dev)
crtcs = 2;
for (i = 0; i < crtcs; i++) {
- ret = nv50_crtc_create(dev, i);
+ ret = nv50_head_create(dev, i);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 70da347aa8c5..918187cee84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,11 +35,4 @@ int nv50_display_create(struct drm_device *);
void nv50_display_destroy(struct drm_device *);
int nv50_display_init(struct drm_device *);
void nv50_display_fini(struct drm_device *);
-
-void nv50_display_flip_stop(struct drm_crtc *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *, u32 swap_interval);
-
-struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index af3d3c49411a..327dcd7901ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -30,7 +30,7 @@ int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -65,7 +65,7 @@ int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -93,7 +93,7 @@ int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
@@ -148,8 +148,8 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
- struct drm_device *dev = nfbdev->dev;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
+ struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 4d6f202b7770..f68c7054fd53 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -35,13 +35,12 @@
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
- int ret, i;
+ int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -60,23 +59,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
.limit = limit,
}, sizeof(struct nv_dma_v0),
&fctx->sema);
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- u32 start = bo->bo.mem.start * PAGE_SIZE;
- u32 limit = start + bo->bo.mem.size - 1;
-
- ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
- NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
- .target = NV_DMA_V0_TARGET_VRAM,
- .access = NV_DMA_V0_ACCESS_RDWR,
- .start = start,
- .limit = limit,
- }, sizeof(struct nv_dma_v0),
- &fctx->head[i]);
- }
-
if (ret)
nv10_fence_context_del(chan);
return ret;
@@ -97,7 +79,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 127;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d8e6d6..52b87ae83e7b 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -28,13 +28,6 @@
#include "nv50_display.h"
-u64
-nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
- struct nv84_fence_chan *fctx = chan->fence;
- return fctx->dispc_vma[crtc].offset;
-}
-
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
@@ -110,15 +103,8 @@ nv84_fence_read(struct nouveau_channel *chan)
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
- int i;
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
@@ -134,7 +120,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
struct nouveau_cli *cli = (void *)chan->user.client;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- int ret, i;
+ int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -154,12 +140,6 @@ nv84_fence_context_new(struct nouveau_channel *chan)
&fctx->vma_gart);
}
- /* map display semaphore buffers into channel's vm */
- for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
- }
-
if (ret)
nv84_fence_context_del(chan);
return ret;
@@ -229,7 +209,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv84_fence_context_del;
priv->base.contexts = fifo->nr;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
/* Use VRAM if there is any ; otherwise fallback to system memory */
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 054b6a056d99..90f27bfa381f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -30,7 +30,7 @@ int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -65,7 +65,7 @@ int
nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -93,7 +93,7 @@ int
nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
@@ -148,8 +148,8 @@ int
nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+ struct drm_device *dev = nfbdev->helper.dev;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 1ee9294eca2e..29c20dfd894d 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -55,7 +55,7 @@ nvif_client_fini(struct nvif_client *client)
}
}
-const struct nvif_driver *
+static const struct nvif_driver *
nvif_drivers[] = {
#ifdef __KERNEL__
&nvif_driver_nvkm,
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index b0787ff833ef..278b3933dc96 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -155,10 +155,8 @@ nvif_notify_fini(struct nvif_notify *notify)
int ret = nvif_notify_put(notify);
if (ret >= 0 && object) {
ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret == 0) {
- notify->object = NULL;
- kfree((void *)notify->data);
- }
+ notify->object = NULL;
+ kfree((void *)notify->data);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 34ecd4a7e0c1..058ff46b5f16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -20,6 +20,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <core/device.h>
+#include <core/firmware.h>
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index a4458a8eb30a..255d81ccf916 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -4,4 +4,4 @@ nvkm-y += nvkm/engine/ce/gk104.o
nvkm-y += nvkm/engine/ce/gm107.o
nvkm-y += nvkm/engine/ce/gm200.o
nvkm-y += nvkm/engine/ce/gp100.o
-nvkm-y += nvkm/engine/ce/gp104.o
+nvkm-y += nvkm/engine/ce/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index 05bb65608dfe..d9ca9636a3e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_ce_data[] = {
+static uint32_t gf100_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ uint32_t gf100_ce_data[] = {
0x00000800,
};
-uint32_t gf100_ce_code[] = {
+static uint32_t gf100_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index 972281d10f38..f0a1cf31c7ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_ce_data[] = {
+static uint32_t gt215_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ uint32_t gt215_ce_data[] = {
0x00000800,
};
-uint32_t gt215_ce_code[] = {
+static uint32_t gt215_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
index 20e019788a53..985c8f653874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
static const struct nvkm_engine_func
-gp104_ce = {
+gp102_ce = {
.intr = gp100_ce_intr,
.sclass = {
{ -1, -1, PASCAL_DMA_COPY_B },
@@ -37,8 +37,8 @@ gp104_ce = {
};
int
-gp104_ce_new(struct nvkm_device *device, int index,
+gp102_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gp102_ce, device, index, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7218a067a6c5..2cbcffe78c3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1357,7 +1357,7 @@ nvc0_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1394,7 +1394,7 @@ nvc1_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1430,7 +1430,7 @@ nvc3_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1466,7 +1466,7 @@ nvc4_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1503,7 +1503,7 @@ nvc8_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1540,7 +1540,7 @@ nvce_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1577,7 +1577,7 @@ nvcf_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1612,6 +1612,7 @@ nvd7_chipset = {
.pci = gf106_pci_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -1647,7 +1648,7 @@ nvd9_chipset = {
.pmu = gf119_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -1851,7 +1852,7 @@ nvf1_chipset = {
.fb = gk104_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1965,7 +1966,7 @@ nv117_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1999,7 +2000,7 @@ nv118_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2130,7 +2131,7 @@ nv12b_chipset = {
.bar = gk20a_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
- .fb = gk20a_fb_new,
+ .fb = gm20b_fb_new,
.fuse = gm107_fuse_new,
.ibus = gk20a_ibus_new,
.imem = gk20a_instmem_new,
@@ -2166,6 +2167,7 @@ nv130_chipset = {
.mmu = gf100_mmu_new,
.secboot = gm200_secboot_new,
.pci = gp100_pci_new,
+ .pmu = gp100_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[0] = gp100_ce_new,
@@ -2182,13 +2184,42 @@ nv130_chipset = {
};
static const struct nvkm_device_chip
+nv132_chipset = {
+ .name = "GP102",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
+static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
.bar = gf100_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
- .fb = gp104_fb_new,
+ .fb = gp102_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
@@ -2198,13 +2229,14 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[0] = gp104_ce_new,
- .ce[1] = gp104_ce_new,
- .ce[2] = gp104_ce_new,
- .ce[3] = gp104_ce_new,
- .disp = gp104_disp_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
};
@@ -2643,6 +2675,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
case 0x130: device->chip = &nv130_chipset; break;
+ case 0x132: device->chip = &nv132_chipset; break;
case 0x134: device->chip = &nv134_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad0300cfa5..74a1ffa425f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1665,14 +1665,31 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
*pdevice = &pdev->device;
pdev->pdev = pci_dev;
- return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
- pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
- pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
- NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
- (u64)pci_domain_nr(pci_dev->bus) << 32 |
- pci_dev->bus->number << 16 |
- PCI_SLOT(pci_dev->devfn) << 8 |
- PCI_FUNC(pci_dev->devfn), name,
- cfg, dbg, detect, mmio, subdev_mask,
- &pdev->device);
+ ret = nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+ pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+ pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+ NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+ (u64)pci_domain_nr(pci_dev->bus) << 32 |
+ pci_dev->bus->number << 16 |
+ PCI_SLOT(pci_dev->devfn) << 8 |
+ PCI_FUNC(pci_dev->devfn), name,
+ cfg, dbg, detect, mmio, subdev_mask,
+ &pdev->device);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Set a preliminary DMA mask based on the .dma_bits member of the
+ * MMU subdevice. This allows other subdevices to create DMA mappings
+ * in their init() or oneinit() methods, which may be called before the
+ * TTM layer sets the DMA mask definitively.
+	 * This is necessary for platforms where the default DMA mask of 32
+	 * bits does not cover any system memory, i.e., when all RAM is > 4 GB.
+ */
+ if (pdev->device.mmu)
+ dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9b638bd905ff..f2bc0b7d9b93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -102,7 +102,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
if (iommu_present(&platform_bus_type)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
- if (IS_ERR(tdev->iommu.domain))
+ if (!tdev->iommu.domain)
goto error;
/*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 79a8f71cf788..513ee6b79553 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -326,7 +326,7 @@ nvkm_udevice = {
.sclass = nvkm_udevice_child_get,
};
-int
+static int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 77a52b54a31e..fa05d16ae948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -11,7 +11,7 @@ nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
nvkm-y += nvkm/engine/disp/gm200.o
nvkm-y += nvkm/engine/disp/gp100.o
-nvkm-y += nvkm/engine/disp/gp104.o
+nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/outpdp.o
@@ -48,14 +48,14 @@ nvkm-y += nvkm/engine/disp/rootgk110.o
nvkm-y += nvkm/engine/disp/rootgm107.o
nvkm-y += nvkm/engine/disp/rootgm200.o
nvkm-y += nvkm/engine/disp/rootgp100.o
-nvkm-y += nvkm/engine/disp/rootgp104.o
+nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/dmacnv50.o
nvkm-y += nvkm/engine/disp/dmacgf119.o
-nvkm-y += nvkm/engine/disp/dmacgp104.o
+nvkm-y += nvkm/engine/disp/dmacgp102.o
nvkm-y += nvkm/engine/disp/basenv50.o
nvkm-y += nvkm/engine/disp/baseg84.o
@@ -64,7 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
nvkm-y += nvkm/engine/disp/basegf119.o
nvkm-y += nvkm/engine/disp/basegk104.o
nvkm-y += nvkm/engine/disp/basegk110.o
-nvkm-y += nvkm/engine/disp/basegp104.o
+nvkm-y += nvkm/engine/disp/basegp102.o
nvkm-y += nvkm/engine/disp/corenv50.o
nvkm-y += nvkm/engine/disp/coreg84.o
@@ -77,7 +77,7 @@ nvkm-y += nvkm/engine/disp/coregk110.o
nvkm-y += nvkm/engine/disp/coregm107.o
nvkm-y += nvkm/engine/disp/coregm200.o
nvkm-y += nvkm/engine/disp/coregp100.o
-nvkm-y += nvkm/engine/disp/coregp104.o
+nvkm-y += nvkm/engine/disp/coregp102.o
nvkm-y += nvkm/engine/disp/ovlynv50.o
nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -85,7 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
nvkm-y += nvkm/engine/disp/ovlygt215.o
nvkm-y += nvkm/engine/disp/ovlygf119.o
nvkm-y += nvkm/engine/disp/ovlygk104.o
-nvkm-y += nvkm/engine/disp/ovlygp104.o
+nvkm-y += nvkm/engine/disp/ovlygp102.o
nvkm-y += nvkm/engine/disp/piocnv50.o
nvkm-y += nvkm/engine/disp/piocgf119.o
@@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o
nvkm-y += nvkm/engine/disp/cursgt215.o
nvkm-y += nvkm/engine/disp/cursgf119.o
nvkm-y += nvkm/engine/disp/cursgk104.o
+nvkm-y += nvkm/engine/disp/cursgp102.o
nvkm-y += nvkm/engine/disp/oimmnv50.o
nvkm-y += nvkm/engine/disp/oimmg84.o
nvkm-y += nvkm/engine/disp/oimmgt215.o
nvkm-y += nvkm/engine/disp/oimmgf119.o
nvkm-y += nvkm/engine/disp/oimmgk104.o
+nvkm-y += nvkm/engine/disp/oimmgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
index 51688e37c54e..8a3cdeef8d2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
@@ -27,12 +27,12 @@
#include <nvif/class.h>
const struct nv50_disp_dmac_oclass
-gp104_disp_base_oclass = {
+gp102_disp_base_oclass = {
.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_base_new,
- .func = &gp104_disp_dmac_func,
+ .func = &gp102_disp_dmac_func,
.mthd = &gf119_disp_base_chan_mthd,
.chid = 1,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index dd2953bc9264..524a24eae1a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
if (mthd->addr) {
snprintf(cname_, sizeof(cname_), "%s %d",
- mthd->name, chan->chid);
+ mthd->name, chan->chid.user);
cname = cname_;
}
@@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
notify->size = sizeof(struct nvif_notify_uevent_rep);
notify->types = 1;
- notify->index = chan->chid;
+ notify->index = chan->chid.user;
return 0;
}
@@ -153,27 +153,27 @@ nv50_disp_chan_uevent = {
.fini = nv50_disp_chan_uevent_fini,
};
-int
+static int
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
- *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+ *data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
return 0;
}
-int
+static int
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
- nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+ nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
return 0;
}
-int
+static int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
struct nvkm_event **pevent)
{
@@ -189,14 +189,14 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
return -EINVAL;
}
-int
+static int
nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
*addr = device->func->resource_addr(device, 0) +
- 0x640000 + (chan->chid * 0x1000);
+ 0x640000 + (chan->chid.user * 0x1000);
*size = 0x001000;
return 0;
}
@@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
- if (chan->chid >= 0)
- disp->chan[chan->chid] = NULL;
+ if (chan->chid.user >= 0)
+ disp->chan[chan->chid.user] = NULL;
return chan->func->dtor ? chan->func->dtor(chan) : chan;
}
@@ -263,7 +263,7 @@ nv50_disp_chan = {
int
nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid, int head,
+ struct nv50_disp_root *root, int ctrl, int user, int head,
const struct nvkm_oclass *oclass,
struct nv50_disp_chan *chan)
{
@@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
chan->func = func;
chan->mthd = mthd;
chan->root = root;
- chan->chid = chid;
+ chan->chid.ctrl = ctrl;
+ chan->chid.user = user;
chan->head = head;
- if (disp->chan[chan->chid]) {
- chan->chid = -1;
+ if (disp->chan[chan->chid.user]) {
+ chan->chid.user = -1;
return -EBUSY;
}
- disp->chan[chan->chid] = chan;
+ disp->chan[chan->chid.user] = chan;
return 0;
}
int
nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid, int head,
+ struct nv50_disp_root *root, int ctrl, int user, int head,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
@@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
return -ENOMEM;
*pobject = &chan->object;
- return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+ return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
+ head, oclass, chan);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index f5f683d9fd20..737b38f6fbd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -7,7 +7,11 @@ struct nv50_disp_chan {
const struct nv50_disp_chan_func *func;
const struct nv50_disp_chan_mthd *mthd;
struct nv50_disp_root *root;
- int chid;
+
+ struct {
+ int ctrl;
+ int user;
+ } chid;
int head;
struct nvkm_object object;
@@ -25,11 +29,11 @@ struct nv50_disp_chan_func {
int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid, int head,
+ struct nv50_disp_root *, int ctrl, int user, int head,
const struct nvkm_oclass *, struct nv50_disp_chan *);
int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid, int head,
+ struct nv50_disp_root *, int ctrl, int user, int head,
const struct nvkm_oclass *, struct nvkm_object **);
extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
@@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
struct nv50_disp_pioc_oclass {
int (*ctor)(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
struct nvkm_sclass base;
const struct nv50_disp_chan_func *func;
const struct nv50_disp_chan_mthd *mthd;
- int chid;
+ struct {
+ int ctrl;
+ int user;
+ } chid;
};
extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
@@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
index 019379a3a01c..c65c9f3ff69f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-const struct nv50_disp_mthd_list
+static const struct nv50_disp_mthd_list
g94_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
@@ -43,8 +43,8 @@ g94_disp_core_chan_mthd = {
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &g84_disp_core_mthd_dac },
- { "SOR", 4, &g94_disp_core_mthd_sor },
+ { "DAC", 3, &g84_disp_core_mthd_dac },
+ { "SOR", 4, &g94_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
{ "HEAD", 2, &g84_disp_core_mthd_head },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index 6922f4007b61..b0df4b752b8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -29,7 +29,7 @@
#include <nvif/class.h>
static int
-gp104_disp_core_init(struct nv50_disp_dmac *chan)
+gp102_disp_core_init(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -59,20 +59,20 @@ gp104_disp_core_init(struct nv50_disp_dmac *chan)
return 0;
}
-const struct nv50_disp_dmac_func
-gp104_disp_core_func = {
- .init = gp104_disp_core_init,
+static const struct nv50_disp_dmac_func
+gp102_disp_core_func = {
+ .init = gp102_disp_core_init,
.fini = gf119_disp_core_fini,
.bind = gf119_disp_dmac_bind,
};
const struct nv50_disp_dmac_oclass
-gp104_disp_core_oclass = {
- .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
+gp102_disp_core_oclass = {
+ .base.oclass = GP102_DISP_CORE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_core_new,
- .func = &gp104_disp_core_func,
+ .func = &gp102_disp_core_func,
.mthd = &gk104_disp_core_chan_mthd,
.chid = 0,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
index dd99fc7060b1..fa781b5a7e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -33,5 +33,5 @@ g84_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index 2a1574e06ad6..2be6fb052c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -33,5 +33,5 @@ gf119_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &gf119_disp_pioc_func,
- .chid = 13,
+ .chid = { 13, 13 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
index 28e8f06c9472..2a99db4bf8f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -33,5 +33,5 @@ gk104_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &gf119_disp_pioc_func,
- .chid = 13,
+ .chid = { 13, 13 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
new file mode 100644
index 000000000000..e958210d8105
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_curs_oclass = {
+ .base.oclass = GK104_DISP_CURSOR,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = { 13, 17 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
index d8a4b9ca139c..00a7f3564450 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -33,5 +33,5 @@ gt215_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index 8b1320499a0f..82ff82d8c1ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -33,7 +33,7 @@
int
nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid,
+ struct nv50_disp_root *root, int ctrl, int user,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
@@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
} else
return ret;
- return nv50_disp_chan_new_(func, mthd, root, chid + head,
+ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
head, oclass, pobject);
}
@@ -65,5 +65,5 @@ nv50_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index a57f7cef307a..ce7cd74fbd5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->base.root->ramht, object,
- chan->base.chid, -9, handle,
- chan->base.chid << 27 | 0x00000001);
+ chan->base.chid.user, -9, handle,
+ chan->base.chid.user << 27 | 0x00000001);
}
void
@@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* deactivate channel */
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
/* disable error reporting and completion notification */
- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
}
static int
@@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index ad24c2c57696..cdead9500343 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -27,31 +27,32 @@
#include <subdev/timer.h>
static int
-gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+gp102_disp_dmac_init(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
@@ -59,8 +60,8 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
}
const struct nv50_disp_dmac_func
-gp104_disp_dmac_func = {
- .init = gp104_disp_dmac_init,
+gp102_disp_dmac_func = {
+ .init = gp102_disp_dmac_init,
.fini = gf119_disp_dmac_fini,
.bind = gf119_disp_dmac_bind,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 9c6645a357b9..0a1381a84552 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
chan->func = func;
ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
- chid, head, oclass, &chan->base);
+ chid, chid, head, oclass, &chan->base);
if (ret)
return ret;
@@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->base.root->ramht, object,
- chan->base.chid, -10, handle,
- chan->base.chid << 28 |
- chan->base.chid);
+ chan->base.chid.user, -10, handle,
+ chan->base.chid.user << 28 |
+ chan->base.chid.user);
}
static void
@@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* deactivate channel */
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
/* disable error reporting and completion notifications */
- nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+ nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
}
static int
@@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+ nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index 43ac05857853..ea4a0d062e31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -30,7 +30,7 @@ int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func gf119_disp_core_func;
void gf119_disp_core_fini(struct nv50_disp_dmac *);
-extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
+extern const struct nv50_disp_dmac_func gp102_disp_dmac_func;
struct nv50_disp_dmac_oclass {
int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -95,7 +95,7 @@ extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_ovly_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 9688970eca47..4a93ceb850ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -319,9 +319,8 @@ static const struct dp_rates {
};
void
-nvkm_dp_train(struct work_struct *w)
+nvkm_dp_train(struct nvkm_output_dp *outp)
{
- struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
struct nv50_disp *disp = nv50_disp(outp->base.disp);
const struct dp_rates *cfg = nvkm_dp_rates;
struct dp_state _dp = {
@@ -353,9 +352,6 @@ nvkm_dp_train(struct work_struct *w)
}
cfg--;
- /* disable link interrupt handling during link training */
- nvkm_notify_put(&outp->irq);
-
/* ensure sink is not in a low-power state */
if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
@@ -400,9 +396,6 @@ nvkm_dp_train(struct work_struct *w)
dp_link_train_fini(dp);
- /* signal completion and enable link interrupt handling */
OUTP_DBG(&outp->base, "training complete");
atomic_set(&outp->lt.done, 1);
- wake_up(&outp->lt.wait);
- nvkm_notify_get(&outp->irq);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 6e10c5e0ef11..baf1dd9ff975 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -1,6 +1,6 @@
#ifndef __NVKM_DISP_DPORT_H__
#define __NVKM_DISP_DPORT_H__
-#include <core/os.h>
+struct nvkm_output_dp;
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
@@ -77,5 +77,5 @@
#define DPCD_SC00_SET_POWER_D0 0x01
#define DPCD_SC00_SET_POWER_D3 0x03
-void nvkm_dp_train(struct work_struct *);
+void nvkm_dp_train(struct nvkm_output_dp *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 29e84b241cca..7b346ccc38b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -203,17 +203,20 @@ gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
/* see note in nv50_disp_intr_unk20_0() */
if (outp && outp->info.type == DCB_OUTPUT_DP) {
struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
- struct nvbios_init init = {
- .subdev = subdev,
- .bios = subdev->device->bios,
- .outp = &outp->info,
- .crtc = head,
- .offset = outpdp->info.script[4],
- .execute = 1,
- };
+ if (!outpdp->lt.mst) {
+ struct nvbios_init init = {
+ .subdev = subdev,
+ .bios = subdev->device->bios,
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
- nvbios_exec(&init);
- atomic_set(&outpdp->lt.done, 0);
+ nvkm_notify_put(&outpdp->irq);
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
}
}
@@ -314,7 +317,7 @@ gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
break;
}
- if (nvkm_output_dp_train(outp, pclk, true))
+ if (nvkm_output_dp_train(outp, pclk))
OUTP_ERR(outp, "link not trained before attach");
} else {
if (disp->func->sor.magic)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 3bf3380336e4..f5d613f82709 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -25,7 +25,7 @@
#include "rootnv50.h"
static void
-gp104_disp_intr_error(struct nv50_disp *disp, int chid)
+gp102_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
@@ -51,12 +51,12 @@ gp104_disp_intr_error(struct nv50_disp *disp, int chid)
}
static const struct nv50_disp_func
-gp104_disp = {
+gp102_disp = {
.intr = gf119_disp_intr,
- .intr_error = gp104_disp_intr_error,
+ .intr_error = gp102_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
- .root = &gp104_disp_root_oclass,
+ .root = &gp102_disp_root_oclass,
.head.vblank_init = gf119_disp_vblank_init,
.head.vblank_fini = gf119_disp_vblank_fini,
.head.scanoutpos = gf119_disp_root_scanoutpos,
@@ -75,7 +75,7 @@ gp104_disp = {
};
int
-gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- return gf119_disp_new_(&gp104_disp, device, index, pdisp);
+ return gf119_disp_new_(&gp102_disp, device, index, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fbb8c7dc18fd..567466f93cd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -590,6 +590,7 @@ nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
.execute = 1,
};
+ nvkm_notify_put(&outpdp->irq);
nvbios_exec(&init);
atomic_set(&outpdp->lt.done, 0);
}
@@ -779,7 +780,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
break;
}
- if (nvkm_output_dp_train(outp, datarate / soff, true))
+ if (nvkm_output_dp_train(outp, datarate / soff))
OUTP_ERR(outp, "link not trained before attach");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
index 54a4ae8d66c6..5ad5d0f5db05 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -33,5 +33,5 @@ g84_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
index c658db54afc5..1f9fd3403f07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &gf119_disp_pioc_func,
- .chid = 9,
+ .chid = { 9, 9 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
index b1fde8c125d6..0c09fe85e952 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &gf119_disp_pioc_func,
- .chid = 9,
+ .chid = { 9, 9 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
new file mode 100644
index 000000000000..abf82365c671
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_oimm_oclass = {
+ .base.oclass = GK104_DISP_OVERLAY,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = { 9, 13 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
index f4e7eb3d1177..1281db28aebd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index 3940b9c966ec..07540f3d32dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -33,7 +33,7 @@
int
nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid,
+ struct nv50_disp_root *root, int ctrl, int user,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
@@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
} else
return ret;
- return nv50_disp_chan_new_(func, mthd, root, chid + head,
+ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
head, oclass, pobject);
}
@@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 3b7a9e7a1ea8..de36f73b14dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -31,7 +31,7 @@
#include <nvif/event.h>
int
-nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
+nvkm_output_dp_train(struct nvkm_output *base, u32 datarate)
{
struct nvkm_output_dp *outp = nvkm_output_dp(base);
bool retrain = true;
@@ -39,6 +39,8 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
u32 linkrate;
int ret, i;
+ mutex_lock(&outp->mutex);
+
/* check that the link is trained at a high enough rate */
ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
if (ret) {
@@ -88,19 +90,10 @@ done:
outp->dpcd[DPCD_RC02] =
outp->base.info.dpconf.link_nr;
}
- atomic_set(&outp->lt.done, 0);
- schedule_work(&outp->lt.work);
- } else {
- nvkm_notify_get(&outp->irq);
- }
-
- if (wait) {
- if (!wait_event_timeout(outp->lt.wait,
- atomic_read(&outp->lt.done),
- msecs_to_jiffies(2000)))
- ret = -ETIMEDOUT;
+ nvkm_dp_train(outp);
}
+ mutex_unlock(&outp->mutex);
return ret;
}
@@ -118,7 +111,7 @@ nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
sizeof(outp->dpcd))) {
- nvkm_output_dp_train(&outp->base, 0, true);
+ nvkm_output_dp_train(&outp->base, 0);
return;
}
}
@@ -165,10 +158,10 @@ nvkm_output_dp_irq(struct nvkm_notify *notify)
};
OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
- nvkm_output_dp_train(&outp->base, 0, true);
+ nvkm_output_dp_train(&outp->base, 0);
nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
- return NVKM_NOTIFY_DROP;
+ return NVKM_NOTIFY_KEEP;
}
static void
@@ -177,7 +170,6 @@ nvkm_output_dp_fini(struct nvkm_output *base)
struct nvkm_output_dp *outp = nvkm_output_dp(base);
nvkm_notify_put(&outp->hpd);
nvkm_notify_put(&outp->irq);
- flush_work(&outp->lt.work);
nvkm_output_dp_enable(outp, false);
}
@@ -187,6 +179,7 @@ nvkm_output_dp_init(struct nvkm_output *base)
struct nvkm_output_dp *outp = nvkm_output_dp(base);
nvkm_notify_put(&outp->base.conn->hpd);
nvkm_output_dp_enable(outp, true);
+ nvkm_notify_get(&outp->irq);
nvkm_notify_get(&outp->hpd);
}
@@ -238,11 +231,6 @@ nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
outp->version, hdr, cnt, len);
- /* link training */
- INIT_WORK(&outp->lt.work, nvkm_dp_train);
- init_waitqueue_head(&outp->lt.wait);
- atomic_set(&outp->lt.done, 0);
-
/* link maintenance */
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
&(struct nvkm_i2c_ntfy_req) {
@@ -257,6 +245,9 @@ nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
return ret;
}
+ mutex_init(&outp->mutex);
+ atomic_set(&outp->lt.done, 0);
+
/* hotplug detect, replaces gpio-based mechanism with aux events */
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
&(struct nvkm_i2c_ntfy_req) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 4e983f6d7032..3c83a561cd88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -29,10 +29,10 @@ struct nvkm_output_dp {
bool present;
u8 dpcd[16];
+ struct mutex mutex;
struct {
- struct work_struct work;
- wait_queue_head_t wait;
atomic_t done;
+ bool mst;
} lt;
};
@@ -41,9 +41,11 @@ struct nvkm_output_dp_func {
int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
int (*drv_ctl)(struct nvkm_output_dp *, int ln, int vs, int pe, int pc);
+ void (*vcpi)(struct nvkm_output_dp *, int head, u8 start_slot,
+ u8 num_slots, u16 pbn, u16 aligned_pbn);
};
-int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+int nvkm_output_dp_train(struct nvkm_output *, u32 rate);
int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
int index, struct dcb_output *, struct nvkm_i2c_aux *,
@@ -63,6 +65,7 @@ int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
+void gf119_sor_dp_vcpi(struct nvkm_output_dp *, int, u8, u8, u16, u16);
int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
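Note: with the work_struct and wait queue removed from nvkm_output_dp, link training now runs synchronously and callers are serialised by the new per-output mutex; the IRQ notifier is armed in init and kept (NVKM_NOTIFY_KEEP) rather than re-requested after each event. A rough sketch of the resulting call pattern (the helper name below is a placeholder, not a real nouveau function):

	static int train_sketch(struct nvkm_output_dp *outp, u32 datarate)
	{
		bool retrain;

		mutex_lock(&outp->mutex);
		retrain = link_needs_training(outp, datarate);	/* placeholder for the DPCD/link-rate checks */
		if (retrain)
			nvkm_dp_train(outp);	/* runs inline now; no schedule_work()/wait_event_timeout() */
		mutex_unlock(&outp->mutex);
		return 0;
	}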
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
index 97e2dd2d908e..589bd2f12b41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
@@ -27,12 +27,12 @@
#include <nvif/class.h>
const struct nv50_disp_dmac_oclass
-gp104_disp_ovly_oclass = {
+gp102_disp_ovly_oclass = {
.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_ovly_new,
- .func = &gp104_disp_dmac_func,
+ .func = &gp102_disp_dmac_func,
.mthd = &gk104_disp_ovly_chan_mthd,
.chid = 5,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index a625a9876e34..0abaa6431943 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
/* disable error reporting and completion notification */
- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
}
static int
@@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* activate channel */
- nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+ u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
if ((tmp & 0x00030000) == 0x00010000)
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 9d2618dacf20..0211e0e8a35f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
}
@@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+ u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
if ((tmp & 0x00030000) == 0x00010000)
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 8443e04dc626..37122ca579ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -27,32 +27,32 @@
#include <nvif/class.h>
static const struct nv50_disp_root_func
-gp104_disp_root = {
+gp102_disp_root = {
.init = gf119_disp_root_init,
.fini = gf119_disp_root_fini,
.dmac = {
- &gp104_disp_core_oclass,
- &gp104_disp_base_oclass,
- &gp104_disp_ovly_oclass,
+ &gp102_disp_core_oclass,
+ &gp102_disp_base_oclass,
+ &gp102_disp_ovly_oclass,
},
.pioc = {
- &gk104_disp_oimm_oclass,
- &gk104_disp_curs_oclass,
+ &gp102_disp_oimm_oclass,
+ &gp102_disp_curs_oclass,
},
};
static int
-gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+gp102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
- return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
+ return nv50_disp_root_new_(&gp102_disp_root, disp, oclass,
data, size, pobject);
}
const struct nvkm_disp_oclass
-gp104_disp_root_oclass = {
- .base.oclass = GP104_DISP,
+gp102_disp_root_oclass = {
+ .base.oclass = GP102_DISP,
.base.minver = -1,
.base.maxver = -1,
- .ctor = gp104_disp_root_new,
+ .ctor = gp102_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 2f9cecd81d04..e70dc6a9ff7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -66,7 +66,7 @@ nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
return 0;
}
-int
+static int
nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
union {
@@ -173,13 +173,56 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
return 0;
} else
if (args->v0.state != 0) {
- nvkm_output_dp_train(&outpdp->base, 0, true);
+ nvkm_output_dp_train(&outpdp->base, 0);
return 0;
}
} else
return ret;
}
break;
+ case NV50_DISP_MTHD_V1_SOR_DP_MST_LINK: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_mst_link_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+ nvif_ioctl(object, "disp sor dp mst link size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(object, "disp sor dp mst link vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ if (outpdp->lt.mst != !!args->v0.state) {
+ outpdp->lt.mst = !!args->v0.state;
+ atomic_set(&outpdp->lt.done, 0);
+ nvkm_output_dp_train(&outpdp->base, 0);
+ }
+ return 0;
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_mst_vcpi_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+ nvif_ioctl(object, "disp sor dp mst vcpi size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(object, "disp sor dp mst vcpi vers %d "
+ "slot %02x/%02x pbn %04x/%04x\n",
+ args->v0.version, args->v0.start_slot,
+ args->v0.num_slots, args->v0.pbn,
+ args->v0.aligned_pbn);
+ if (!outpdp->func->vcpi)
+ return -ENODEV;
+ outpdp->func->vcpi(outpdp, head, args->v0.start_slot,
+ args->v0.num_slots, args->v0.pbn,
+ args->v0.aligned_pbn);
+ return 0;
+ } else
+ return ret;
+ }
+ break;
case NV50_DISP_MTHD_V1_PIOR_PWR:
if (!func->pior.power)
return -ENODEV;
@@ -207,8 +250,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
{
const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
- return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
- oclass, data, size, pobject);
+ return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
+ sclass->chid.user, oclass, data, size, pobject);
}
static int
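Note: the two new NV50_DISP_MTHD_V1_SOR_DP_MST_* methods above expose MST link state and VCPI allocation; the VCPI path bails with -ENODEV unless the SOR implementation provides a .vcpi hook. Purely as a hypothetical illustration of the argument layout implied by the nvif_ioctl trace (values are examples only, and how the DRM side dispatches the method, e.g. via nvif_mthd() on the display object, is an assumption here):

	struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi = {
		.version     = 0,
		.start_slot  = 0,	/* first timeslot allocated to the stream */
		.num_slots   = 2,	/* number of MTP slots */
		.pbn         = 128,	/* payload bandwidth number for the mode */
		.aligned_pbn = 2 * 64,	/* PBN rounded up to the slot allocation (assumption) */
	};
	/* ...passed as the data/size payload of the NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI method. */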
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index ad00f1724b72..b147cf5b3518 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -41,5 +41,5 @@ extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
-extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 1bb9d661e9b3..4510cb6e10a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -45,14 +45,6 @@ static const struct nvkm_output_func
g94_sor_output_func = {
};
-int
-g94_sor_output_new(struct nvkm_disp *disp, int index,
- struct dcb_output *dcbE, struct nvkm_output **poutp)
-{
- return nvkm_output_new_(&g94_sor_output_func, disp,
- index, dcbE, poutp);
-}
-
/*******************************************************************************
* DisplayPort
******************************************************************************/
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 49bd5da194e1..6ffdaa65aa77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -56,11 +56,13 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
clksor |= bw << 18;
dpctrl |= ((1 << nr) - 1) << 16;
+ if (outp->lt.mst)
+ dpctrl |= 0x40000000;
if (ef)
dpctrl |= 0x00004000;
nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
- nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
return 0;
}
@@ -101,12 +103,24 @@ gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
return 0;
}
+void
+gf119_sor_dp_vcpi(struct nvkm_output_dp *outp, int head, u8 slot,
+ u8 slot_nr, u16 pbn, u16 aligned)
+{
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 hoff = head * 0x800;
+
+ nvkm_mask(device, 0x616588 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
+ nvkm_mask(device, 0x61658c + hoff, 0xffffffff, (aligned << 16) | pbn);
+}
+
static const struct nvkm_output_dp_func
gf119_sor_dp_func = {
.pattern = gf119_sor_dp_pattern,
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gf119_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index 37790b2617c5..4cf8ad4d18ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -43,6 +43,7 @@ gm107_sor_dp_func = {
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gf119_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index c44fa7ea672a..81b788fa61be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -120,6 +120,7 @@ gm200_sor_dp_func = {
.lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm200_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index aeb3387a3fb0..15a992b3580a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -129,7 +129,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
}
-int
+static int
g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
@@ -170,7 +170,7 @@ g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}
-int
+static int
g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index cbc67f262322..12d964260a29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -60,6 +60,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret = 0;
+ mutex_lock(&subdev->mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
@@ -67,10 +68,12 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, chan->base.object.client->name);
- ret = -EBUSY;
- if (suspend)
- return ret;
+ ret = -ETIMEDOUT;
}
+ mutex_unlock(&subdev->mutex);
+
+ if (ret && suspend)
+ return ret;
if (offset) {
nvkm_kmap(inst);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index ed4351032ed6..a2df4f3e7763 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -40,7 +40,9 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_client *client = chan->base.object.client;
+ int ret = 0;
+ mutex_lock(&subdev->mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
@@ -48,10 +50,10 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, client->name);
- return -EBUSY;
+ ret = -ETIMEDOUT;
}
-
- return 0;
+ mutex_unlock(&subdev->mutex);
+ return ret;
}
static u32
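Note: both GPFIFO kick paths (gf100 above and gk104 here) now take the subdev mutex around the 0x002634 channel-kick handshake and report a timeout as -ETIMEDOUT instead of -EBUSY; gf100 additionally only aborts the fini on suspend when the kick really failed. The locking pattern, reduced to a sketch (the poll helper is a placeholder for the nvkm_msec() loop each generation uses):

	static int kick_sketch(struct nvkm_subdev *subdev, struct nvkm_device *device, int chid)
	{
		int ret = 0;

		mutex_lock(&subdev->mutex);
		nvkm_wr32(device, 0x002634, chid);		/* request the channel kick */
		if (wait_for_kick_ack(device, chid) < 0)	/* placeholder for the 2s nvkm_msec() poll */
			ret = -ETIMEDOUT;
		mutex_unlock(&subdev->mutex);
		return ret;
	}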
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index c925ade5880e..74a64e3fd59a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -218,7 +218,7 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
}
}
-void
+static void
gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 6d3c5011e18c..4c4b5ab6e46d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -933,7 +933,7 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
}
}
-void
+static void
gm107_grctx_generate_tpcid(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 1e13278cf306..c8bb9191f9a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -106,6 +106,7 @@
#define CP_SEEK_2 0x00c800ff
#include "ctxnv40.h"
+#include "nv50.h"
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 8cb240b65ec2..12a703fe355d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_grgpc_data[] = {
+static uint32_t gf100_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x00000064,
/* 0x0004: gpc_mmio_list_tail */
@@ -36,7 +36,7 @@ uint32_t gf100_grgpc_data[] = {
0x00000000,
};
-uint32_t gf100_grgpc_code[] = {
+static uint32_t gf100_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 550d6ba0933b..ffbfc51200f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf117_grgpc_data[] = {
+static uint32_t gf117_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gf117_grgpc_data[] = {
0x00000000,
};
-uint32_t gf117_grgpc_code[] = {
+static uint32_t gf117_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 271b59d365e5..357f662de571 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk104_grgpc_data[] = {
+static uint32_t gk104_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk104_grgpc_data[] = {
0x00000000,
};
-uint32_t gk104_grgpc_code[] = {
+static uint32_t gk104_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 73b4a32c5d29..4ffc8212a85c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk110_grgpc_data[] = {
+static uint32_t gk110_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk110_grgpc_data[] = {
0x00000000,
};
-uint32_t gk110_grgpc_code[] = {
+static uint32_t gk110_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 018169818317..09196206c9bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_grgpc_data[] = {
+static uint32_t gk208_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk208_grgpc_data[] = {
0x00000000,
};
-uint32_t gk208_grgpc_code[] = {
+static uint32_t gk208_grgpc_code[] = {
0x03140ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index eca007f03fa9..6d7d004363d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gm107_grgpc_data[] = {
+static uint32_t gm107_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gm107_grgpc_data[] = {
0x00000000,
};
-uint32_t gm107_grgpc_code[] = {
+static uint32_t gm107_grgpc_code[] = {
0x03410ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 8015b40a61d6..7538404b8b13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_grhub_data[] = {
+static uint32_t gf100_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gf100_grhub_data[] = {
0x0417e91c,
};
-uint32_t gf100_grhub_code[] = {
+static uint32_t gf100_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 2af90ec6852a..ce000a47ec6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf117_grhub_data[] = {
+static uint32_t gf117_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gf117_grhub_data[] = {
0x0417e91c,
};
-uint32_t gf117_grhub_code[] = {
+static uint32_t gf117_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index e8b8c1c94700..1f26cb6a233c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk104_grhub_data[] = {
+static uint32_t gk104_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk104_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk104_grhub_code[] = {
+static uint32_t gk104_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index f4ed2fb6f714..70436d93efe3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk110_grhub_data[] = {
+static uint32_t gk110_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk110_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk110_grhub_code[] = {
+static uint32_t gk110_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index ed488973c117..e0933a07426a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_grhub_data[] = {
+static uint32_t gk208_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk208_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk208_grhub_code[] = {
+static uint32_t gk208_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 5c9051839557..9b432823bcbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gm107_grhub_data[] = {
+static uint32_t gm107_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gm107_grhub_data[] = {
0x0417e91c,
};
-uint32_t gm107_grhub_code[] = {
+static uint32_t gm107_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 157919c788e6..60a1b5c8214b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1384,7 +1384,7 @@ gf100_gr_intr(struct nvkm_gr *base)
nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
-void
+static void
gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
@@ -1701,7 +1701,7 @@ gf100_gr_oneinit(struct nvkm_gr *base)
return 0;
}
-int
+static int
gf100_gr_init_(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
@@ -1756,6 +1756,50 @@ gf100_gr_ = {
};
int
+gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
+ struct gf100_gr_fuc *fuc, int ret)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct firmware *fw;
+ char f[32];
+
+ /* see if this firmware has a legacy path */
+ if (!strcmp(fwname, "fecs_inst"))
+ fwname = "fuc409c";
+ else if (!strcmp(fwname, "fecs_data"))
+ fwname = "fuc409d";
+ else if (!strcmp(fwname, "gpccs_inst"))
+ fwname = "fuc41ac";
+ else if (!strcmp(fwname, "gpccs_data"))
+ fwname = "fuc41ad";
+ else {
+ /* nope, let's just return the error we got */
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
+ }
+
+ /* yes, try to load from the legacy path */
+ nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", fwname);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
+ }
+ }
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
struct gf100_gr_fuc *fuc)
{
@@ -1765,10 +1809,8 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
int ret;
ret = nvkm_firmware_get(device, fwname, &fw);
- if (ret) {
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
+ if (ret)
+ return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
fuc->size = fw->size;
fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
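Note: gf100_gr_ctor_fw() now degrades gracefully when nvkm_firmware_get() cannot find the new-style blob, retrying the pre-rename names through request_firmware(). Callers stay unchanged; for the FECS instruction blob the effective lookup order becomes roughly the following (illustrative only; the destination struct field name is an assumption):

	ret = gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c);
	/*
	 *  1. "fecs_inst"                    through nvkm_firmware_get()
	 *  2. "nouveau/nv<chipset>_fuc409c"  through request_firmware()
	 *  3. "nouveau/fuc409c"              through request_firmware()
	 */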
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 70335f65c51e..0124e468086e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -102,7 +102,7 @@ gf117_gr_pack_mmio[] = {
#include "fuc/hubgf117.fuc3.h"
-struct gf100_gr_ucode
+static struct gf100_gr_ucode
gf117_gr_fecs_ucode = {
.code.data = gf117_grhub_code,
.code.size = sizeof(gf117_grhub_code),
@@ -112,7 +112,7 @@ gf117_gr_fecs_ucode = {
#include "fuc/gpcgf117.fuc3.h"
-struct gf100_gr_ucode
+static struct gf100_gr_ucode
gf117_gr_gpccs_ucode = {
.code.data = gf117_grgpc_code,
.code.size = sizeof(gf117_grgpc_code),
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 45f965f608a7..2c67fac576d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -308,7 +308,7 @@ gm107_gr_init_bios(struct gf100_gr *gr)
}
}
-int
+static int
gm107_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index f1e15a4d4f64..b4e3c50badc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -187,6 +187,7 @@ nv30_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 300f5ed5de0b..e7ed04b935cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -123,6 +123,7 @@ nv34_gr = {
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
{}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 740df0f52c38..5e8abacbacc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -124,6 +124,7 @@ nv35_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 8616636ad7b4..dde89a4a0f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -71,7 +71,7 @@ nvkm_perfdom_find(struct nvkm_pm *pm, int di)
return NULL;
}
-struct nvkm_perfsig *
+static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
struct nvkm_perfdom *dom = *pdom;
@@ -699,7 +699,7 @@ nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
return 1;
}
-int
+static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
const struct nvkm_specsrc *spec)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index d2901e9a7808..fe2532ee4145 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -102,7 +102,7 @@ gf100_pm_gpc[] = {
{}
};
-const struct nvkm_specdom
+static const struct nvkm_specdom
gf100_pm_part[] = {
{ 0xe0, (const struct nvkm_specsig[]) {
{ 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index eca62221f299..4b57f8814560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t g98_sec_data[] = {
+static uint32_t g98_sec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
0x00000000,
@@ -150,7 +150,7 @@ uint32_t g98_sec_data[] = {
0x00000000,
};
-uint32_t g98_sec_code[] = {
+static uint32_t g98_sec_code[] = {
0x17f004bd,
0x0010fe35,
0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 370dcd8ff7b5..6eff637ac301 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -84,7 +84,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0100000000ULL;
limit = start + device->func->resource_size(device, 3);
- ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
if (ret)
return ret;
@@ -117,7 +117,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0000000000ULL;
limit = start + device->func->resource_size(device, 1);
- ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index dbcb0ef21587..be57220a2e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -31,6 +31,7 @@ nvkm-y += nvkm/subdev/bios/timing.o
nvkm-y += nvkm/subdev/bios/therm.o
nvkm-y += nvkm/subdev/bios/vmap.o
nvkm-y += nvkm/subdev/bios/volt.o
+nvkm-y += nvkm/subdev/bios/vpstate.o
nvkm-y += nvkm/subdev/bios/xpio.o
nvkm-y += nvkm/subdev/bios/M0203.o
nvkm-y += nvkm/subdev/bios/M0205.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index 3756ec91a88d..eaf74eb72983 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -25,16 +25,16 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/boost.h>
-u16
+u32
nvbios_boostTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 boost = 0x0000;
+ u32 boost = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- boost = nvbios_rd16(bios, bit_P.offset + 0x30);
+ boost = nvbios_rd32(bios, bit_P.offset + 0x30);
if (boost) {
*ver = nvbios_rd08(bios, boost + 0);
@@ -52,15 +52,15 @@ nvbios_boostTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (data && idx < *cnt) {
data = data + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@@ -68,14 +68,14 @@ nvbios_boostEe(struct nvkm_bios *bios, int idx,
*len = ssz;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
- u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
+ u32 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
if (data) {
info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
@@ -85,7 +85,7 @@ nvbios_boostEp(struct nvkm_bios *bios, int idx,
return data;
}
-u16
+u32
nvbios_boostEm(struct nvkm_bios *bios, u8 pstate,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
@@ -97,21 +97,21 @@ nvbios_boostEm(struct nvkm_bios *bios, u8 pstate,
return data;
}
-u16
+u32
nvbios_boostSe(struct nvkm_bios *bios, int idx,
- u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
+ u32 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
{
if (data && idx < cnt) {
data = data + *hdr + (idx * len);
*hdr = len;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostSp(struct nvkm_bios *bios, int idx,
- u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
+ u32 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
struct nvbios_boostS *info)
{
data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
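Note: this and the following BIOS parsers (cstep, fan, iccsense, perf, therm, timing, vmap, volt) widen their table pointers from u16 to u32 and read the full 32-bit offset out of the BIT 'P' entry, so tables located beyond the first 64 KiB of the VBIOS image remain reachable. The common pattern, as a sketch:

	u32 table_sketch(struct nvkm_bios *bios, u32 ptr_offset_in_bit_p)
	{
		struct bit_entry bit_P;
		u32 table = 0;

		if (!bit_entry(bios, 'P', &bit_P))
			table = nvbios_rd32(bios, bit_P.offset + ptr_offset_in_bit_p);	/* was nvbios_rd16() */
		return table;	/* 0 still means "not present", replacing the old 0x0000 sentinel */
	}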
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 32e01624a162..5063382d8a6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -25,16 +25,16 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/cstep.h>
-u16
+u32
nvbios_cstepTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
{
struct bit_entry bit_P;
- u16 cstep = 0x0000;
+ u32 cstep = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- cstep = nvbios_rd16(bios, bit_P.offset + 0x34);
+ cstep = nvbios_rd32(bios, bit_P.offset + 0x34);
if (cstep) {
*ver = nvbios_rd08(bios, cstep + 0);
@@ -52,27 +52,27 @@ nvbios_cstepTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len, xnr, xsz;
- u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+ u32 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
if (data && idx < cnt) {
data = data + *hdr + (idx * len);
*hdr = len;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepE *info)
{
- u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
+ u32 data = nvbios_cstepEe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
@@ -81,7 +81,7 @@ nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
return data;
}
-u16
+u32
nvbios_cstepEm(struct nvkm_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
struct nvbios_cstepE *info)
{
@@ -93,24 +93,24 @@ nvbios_cstepEm(struct nvkm_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
return data;
}
-u16
+u32
nvbios_cstepXe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len, xnr, xsz;
- u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+ u32 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
if (data && idx < xnr) {
data = data + *hdr + (cnt * len) + (idx * xsz);
*hdr = xsz;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepXp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepX *info)
{
- u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
+ u32 data = nvbios_cstepXe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
info->freq = nvbios_rd16(bios, data + 0x00) * 1000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 80fed7e78dcb..456f9ea920dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -25,15 +25,15 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/fan.h>
-u16
+static u32
nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 fan = 0x0000;
+ u32 fan = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2 && bit_P.length >= 0x5a)
- fan = nvbios_rd16(bios, bit_P.offset + 0x58);
+ fan = nvbios_rd32(bios, bit_P.offset + 0x58);
if (fan) {
*ver = nvbios_rd08(bios, fan + 0);
@@ -49,25 +49,25 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+static u32
nvbios_fan_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len)
{
- u16 data = nvbios_fan_table(bios, ver, hdr, cnt, len);
+ u32 data = nvbios_fan_table(bios, ver, hdr, cnt, len);
if (data && idx < *cnt)
return data + *hdr + (idx * (*len));
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
u8 ver, hdr, cnt, len;
- u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
+ u32 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
if (data) {
u8 type = nvbios_rd08(bios, data + 0x00);
switch (type) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 084328028af1..3953d11844ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -23,20 +23,21 @@
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/extdev.h>
#include <subdev/bios/iccsense.h>
-static u16
+static u32
nvbios_iccsense_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
u8 *len)
{
struct bit_entry bit_P;
- u16 iccsense;
+ u32 iccsense;
if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
bit_P.length < 0x2c)
return 0;
- iccsense = nvbios_rd16(bios, bit_P.offset + 0x28);
+ iccsense = nvbios_rd32(bios, bit_P.offset + 0x28);
if (!iccsense)
return 0;
@@ -60,7 +61,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
{
struct nvkm_subdev *subdev = &bios->subdev;
u8 ver, hdr, cnt, len, i;
- u16 table, entry;
+ u32 table, entry;
table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len);
if (!table || !cnt)
@@ -77,23 +78,47 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
return -ENOMEM;
for (i = 0; i < cnt; ++i) {
+ struct nvbios_extdev_func extdev;
struct pwr_rail_t *rail = &iccsense->rail[i];
+ u8 res_start = 0;
+ int r;
+
entry = table + hdr + i * len;
switch(ver) {
case 0x10:
rail->mode = nvbios_rd08(bios, entry + 0x1);
rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3);
- rail->rail = nvbios_rd08(bios, entry + 0x4);
+ res_start = 0x3;
break;
case 0x20:
rail->mode = nvbios_rd08(bios, entry);
rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5);
- rail->rail = nvbios_rd08(bios, entry + 0x6);
+ res_start = 0x5;
+ break;
+ };
+
+ if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
+ continue;
+
+ switch (extdev.type) {
+ case NVBIOS_EXTDEV_INA209:
+ case NVBIOS_EXTDEV_INA219:
+ rail->resistor_count = 1;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ rail->resistor_count = 3;
+ break;
+ default:
+ rail->resistor_count = 0;
break;
};
+
+ for (r = 0; r < rail->resistor_count; ++r) {
+ rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
+ rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40);
+ }
+ rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index 3ddf0939ded3..994cc2d7759b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -81,7 +81,7 @@ mxm_sor_map(struct nvkm_bios *bios, u8 conn)
u16 map = nvbios_rd16(bios, mxm + 4);
if (map) {
ver = nvbios_rd08(bios, map);
- if (ver == 0x10) {
+ if (ver == 0x10 || ver == 0x11) {
if (conn < nvbios_rd08(bios, map + 3)) {
map += nvbios_rd08(bios, map + 1);
map += conn;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 636bfb665bb9..c3068358f695 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -26,16 +26,16 @@
#include <subdev/bios/perf.h>
#include <subdev/pci.h>
-u16
+u32
nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 perf = 0x0000;
+ u32 perf = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version <= 2) {
- perf = nvbios_rd16(bios, bit_P.offset + 0);
+ perf = nvbios_rd32(bios, bit_P.offset + 0);
if (perf) {
*ver = nvbios_rd08(bios, perf + 0);
*hdr = nvbios_rd08(bios, perf + 1);
@@ -72,15 +72,15 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_perf_entry(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
if (perf && idx < *cnt) {
perf = perf + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@@ -88,14 +88,14 @@ nvbios_perf_entry(struct nvkm_bios *bios, int idx,
*len = ssz;
return perf;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_perfEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *info)
{
- u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
+ u32 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
info->pstate = nvbios_rd08(bios, perf + 0x00);
switch (!!perf * *ver) {
@@ -163,7 +163,7 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
info->pcie_width = 0xff;
break;
default:
- return 0x0000;
+ return 0;
}
return perf;
}
@@ -202,7 +202,7 @@ nvbios_perf_fan_parse(struct nvkm_bios *bios,
struct nvbios_perf_fan *fan)
{
u8 ver, hdr, cnt, len, snr, ssz;
- u16 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ u32 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!perf)
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 212800ecdce9..7d1d3c6b4b72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -12,6 +12,7 @@ struct nvbios_source {
bool rw;
bool ignore_checksum;
bool no_pcir;
+ bool require_checksum;
};
int nvbios_extend(struct nvkm_bios *, u32 length);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index b2557e87afdd..7deb81b6dbac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -86,9 +86,12 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvbios_checksum(&bios->data[image.base], image.size)) {
nvkm_debug(subdev, "%08x: checksum failed\n",
image.base);
- if (mthd->func->rw)
+ if (!mthd->func->require_checksum) {
+ if (mthd->func->rw)
+ score += 1;
score += 1;
- score += 1;
+ } else
+ return 0;
} else {
score += 3;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 8fecb5ff22a0..06572f8ce914 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -99,6 +99,7 @@ nvbios_acpi_fast = {
.init = acpi_init,
.read = acpi_read_fast,
.rw = false,
+ .require_checksum = true,
};
const struct nvbios_source
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index a54cfec0550d..5babc5a7c7d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -25,17 +25,17 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/therm.h>
-static u16
+static u32
therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
{
struct bit_entry bit_P;
- u16 therm = 0;
+ u32 therm = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 1)
- therm = nvbios_rd16(bios, bit_P.offset + 12);
+ therm = nvbios_rd32(bios, bit_P.offset + 12);
else if (bit_P.version == 2)
- therm = nvbios_rd16(bios, bit_P.offset + 16);
+ therm = nvbios_rd32(bios, bit_P.offset + 16);
else
nvkm_error(&bios->subdev,
"unknown offset for thermal in BIT P %d\n",
@@ -44,7 +44,7 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
/* exit now if we haven't found the thermal table */
if (!therm)
- return 0x0000;
+ return 0;
*ver = nvbios_rd08(bios, therm + 0);
*hdr = nvbios_rd08(bios, therm + 1);
@@ -53,14 +53,14 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return therm + nvbios_rd08(bios, therm + 1);
}
-static u16
+static u32
nvbios_therm_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 therm = therm_table(bios, ver, &hdr, len, &cnt);
+ u32 therm = therm_table(bios, ver, &hdr, len, &cnt);
if (therm && idx < cnt)
return therm + idx * *len;
- return 0x0000;
+ return 0;
}
int
@@ -70,7 +70,7 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
{
s8 thrs_section, sensor_section, offset;
u8 ver, len, i;
- u16 entry;
+ u32 entry;
/* we only support the core domain for now */
if (domain != NVBIOS_THERM_DOMAIN_CORE)
@@ -154,7 +154,7 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
struct nvbios_therm_trip_point *cur_trip = NULL;
u8 ver, len, i;
- u16 entry;
+ u32 entry;
uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
75, 0, 85, 0, 100, 0, 100, 0 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 99f6432ac0af..7e83c3985020 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -25,19 +25,19 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/timing.h>
-u16
+u32
nvbios_timingTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 timing = 0x0000;
+ u32 timing = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 1)
- timing = nvbios_rd16(bios, bit_P.offset + 4);
+ timing = nvbios_rd32(bios, bit_P.offset + 4);
else
if (bit_P.version == 2)
- timing = nvbios_rd16(bios, bit_P.offset + 8);
+ timing = nvbios_rd32(bios, bit_P.offset + 8);
if (timing) {
*ver = nvbios_rd08(bios, timing + 0);
@@ -62,15 +62,15 @@ nvbios_timingTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_timingEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (timing && idx < *cnt) {
timing += *hdr + idx * (*len + (snr * ssz));
*hdr = *len;
@@ -78,14 +78,14 @@ nvbios_timingEe(struct nvkm_bios *bios, int idx,
*len = ssz;
return timing;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_timingEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
- u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
+ u32 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
p->timing_ver = *ver;
p->timing_hdr = *hdr;
switch (!!data * *ver) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index 2f13db745948..c228ca15fa3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -25,15 +25,15 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/vmap.h>
-u16
+u32
nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 vmap = 0x0000;
+ u32 vmap = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2) {
- vmap = nvbios_rd16(bios, bit_P.offset + 0x20);
+ vmap = nvbios_rd32(bios, bit_P.offset + 0x20);
if (vmap) {
*ver = nvbios_rd08(bios, vmap + 0);
switch (*ver) {
@@ -50,40 +50,50 @@ nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *info)
{
- u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
+ u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
switch (!!vmap * *ver) {
case 0x10:
+ info->max0 = 0xff;
+ info->max1 = 0xff;
+ info->max2 = 0xff;
+ break;
case 0x20:
+ info->max0 = nvbios_rd08(bios, vmap + 0x7);
+ info->max1 = nvbios_rd08(bios, vmap + 0x8);
+ if (*len >= 0xc)
+ info->max2 = nvbios_rd08(bios, vmap + 0xc);
+ else
+ info->max2 = 0xff;
break;
}
return vmap;
}
-u16
+u32
nvbios_vmap_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
+ u32 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
if (vmap && idx < cnt) {
vmap = vmap + hdr + (idx * *len);
return vmap;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_vmap_entry *info)
{
- u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
+ u32 vmap = nvbios_vmap_entry(bios, idx, ver, len);
memset(info, 0x00, sizeof(*info));
switch (!!vmap * *ver) {
case 0x10:
@@ -95,7 +105,7 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
break;
case 0x20:
- info->unk0 = nvbios_rd08(bios, vmap + 0x00);
+ info->mode = nvbios_rd08(bios, vmap + 0x00);
info->link = nvbios_rd08(bios, vmap + 0x01);
info->min = nvbios_rd32(bios, vmap + 0x02);
info->max = nvbios_rd32(bios, vmap + 0x06);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 6e0a33648be9..a7797a9e9cbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -25,18 +25,18 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/volt.h>
-u16
+u32
nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 volt = 0x0000;
+ u32 volt = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- volt = nvbios_rd16(bios, bit_P.offset + 0x0c);
+ volt = nvbios_rd32(bios, bit_P.offset + 0x0c);
else
if (bit_P.version == 1)
- volt = nvbios_rd16(bios, bit_P.offset + 0x10);
+ volt = nvbios_rd32(bios, bit_P.offset + 0x10);
if (volt) {
*ver = nvbios_rd08(bios, volt + 0);
@@ -62,33 +62,37 @@ nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_volt *info)
{
- u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
+ u32 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x20:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x05);
+ info->ranged = false;
break;
case 0x30:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x40:
info->type = NVBIOS_VOLT_GPIO;
info->base = nvbios_rd32(bios, volt + 0x04);
info->step = nvbios_rd16(bios, volt + 0x08);
info->vidmask = nvbios_rd08(bios, volt + 0x0b);
+ info->ranged = true; /* XXX: find the flag byte */
/*XXX*/
info->min = 0;
info->max = info->base;
@@ -104,32 +108,34 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000;
info->pwm_range = nvbios_rd32(bios, volt + 0x16);
} else {
- info->type = NVBIOS_VOLT_GPIO;
- info->vidmask = nvbios_rd08(bios, volt + 0x06);
- info->step = nvbios_rd16(bios, volt + 0x16);
+ info->type = NVBIOS_VOLT_GPIO;
+ info->vidmask = nvbios_rd08(bios, volt + 0x06);
+ info->step = nvbios_rd16(bios, volt + 0x16);
+ info->ranged =
+ !!(nvbios_rd08(bios, volt + 0x4) & 0x2);
}
break;
}
return volt;
}
-u16
+u32
nvbios_volt_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
+ u32 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
if (volt && idx < cnt) {
volt = volt + hdr + (idx * *len);
return volt;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_volt_entry *info)
{
- u16 volt = nvbios_volt_entry(bios, idx, ver, len);
+ u32 volt = nvbios_volt_entry(bios, idx, ver, len);
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
@@ -142,7 +148,10 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->vid = nvbios_rd08(bios, volt + 0x01) >> 2;
break;
case 0x40:
+ break;
case 0x50:
+ info->voltage = nvbios_rd32(bios, volt) & 0x001fffff;
+ info->vid = (nvbios_rd32(bios, volt) >> 23) & 0xff;
break;
}
return volt;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
new file mode 100644
index 000000000000..f199270163d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vpstate.h>
+
+static u32
+nvbios_vpstate_offset(struct nvkm_bios *b)
+{
+ struct bit_entry bit_P;
+
+ if (!bit_entry(b, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ return nvbios_rd32(b, bit_P.offset + 0x38);
+ }
+
+ return 0x0000;
+}
+
+int
+nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
+{
+ if (!h)
+ return -EINVAL;
+
+ h->offset = nvbios_vpstate_offset(b);
+ if (!h->offset)
+ return -ENODEV;
+
+ h->version = nvbios_rd08(b, h->offset);
+ switch (h->version) {
+ case 0x10:
+ h->hlen = nvbios_rd08(b, h->offset + 0x1);
+ h->elen = nvbios_rd08(b, h->offset + 0x2);
+ h->slen = nvbios_rd08(b, h->offset + 0x3);
+ h->scount = nvbios_rd08(b, h->offset + 0x4);
+ h->ecount = nvbios_rd08(b, h->offset + 0x5);
+
+ h->base_id = nvbios_rd08(b, h->offset + 0x0f);
+ h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+ h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h,
+ u8 idx, struct nvbios_vpstate_entry *e)
+{
+ u32 offset;
+
+ if (!e || !h || idx > h->ecount)
+ return -EINVAL;
+
+ offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount));
+ e->pstate = nvbios_rd08(b, offset);
+ e->clock_mhz = nvbios_rd16(b, offset + 0x5);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 7102c25320fc..e4c8d310d870 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -27,6 +27,7 @@
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
+#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>
@@ -43,13 +44,13 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
struct nvkm_bios *bios = clk->subdev.device->bios;
struct nvbios_boostE boostE;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
if (data) {
struct nvbios_boostS boostS;
u8 idx = 0, sver, shdr;
- u16 subd;
+ u32 subd;
input = max(boostE.min, input);
input = min(boostE.max, input);
@@ -74,6 +75,88 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
/******************************************************************************
* C-States
*****************************************************************************/
+static bool
+nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
+ u32 max_volt, int temp)
+{
+ const struct nvkm_domain *domain = clk->domains;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
+ int voltage;
+
+ while (domain && domain->name != nv_clk_src_max) {
+ if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
+ u32 freq = cstate->domain[domain->name];
+ switch (clk->boost_mode) {
+ case NVKM_CLK_BOOST_NONE:
+ if (clk->base_khz && freq > clk->base_khz)
+ return false;
+ case NVKM_CLK_BOOST_BIOS:
+ if (clk->boost_khz && freq > clk->boost_khz)
+ return false;
+ }
+ }
+ domain++;
+ }
+
+ if (!volt)
+ return true;
+
+ voltage = nvkm_volt_map(volt, cstate->voltage, temp);
+ if (voltage < 0)
+ return false;
+ return voltage <= min(max_volt, volt->max_uv);
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
+ struct nvkm_cstate *start)
+{
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_volt *volt = device->volt;
+ struct nvkm_cstate *cstate;
+ int max_volt;
+
+ if (!pstate || !start)
+ return NULL;
+
+ if (!volt)
+ return start;
+
+ max_volt = volt->max_uv;
+ if (volt->max0_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max0_id, clk->temp));
+ if (volt->max1_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max1_id, clk->temp));
+ if (volt->max2_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max2_id, clk->temp));
+
+ for (cstate = start; &cstate->head != &pstate->list;
+ cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
+ if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
+ break;
+ }
+
+ return cstate;
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
+{
+ struct nvkm_cstate *cstate;
+ if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
+ return list_last_entry(&pstate->list, typeof(*cstate), head);
+ else {
+ list_for_each_entry(cstate, &pstate->list, head) {
+ if (cstate->id == cstatei)
+ return cstate;
+ }
+ }
+ return NULL;
+}
+
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
@@ -85,7 +168,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
int ret;
if (!list_empty(&pstate->list)) {
- cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+ cstate = nvkm_cstate_get(clk, pstate, cstatei);
+ cstate = nvkm_cstate_find_best(clk, pstate, cstate);
} else {
cstate = &pstate->base;
}
@@ -99,7 +183,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, +1);
if (ret && ret != -ENODEV) {
nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
return ret;
@@ -113,7 +198,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, -1);
if (ret && ret != -ENODEV)
nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
}
@@ -138,22 +224,27 @@ static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
struct nvkm_bios *bios = clk->subdev.device->bios;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
const struct nvkm_domain *domain = clk->domains;
struct nvkm_cstate *cstate = NULL;
struct nvbios_cstepX cstepX;
u8 ver, hdr;
- u16 data;
+ u32 data;
data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
if (!data)
return -ENOENT;
+ if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
+ return -EINVAL;
+
cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (!cstate)
return -ENOMEM;
*cstate = pstate->base;
cstate->voltage = cstepX.voltage;
+ cstate->id = idx;
while (domain && domain->name != nv_clk_src_max) {
if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
@@ -175,7 +266,7 @@ static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
struct nvkm_subdev *subdev = &clk->subdev;
- struct nvkm_ram *ram = subdev->device->fb->ram;
+ struct nvkm_fb *fb = subdev->device->fb;
struct nvkm_pci *pci = subdev->device->pci;
struct nvkm_pstate *pstate;
int ret, idx = 0;
@@ -190,7 +281,8 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
- if (ram && ram->func->calc) {
+ if (fb && fb->ram && fb->ram->func->calc) {
+ struct nvkm_ram *ram = fb->ram;
int khz = pstate->base.domain[nv_clk_src_mem];
do {
ret = ram->func->calc(ram, khz);
@@ -200,7 +292,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
ram->func->tidy(ram);
}
- return nvkm_cstate_prog(clk, pstate, 0);
+ return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}
static void
@@ -214,14 +306,14 @@ nvkm_pstate_work(struct work_struct *work)
return;
clk->pwrsrc = power_supply_is_system_supplied();
- nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
- clk->astate, clk->tstate, clk->dstate);
+ clk->astate, clk->temp, clk->dstate);
pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
if (clk->state_nr && pstate != -1) {
pstate = (pstate < 0) ? clk->astate : pstate;
- pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
+ pstate = min(pstate, clk->state_nr - 1);
pstate = max(pstate, clk->dstate);
} else {
pstate = clk->pstate = -1;
@@ -316,7 +408,7 @@ nvkm_pstate_new(struct nvkm_clk *clk, int idx)
struct nvbios_cstepE cstepE;
struct nvbios_perfE perfE;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
if (!data)
@@ -448,13 +540,12 @@ nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
}
int
-nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
+nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
- if (!rel) clk->tstate = req;
- if ( rel) clk->tstate += rel;
- clk->tstate = min(clk->tstate, 0);
- clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
- return nvkm_pstate_calc(clk, true);
+ if (clk->temp == temp)
+ return 0;
+ clk->temp = temp;
+ return nvkm_pstate_calc(clk, false);
}
int
@@ -524,9 +615,9 @@ nvkm_clk_init(struct nvkm_subdev *subdev)
return clk->func->init(clk);
clk->astate = clk->state_nr - 1;
- clk->tstate = 0;
clk->dstate = 0;
clk->pstate = -1;
+ clk->temp = 90; /* reasonable default value */
nvkm_pstate_calc(clk, true);
return 0;
}
@@ -561,10 +652,22 @@ int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
int index, bool allow_reclock, struct nvkm_clk *clk)
{
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_bios *bios = device->bios;
int ret, idx, arglen;
const char *mode;
+ struct nvbios_vpstate_header h;
+
+ nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+
+ if (bios && !nvbios_vpstate_parse(bios, &h)) {
+ struct nvbios_vpstate_entry base, boost;
+ if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
+ clk->boost_khz = boost.clock_mhz * 1000;
+ if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
+ clk->base_khz = base.clock_mhz * 1000;
+ }
- nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
clk->func = func;
INIT_LIST_HEAD(&clk->states);
clk->domains = func->domains;
@@ -607,6 +710,8 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
if (mode)
clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
+ clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
+ NVKM_CLK_BOOST_NONE);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 89d5543118cf..7f67f9f5a550 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -457,7 +457,7 @@ gf100_clk = {
{ nv_clk_src_hubk06 , 0x00 },
{ nv_clk_src_hubk01 , 0x01 },
{ nv_clk_src_copy , 0x02 },
- { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
+ { nv_clk_src_gpc , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_rop , 0x04 },
{ nv_clk_src_mem , 0x05, 0, "memory", 1000 },
{ nv_clk_src_vdec , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 06bc0d2d6ae1..0b37e3da7feb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -491,7 +491,7 @@ gk104_clk = {
.domains = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
- { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+ { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_mem , 0x03, 0, "memory", 500 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 056702ef69aa..96e0941c8edd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,7 @@ gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
return 0;
}
-int
+static int
gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
struct gt215_clk_info *info)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index a410c0db8a08..1730371933df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/pmu.h>
+#include <subdev/timer.h>
static void
pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
@@ -123,21 +124,13 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -EINVAL;
}
- /* reset PMU and load init table parser ucode */
- if (post) {
- nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
- nvkm_rd32(device, 0x000200);
- while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
- }
- }
-
ret = pmu_load(init, 0x04, post, &exec, &args);
if (ret)
return ret;
/* upload first chunk of init data */
if (post) {
+ // devinit tables
u32 pmu = pmu_args(init, args + 0x08, 0x08);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
@@ -146,6 +139,7 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
/* upload second chunk of init data */
if (post) {
+ // devinit boot scripts
u32 pmu = pmu_args(init, args + 0x08, 0x10);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
@@ -156,8 +150,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
if (post) {
nvkm_wr32(device, 0x10a040, 0x00005000);
pmu_exec(init, exec);
- while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
- }
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a040) & 0x00002000)
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
}
/* load and execute some other ucode image (bios therm?) */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index edcc157e6ac8..63566ba12fbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -24,8 +24,9 @@ nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gm20b.o
nvkm-y += nvkm/subdev/fb/gp100.o
-nvkm-y += nvkm/subdev/fb/gp104.o
+nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc66fff..3841ad6be99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base)
}
int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
{
- struct nvkm_device *device = fb->subdev.device;
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
int ret, size = 0x1000;
size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
size = min(size, 0x1000);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_rd);
+ false, &fb->base.mmu_rd);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_wr);
+ false, &fb->base.mmu_wr);
if (ret)
return ret;
+ fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c10_page) {
+ fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c10))
+ return -EFAULT;
+ }
+
return 0;
}
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_fb_ctor(func, device, index, &fb->base);
*pfb = &fb->base;
- fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c10_page) {
- fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c10))
- return -EFAULT;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 449f431644b3..412eb89834e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -16,4 +16,8 @@ void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
void gp100_fb_init(struct nvkm_fb *);
+
+void gm200_fb_init_page(struct nvkm_fb *fb);
+void gm200_fb_init(struct nvkm_fb *base);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index f815fe2bbf08..5d34d6136616 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,27 +20,21 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
+#include "gf100.h"
-#include <core/memory.h>
-
-static void
-gk20a_fb_init(struct nvkm_fb *fb)
-{
- struct nvkm_device *device = fb->subdev.device;
- nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
- nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
-}
-
+/* GK20A's FB is similar to GF100's, but without the ability to allocate VRAM */
static const struct nvkm_fb_func
gk20a_fb = {
+ .dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
- .init = gk20a_fb_init,
+ .init = gf100_fb_init,
.init_page = gf100_fb_init_page,
+ .intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
};
int
gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
+ return gf100_fb_new_(&gk20a_fb, device, index, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 62f653240be3..fe5886013ac0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -44,7 +44,7 @@ gm200_fb_init_page(struct nvkm_fb *fb)
}
}
-static void
+void
gm200_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
new file mode 100644
index 000000000000..b87c233bcd6d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "gf100.h"
+
+/* GM20B's FB is similar to GM200, but without the ability to allocate VRAM */
+static const struct nvkm_fb_func
+gm20b_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
+ .intr = gf100_fb_intr,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm20b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gm20b_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index 92cb71861bec..73b4ae1c73dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -27,7 +27,7 @@
#include <core/memory.h>
static const struct nvkm_fb_func
-gp104_fb = {
+gp102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
@@ -37,7 +37,7 @@ gp104_fb = {
};
int
-gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp104_fb, device, index, pfb);
+ return gf100_fb_new_(&gp102_fb, device, index, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 1b5fb02eab2a..0595e0722bfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base)
nvkm_fifo_chan_put(fifo, flags, &chan);
}
+static int
+nv50_fb_oneinit(struct nvkm_fb *base)
+{
+ struct nv50_fb *fb = nv50_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c08_page) {
+ fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c08))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static void
nv50_fb_init(struct nvkm_fb *base)
{
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
static const struct nvkm_fb_func
nv50_fb_ = {
.dtor = nv50_fb_dtor,
+ .oneinit = nv50_fb_oneinit,
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
fb->func = func;
*pfb = &fb->base;
- fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c08_page) {
- fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c08))
- return -EFAULT;
- } else {
- nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b9ec0ae6723a..b60068b7d8f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -24,6 +24,7 @@ int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
int gk104_ram_init(struct nvkm_ram *ram);
/* RAM type-specific MR calculation routines */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 772425ca5a9e..093223d1df4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -420,8 +420,6 @@ gf100_ram_tidy(struct nvkm_ram *base)
ram_exec(&ram->fuc, false);
}
-extern const u8 gf100_pte_storage_type_map[256];
-
void
gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1fa3ade468ae..7904fa41acef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -259,7 +259,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
if ((ram->base.mr[1] & 0x03c) != 0x030) {
@@ -658,7 +660,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
gk104_ram_train(fuc, 0x80020000, 0x01000000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -706,7 +710,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -936,7 +942,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_nsec(fuc, 1000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -1530,6 +1538,12 @@ gk104_ram_func = {
int
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
+ return gk104_ram_ctor(fb, pram, 0x022554);
+}
+
+int
+gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
@@ -1544,7 +1558,7 @@ gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
return -ENOMEM;
*pram = &ram->base;
- ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
+ ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 43d807f6ca71..ac862d1d77bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -23,18 +23,8 @@
*/
#include "ram.h"
-static const struct nvkm_ram_func
-gm107_ram_func = {
- .init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
-};
-
int
gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
- return -ENOMEM;
-
- return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
+ return gk104_ram_ctor(fb, pram, 0x021c14);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index f3be408b5e5e..405faabe8dcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -92,13 +92,13 @@ gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
- u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
+ u32 fbpa_num = nvkm_rd32(device, 0x02243c), fbpa;
u32 fbio_opt = nvkm_rd32(device, 0x021c14);
u64 part, size = 0, comm = ~0ULL;
bool mixed = false;
int ret;
- nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
+ nvkm_debug(subdev, "02243c: %08x\n", fbpa_num);
nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
if (!(fbio_opt & (1 << fbpa))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index d15ea886df27..f10664372161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -95,7 +95,7 @@ struct gt215_ram {
struct gt215_ltrain ltrain;
};
-void
+static void
gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
{
int i, lo, hi;
@@ -149,7 +149,7 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
/*
* Link training for (at least) DDR3
*/
-int
+static int
gt215_link_train(struct gt215_ram *ram)
{
struct gt215_ltrain *train = &ram->ltrain;
@@ -267,7 +267,7 @@ out:
return ret;
}
-int
+static int
gt215_link_train_init(struct gt215_ram *ram)
{
static const u32 pattern[16] = {
@@ -333,7 +333,7 @@ gt215_link_train_init(struct gt215_ram *ram)
return 0;
}
-void
+static void
gt215_link_train_fini(struct gt215_ram *ram)
{
if (ram->ltrain.mem)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index b9f1ffdfc602..4dcd8742f2da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -23,6 +23,7 @@
* Ben Skeggs
*/
#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 26900333b1d6..eca8a445eab3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -23,6 +23,7 @@
* Roy Spliet <rspliet@eclipso.eu>
*/
#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 3f45afd17d5a..2ead515b8530 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -37,7 +37,7 @@ gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
nvkm_wr32(device, 0x00dc80, intr1);
}
-void
+static void
gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
struct nvkm_device *device = gpio->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index f0851d57df2f..01d5c5a56e2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -74,7 +74,7 @@ nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-const struct i2c_algorithm
+static const struct i2c_algorithm
nvkm_i2c_aux_i2c_algo = {
.master_xfer = nvkm_i2c_aux_i2c_xfer,
.functionality = nvkm_i2c_aux_i2c_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index 954f5b76bfcf..b80236a4eeac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -79,7 +79,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
- u32 ctrl, stat, timeout, retries;
+ u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
@@ -111,7 +111,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
+ do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
@@ -131,20 +131,20 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
} while (ctrl & 0x00010000);
- ret = 1;
+ ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
+ ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
- }
+ } while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index 61d729b82c69..ed458c7f056b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -79,7 +79,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
- u32 ctrl, stat, timeout, retries;
+ u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
@@ -111,7 +111,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
+ do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
@@ -131,20 +131,20 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
} while (ctrl & 0x00010000);
- ret = 1;
+ ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
+ ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
- }
+ } while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index b7159b338fac..1a4ab825852c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -29,7 +29,7 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
nvkm_mask(device, 0x137250, 0x3f, 0);
nvkm_mask(device, 0x000200, 0x20, 0);
- usleep_range(20, 30);
+ udelay(20);
nvkm_mask(device, 0x000200, 0x20, 0x20);
nvkm_wr32(device, 0x12004c, 0x4);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 41bd5d0f7692..f0af2a381eea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -96,60 +96,12 @@ nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
}
static void
-nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0078: 0x0078 128 samples shunt
- * 0x0780: 0x0780 128 samples bus
- * 0x1800: 0x0000 +-40 mV shunt range
- * 0x2000: 0x0000 16V FSR
- */
- u16 value = 0x07ff;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
-nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0031: 0x0000 140 us conversion time shunt
- * 0x01c0: 0x0000 140 us conversion time bus
- * 0x0f00: 0x0f00 1024 samples
- * 0x7000: 0x?000 channels
- */
- u16 value = 0x0e07;
- if (sensor->rail_mask & 0x1)
- value |= 0x1 << 14;
- if (sensor->rail_mask & 0x2)
- value |= 0x1 << 13;
- if (sensor->rail_mask & 0x4)
- value |= 0x1 << 12;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_sensor *sensor)
{
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- case NVBIOS_EXTDEV_INA219:
- nvkm_iccsense_ina209_config(iccsense, sensor);
- break;
- case NVBIOS_EXTDEV_INA3221:
- nvkm_iccsense_ina3221_config(iccsense, sensor);
- break;
- default:
- break;
- }
+ struct nvkm_subdev *subdev = &iccsense->subdev;
+ nvkm_trace(subdev, "write config of extdev %i: 0x%04x\n", sensor->id, sensor->config);
+ nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, sensor->config);
}
int
@@ -196,7 +148,6 @@ nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
static struct nvkm_iccsense_sensor*
nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
{
-
struct nvkm_subdev *subdev = &iccsense->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_i2c *i2c = subdev->device->i2c;
@@ -245,7 +196,7 @@ nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
sensor->type = extdev.type;
sensor->i2c = &i2c_bus->i2c;
sensor->addr = addr;
- sensor->rail_mask = 0x0;
+ sensor->config = 0x0;
return sensor;
}
@@ -273,48 +224,56 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
iccsense->data_valid = true;
for (i = 0; i < stbl.nr_entry; ++i) {
- struct pwr_rail_t *r = &stbl.rail[i];
- struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_t *pwr_rail = &stbl.rail[i];
struct nvkm_iccsense_sensor *sensor;
- int (*read)(struct nvkm_iccsense *,
- struct nvkm_iccsense_rail *);
+ int r;
- if (!r->mode || r->resistor_mohm == 0)
+ if (pwr_rail->mode != 1 || !pwr_rail->resistor_count)
continue;
- sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id);
+ sensor = nvkm_iccsense_get_sensor(iccsense, pwr_rail->extdev_id);
if (!sensor)
continue;
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- if (r->rail != 0)
- continue;
- read = nvkm_iccsense_ina209_read;
- break;
- case NVBIOS_EXTDEV_INA219:
- if (r->rail != 0)
+ if (!sensor->config)
+ sensor->config = pwr_rail->config;
+ else if (sensor->config != pwr_rail->config)
+ nvkm_error(subdev, "config mismatch found for extdev %i\n", pwr_rail->extdev_id);
+
+ for (r = 0; r < pwr_rail->resistor_count; ++r) {
+ struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_resistor_t *res = &pwr_rail->resistors[r];
+ int (*read)(struct nvkm_iccsense *,
+ struct nvkm_iccsense_rail *);
+
+ if (!res->mohm || !res->enabled)
continue;
- read = nvkm_iccsense_ina219_read;
- break;
- case NVBIOS_EXTDEV_INA3221:
- if (r->rail >= 3)
+
+ switch (sensor->type) {
+ case NVBIOS_EXTDEV_INA209:
+ read = nvkm_iccsense_ina209_read;
+ break;
+ case NVBIOS_EXTDEV_INA219:
+ read = nvkm_iccsense_ina219_read;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ read = nvkm_iccsense_ina3221_read;
+ break;
+ default:
continue;
- read = nvkm_iccsense_ina3221_read;
- break;
- default:
- continue;
+ }
+
+ rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+ if (!rail)
+ return -ENOMEM;
+
+ rail->read = read;
+ rail->sensor = sensor;
+ rail->idx = r;
+ rail->mohm = res->mohm;
+ nvkm_debug(subdev, "create rail for extdev %i: { idx: %i, mohm: %i }\n", pwr_rail->extdev_id, r, rail->mohm);
+ list_add_tail(&rail->head, &iccsense->rails);
}
-
- rail = kmalloc(sizeof(*rail), GFP_KERNEL);
- if (!rail)
- return -ENOMEM;
- sensor->rail_mask |= 1 << r->rail;
- rail->read = read;
- rail->sensor = sensor;
- rail->idx = r->rail;
- rail->mohm = r->resistor_mohm;
- list_add_tail(&rail->head, &iccsense->rails);
}
return 0;
}
@@ -329,7 +288,8 @@ nvkm_iccsense_init(struct nvkm_subdev *subdev)
return 0;
}
-struct nvkm_subdev_func iccsense_func = {
+static const struct nvkm_subdev_func
+iccsense_func = {
.oneinit = nvkm_iccsense_oneinit,
.init = nvkm_iccsense_init,
.dtor = nvkm_iccsense_dtor,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index b72c31d2f908..e90e0f6ed008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -10,7 +10,7 @@ struct nvkm_iccsense_sensor {
enum nvbios_extdev_type type;
struct i2c_adapter *i2c;
u8 addr;
- u8 rail_mask;
+ u16 config;
};
struct nvkm_iccsense_rail {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 8ed8f65ff664..10c987a654ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -104,7 +104,7 @@ nvkm_instobj_dtor(struct nvkm_memory *memory)
return iobj;
}
-const struct nvkm_memory_func
+static const struct nvkm_memory_func
nvkm_instobj_func = {
.dtor = nvkm_instobj_dtor,
.target = nvkm_instobj_target,
@@ -156,7 +156,7 @@ nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
return nvkm_wo32(iobj->parent, offset, data);
}
-const struct nvkm_memory_func
+static const struct nvkm_memory_func
nvkm_instobj_func_slow = {
.dtor = nvkm_instobj_dtor,
.target = nvkm_instobj_target,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index c3d66ef5dc12..430a61c3df44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -34,7 +34,7 @@ g84_mc_reset[] = {
{}
};
-const struct nvkm_mc_map
+static const struct nvkm_mc_map
g84_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
{ 0x00020000, NVKM_ENGINE_VP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 21b65ee254e4..e3e2f5e83815 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -250,6 +250,10 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
}
nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+ nvkm_debug(&mxm->subdev, "module flags: %02x\n",
+ nvbios_rd08(bios, data + 0x01));
+ nvkm_debug(&mxm->subdev, "config flags: %02x\n",
+ nvbios_rd08(bios, data + 0x02));
if (mxm_shadow(mxm, ver)) {
nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index 45a2f8e784f9..9abfa5e2fe9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -23,8 +23,8 @@
*/
#include "mxms.h"
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
static u8 *
mxms_data(struct nvkm_mxm *mxm)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index db14fad2ddfc..844971e5e874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -190,8 +190,8 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
struct nvkm_bios *bios = subdev->device->bios;
u8 ver, hdr, cnt, len;
u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
- if (dcb == 0x0000 || ver != 0x40) {
- nvkm_debug(subdev, "unsupported DCB version\n");
+ if (dcb == 0x0000 || (ver != 0x40 && ver != 0x41)) {
+ nvkm_warn(subdev, "unsupported DCB version\n");
return;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 88b643b8664e..51fb4bf94a44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -8,3 +8,5 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
nvkm-y += nvkm/subdev/pmu/gk208.o
nvkm-y += nvkm/subdev/pmu/gk20a.o
nvkm-y += nvkm/subdev/pmu/gm107.o
+nvkm-y += nvkm/subdev/pmu/gp100.o
+nvkm-y += nvkm/subdev/pmu/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 8dd164d13043..e611ce80f8ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -32,225 +32,85 @@ nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
pmu->func->pgob(pmu, enable);
}
-int
-nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
- u32 process, u32 message, u32 data0, u32 data1)
-{
- struct nvkm_subdev *subdev = &pmu->subdev;
- struct nvkm_device *device = subdev->device;
- u32 addr;
-
- mutex_lock(&subdev->mutex);
- /* wait for a free slot in the fifo */
- addr = nvkm_rd32(device, 0x10a4a0);
- if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x10a4b0);
- if (tmp != (addr ^ 8))
- break;
- ) < 0) {
- mutex_unlock(&subdev->mutex);
- return -EBUSY;
- }
-
- /* we currently only support a single process at a time waiting
- * on a synchronous reply, take the PMU mutex and tell the
- * receive handler what we're waiting for
- */
- if (reply) {
- pmu->recv.message = message;
- pmu->recv.process = process;
- }
-
- /* acquire data segment access */
- do {
- nvkm_wr32(device, 0x10a580, 0x00000001);
- } while (nvkm_rd32(device, 0x10a580) != 0x00000001);
-
- /* write the packet */
- nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
- pmu->send.base));
- nvkm_wr32(device, 0x10a1c4, process);
- nvkm_wr32(device, 0x10a1c4, message);
- nvkm_wr32(device, 0x10a1c4, data0);
- nvkm_wr32(device, 0x10a1c4, data1);
- nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
-
- /* release data segment access */
- nvkm_wr32(device, 0x10a580, 0x00000000);
-
- /* wait for reply, if requested */
- if (reply) {
- wait_event(pmu->recv.wait, (pmu->recv.process == 0));
- reply[0] = pmu->recv.data[0];
- reply[1] = pmu->recv.data[1];
- }
-
- mutex_unlock(&subdev->mutex);
- return 0;
-}
-
static void
nvkm_pmu_recv(struct work_struct *work)
{
- struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
- struct nvkm_subdev *subdev = &pmu->subdev;
- struct nvkm_device *device = subdev->device;
- u32 process, message, data0, data1;
-
- /* nothing to do if GET == PUT */
- u32 addr = nvkm_rd32(device, 0x10a4cc);
- if (addr == nvkm_rd32(device, 0x10a4c8))
- return;
-
- /* acquire data segment access */
- do {
- nvkm_wr32(device, 0x10a580, 0x00000002);
- } while (nvkm_rd32(device, 0x10a580) != 0x00000002);
-
- /* read the packet */
- nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
- pmu->recv.base));
- process = nvkm_rd32(device, 0x10a1c4);
- message = nvkm_rd32(device, 0x10a1c4);
- data0 = nvkm_rd32(device, 0x10a1c4);
- data1 = nvkm_rd32(device, 0x10a1c4);
- nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
-
- /* release data segment access */
- nvkm_wr32(device, 0x10a580, 0x00000000);
-
- /* wake process if it's waiting on a synchronous reply */
- if (pmu->recv.process) {
- if (process == pmu->recv.process &&
- message == pmu->recv.message) {
- pmu->recv.data[0] = data0;
- pmu->recv.data[1] = data1;
- pmu->recv.process = 0;
- wake_up(&pmu->recv.wait);
- return;
- }
- }
+ struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
+ return pmu->func->recv(pmu);
+}
- /* right now there's no other expected responses from the engine,
- * so assume that any unexpected message is an error.
- */
- nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
- (char)((process & 0x000000ff) >> 0),
- (char)((process & 0x0000ff00) >> 8),
- (char)((process & 0x00ff0000) >> 16),
- (char)((process & 0xff000000) >> 24),
- process, message, data0, data1);
+int
+nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+ u32 process, u32 message, u32 data0, u32 data1)
+{
+ if (!pmu || !pmu->func->send)
+ return -ENODEV;
+ return pmu->func->send(pmu, reply, process, message, data0, data1);
}
static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- struct nvkm_device *device = pmu->subdev.device;
- u32 disp = nvkm_rd32(device, 0x10a01c);
- u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
-
- if (intr & 0x00000020) {
- u32 stat = nvkm_rd32(device, 0x10a16c);
- if (stat & 0x80000000) {
- nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
- stat & 0x00ffffff,
- nvkm_rd32(device, 0x10a168));
- nvkm_wr32(device, 0x10a16c, 0x00000000);
- intr &= ~0x00000020;
- }
- }
-
- if (intr & 0x00000040) {
- schedule_work(&pmu->recv.work);
- nvkm_wr32(device, 0x10a004, 0x00000040);
- intr &= ~0x00000040;
- }
-
- if (intr & 0x00000080) {
- nvkm_info(subdev, "wr32 %06x %08x\n",
- nvkm_rd32(device, 0x10a7a0),
- nvkm_rd32(device, 0x10a7a4));
- nvkm_wr32(device, 0x10a004, 0x00000080);
- intr &= ~0x00000080;
- }
-
- if (intr) {
- nvkm_error(subdev, "intr %08x\n", intr);
- nvkm_wr32(device, 0x10a004, intr);
- }
+ if (!pmu->func->intr)
+ return;
+ pmu->func->intr(pmu);
}
static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- struct nvkm_device *device = pmu->subdev.device;
- nvkm_wr32(device, 0x10a014, 0x00000060);
+ if (pmu->func->fini)
+ pmu->func->fini(pmu);
+
flush_work(&pmu->recv.work);
return 0;
}
static int
-nvkm_pmu_init(struct nvkm_subdev *subdev)
+nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
- struct nvkm_pmu *pmu = nvkm_pmu(subdev);
struct nvkm_device *device = pmu->subdev.device;
- int i;
- /* prevent previous ucode from running, wait for idle, reset */
- nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+ if (!(nvkm_rd32(device, 0x000200) & 0x00002000))
+ return 0;
+
+ /* Inhibit interrupts, and wait for idle. */
+ nvkm_wr32(device, 0x10a014, 0x0000ffff);
nvkm_msec(device, 2000,
if (!nvkm_rd32(device, 0x10a04c))
break;
);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
- nvkm_rd32(device, 0x000200);
+
+ /* Reset. */
+ pmu->func->reset(pmu);
+
+ /* Wait for IMEM/DMEM scrubbing to be complete. */
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
break;
);
- /* upload data segment */
- nvkm_wr32(device, 0x10a1c0, 0x01000000);
- for (i = 0; i < pmu->func->data.size / 4; i++)
- nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
-
- /* upload code segment */
- nvkm_wr32(device, 0x10a180, 0x01000000);
- for (i = 0; i < pmu->func->code.size / 4; i++) {
- if ((i & 0x3f) == 0)
- nvkm_wr32(device, 0x10a188, i >> 6);
- nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
- }
-
- /* start it running */
- nvkm_wr32(device, 0x10a10c, 0x00000000);
- nvkm_wr32(device, 0x10a104, 0x00000000);
- nvkm_wr32(device, 0x10a100, 0x00000002);
-
- /* wait for valid host->pmu ring configuration */
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x10a4d0))
- break;
- ) < 0)
- return -EBUSY;
- pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
- pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
+ return 0;
+}
- /* wait for valid pmu->host ring configuration */
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x10a4dc))
- break;
- ) < 0)
- return -EBUSY;
- pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
- pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
+static int
+nvkm_pmu_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ return nvkm_pmu_reset(pmu);
+}
- nvkm_wr32(device, 0x10a010, 0x000000e0);
- return 0;
+static int
+nvkm_pmu_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ int ret = nvkm_pmu_reset(pmu);
+ if (ret == 0 && pmu->func->init)
+ ret = pmu->func->init(pmu);
+ return ret;
}
static void *
@@ -262,6 +122,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
+ .preinit = nvkm_pmu_preinit,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index e2faccffee6f..0bcf0b307a61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_pmu_data[] = {
+static uint32_t gf100_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
};
-uint32_t gf100_pmu_code[] = {
+static uint32_t gf100_pmu_code[] = {
0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index 2d5bdc539697..fe8905666c67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,4 +1,4 @@
-uint32_t gf119_pmu_data[] = {
+static uint32_t gf119_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
};
-uint32_t gf119_pmu_code[] = {
+static uint32_t gf119_pmu_code[] = {
0x03410ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 3c731ff12871..9cf4e6fc724e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_pmu_data[] = {
+static uint32_t gk208_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
};
-uint32_t gk208_pmu_code[] = {
+static uint32_t gk208_pmu_code[] = {
0x02f90ef5,
/* 0x0004: rd32 */
0xf607a040,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index e83341815ec6..5d692425b190 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_pmu_data[] = {
+static uint32_t gt215_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
};
-uint32_t gt215_pmu_code[] = {
+static uint32_t gt215_pmu_code[] = {
0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index aeb8ccd891fc..0e36d4cb7201 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -30,6 +30,12 @@ gf100_pmu = {
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
.data.size = sizeof(gf100_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index fbc88d8ecd4d..0e4ba4248b15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -30,6 +30,12 @@ gf119_pmu = {
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
.data.size = sizeof(gf119_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 86f9f3b13f71..2ad858d825ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -109,6 +109,12 @@ gk104_pmu = {
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
.data.size = sizeof(gk104_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk104_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index ae255247c9d1..fc4b8ecfdaeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -88,6 +88,12 @@ gk110_pmu = {
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
.data.size = sizeof(gk110_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk110_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index 3b4917637902..e9a91277683a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -30,6 +30,12 @@ gk208_pmu = {
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk110_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 31b8692b4641..9a248ed75f09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -32,6 +32,12 @@ gm107_pmu = {
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
.data.size = sizeof(gm107_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
new file mode 100644
index 000000000000..6c41c20c85a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_pmu_func
+gp100_pmu = {
+ .reset = gt215_pmu_reset,
+};
+
+int
+gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
new file mode 100644
index 000000000000..f017352206c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gp102_pmu_reset(struct nvkm_pmu *pmu)
+{
+ struct nvkm_device *device = pmu->subdev.device;
+ nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000000);
+}
+
+static const struct nvkm_pmu_func
+gp102_pmu = {
+ .reset = gp102_pmu_reset,
+};
+
+int
+gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 8ba7fa4ca75b..90d428b3be97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,21 +24,229 @@
#include "priv.h"
#include "fuc/gt215.fuc3.h"
-static void
+#include <subdev/timer.h>
+
+int
+gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+ u32 process, u32 message, u32 data0, u32 data1)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 addr;
+
+ mutex_lock(&subdev->mutex);
+ /* wait for a free slot in the fifo */
+ addr = nvkm_rd32(device, 0x10a4a0);
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x10a4b0);
+ if (tmp != (addr ^ 8))
+ break;
+ ) < 0) {
+ mutex_unlock(&subdev->mutex);
+ return -EBUSY;
+ }
+
+ /* we currently only support a single process at a time waiting
+ * on a synchronous reply, take the PMU mutex and tell the
+ * receive handler what we're waiting for
+ */
+ if (reply) {
+ pmu->recv.message = message;
+ pmu->recv.process = process;
+ }
+
+ /* acquire data segment access */
+ do {
+ nvkm_wr32(device, 0x10a580, 0x00000001);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000001);
+
+ /* write the packet */
+ nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+ pmu->send.base));
+ nvkm_wr32(device, 0x10a1c4, process);
+ nvkm_wr32(device, 0x10a1c4, message);
+ nvkm_wr32(device, 0x10a1c4, data0);
+ nvkm_wr32(device, 0x10a1c4, data1);
+ nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nvkm_wr32(device, 0x10a580, 0x00000000);
+
+ /* wait for reply, if requested */
+ if (reply) {
+ wait_event(pmu->recv.wait, (pmu->recv.process == 0));
+ reply[0] = pmu->recv.data[0];
+ reply[1] = pmu->recv.data[1];
+ }
+
+ mutex_unlock(&subdev->mutex);
+ return 0;
+}
+
+void
+gt215_pmu_recv(struct nvkm_pmu *pmu)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 process, message, data0, data1;
+
+ /* nothing to do if GET == PUT */
+ u32 addr = nvkm_rd32(device, 0x10a4cc);
+ if (addr == nvkm_rd32(device, 0x10a4c8))
+ return;
+
+ /* acquire data segment access */
+ do {
+ nvkm_wr32(device, 0x10a580, 0x00000002);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000002);
+
+ /* read the packet */
+ nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+ pmu->recv.base));
+ process = nvkm_rd32(device, 0x10a1c4);
+ message = nvkm_rd32(device, 0x10a1c4);
+ data0 = nvkm_rd32(device, 0x10a1c4);
+ data1 = nvkm_rd32(device, 0x10a1c4);
+ nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nvkm_wr32(device, 0x10a580, 0x00000000);
+
+ /* wake process if it's waiting on a synchronous reply */
+ if (pmu->recv.process) {
+ if (process == pmu->recv.process &&
+ message == pmu->recv.message) {
+ pmu->recv.data[0] = data0;
+ pmu->recv.data[1] = data1;
+ pmu->recv.process = 0;
+ wake_up(&pmu->recv.wait);
+ return;
+ }
+ }
+
+ /* right now there are no other expected responses from the engine,
+ * so assume that any unexpected message is an error.
+ */
+ nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+ (char)((process & 0x000000ff) >> 0),
+ (char)((process & 0x0000ff00) >> 8),
+ (char)((process & 0x00ff0000) >> 16),
+ (char)((process & 0xff000000) >> 24),
+ process, message, data0, data1);
+}
+
+void
+gt215_pmu_intr(struct nvkm_pmu *pmu)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 disp = nvkm_rd32(device, 0x10a01c);
+ u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
+
+ if (intr & 0x00000020) {
+ u32 stat = nvkm_rd32(device, 0x10a16c);
+ if (stat & 0x80000000) {
+ nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+ stat & 0x00ffffff,
+ nvkm_rd32(device, 0x10a168));
+ nvkm_wr32(device, 0x10a16c, 0x00000000);
+ intr &= ~0x00000020;
+ }
+ }
+
+ if (intr & 0x00000040) {
+ schedule_work(&pmu->recv.work);
+ nvkm_wr32(device, 0x10a004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
+ if (intr & 0x00000080) {
+ nvkm_info(subdev, "wr32 %06x %08x\n",
+ nvkm_rd32(device, 0x10a7a0),
+ nvkm_rd32(device, 0x10a7a4));
+ nvkm_wr32(device, 0x10a004, 0x00000080);
+ intr &= ~0x00000080;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "intr %08x\n", intr);
+ nvkm_wr32(device, 0x10a004, intr);
+ }
+}
+
+void
+gt215_pmu_fini(struct nvkm_pmu *pmu)
+{
+ nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
+}
+
+void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
- nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+ nvkm_rd32(device, 0x000200);
+}
+
+int
+gt215_pmu_init(struct nvkm_pmu *pmu)
+{
+ struct nvkm_device *device = pmu->subdev.device;
+ int i;
+
+ /* upload data segment */
+ nvkm_wr32(device, 0x10a1c0, 0x01000000);
+ for (i = 0; i < pmu->func->data.size / 4; i++)
+ nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
+
+ /* upload code segment */
+ nvkm_wr32(device, 0x10a180, 0x01000000);
+ for (i = 0; i < pmu->func->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nvkm_wr32(device, 0x10a188, i >> 6);
+ nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
+ }
+
+ /* start it running */
+ nvkm_wr32(device, 0x10a10c, 0x00000000);
+ nvkm_wr32(device, 0x10a104, 0x00000000);
+ nvkm_wr32(device, 0x10a100, 0x00000002);
+
+ /* wait for valid host->pmu ring configuration */
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a4d0))
+ break;
+ ) < 0)
+ return -EBUSY;
+ pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+ pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
+
+ /* wait for valid pmu->host ring configuration */
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a4dc))
+ break;
+ ) < 0)
+ return -EBUSY;
+ pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+ pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
+
+ nvkm_wr32(device, 0x10a010, 0x000000e0);
+ return 0;
}
static const struct nvkm_pmu_func
gt215_pmu = {
- .reset = gt215_pmu_reset,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
.data.size = sizeof(gt215_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
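The message interface exported above is used by callers roughly as in the sketch below; PROC_FOO, MSG_BAR and use() are illustrative placeholders, not symbols introduced by this patch:

	/* sketch: synchronous request/reply over the PMU mailbox */
	static int example_query(struct nvkm_pmu *pmu, u32 data0, u32 data1)
	{
		u32 reply[2];
		int ret;

		/* blocks until gt215_pmu_recv() posts the matching reply */
		ret = gt215_pmu_send(pmu, reply, PROC_FOO, MSG_BAR, data0, data1);
		if (ret)
			return ret;

		/* reply[0]/reply[1] hold the two data words written back by the PMU */
		return use(reply[0], reply[1]);
	}

Passing NULL instead of a reply buffer queues the message without waiting for an answer.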
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index f38c88fae3d6..2e2179a4ad17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -8,8 +8,6 @@ int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
int index, struct nvkm_pmu **);
struct nvkm_pmu_func {
- void (*reset)(struct nvkm_pmu *);
-
struct {
u32 *data;
u32 size;
@@ -20,8 +18,22 @@ struct nvkm_pmu_func {
u32 size;
} data;
+ void (*reset)(struct nvkm_pmu *);
+ int (*init)(struct nvkm_pmu *);
+ void (*fini)(struct nvkm_pmu *);
+ void (*intr)(struct nvkm_pmu *);
+ int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
+ u32 message, u32 data0, u32 data1);
+ void (*recv)(struct nvkm_pmu *);
void (*pgob)(struct nvkm_pmu *, bool);
};
+void gt215_pmu_reset(struct nvkm_pmu *);
+int gt215_pmu_init(struct nvkm_pmu *);
+void gt215_pmu_fini(struct nvkm_pmu *);
+void gt215_pmu_intr(struct nvkm_pmu *);
+void gt215_pmu_recv(struct nvkm_pmu *);
+int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
+
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index f1e2dc914366..ec48e4ace37a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -1364,7 +1364,7 @@ gm200_secboot_init(struct nvkm_secboot *sb)
return 0;
}
-int
+static int
gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index c34076223b7b..bcd179ba11d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -1,6 +1,7 @@
nvkm-y += nvkm/subdev/volt/base.o
nvkm-y += nvkm/subdev/volt/gpio.o
nvkm-y += nvkm/subdev/volt/nv40.o
+nvkm-y += nvkm/subdev/volt/gf100.o
nvkm-y += nvkm/subdev/volt/gk104.o
nvkm-y += nvkm/subdev/volt/gk20a.o
nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 1c3d23b0e84a..e344901cfdc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/vmap.h>
#include <subdev/bios/volt.h>
+#include <subdev/therm.h>
int
nvkm_volt_get(struct nvkm_volt *volt)
@@ -50,33 +51,45 @@ static int
nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
{
struct nvkm_subdev *subdev = &volt->subdev;
- int i, ret = -EINVAL;
+ int i, ret = -EINVAL, best_err = volt->max_uv, best = -1;
if (volt->func->volt_set)
return volt->func->volt_set(volt, uv);
for (i = 0; i < volt->vid_nr; i++) {
- if (volt->vid[i].uv == uv) {
- ret = volt->func->vid_set(volt, volt->vid[i].vid);
- nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+ int err = volt->vid[i].uv - uv;
+ if (err < 0 || err > best_err)
+ continue;
+
+ best_err = err;
+ best = i;
+ if (best_err == 0)
break;
- }
}
+
+ if (best == -1) {
+ nvkm_error(subdev, "couldn't set %iuv\n", uv);
+ return ret;
+ }
+
+ ret = volt->func->vid_set(volt, volt->vid[best].vid);
+ nvkm_debug(subdev, "set req %duv to %duv: %d\n", uv,
+ volt->vid[best].uv, ret);
return ret;
}
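With the rewritten loop, nvkm_volt_set() no longer needs an exact VID match: it picks the table entry with the smallest voltage at or above the request and only fails when every entry is below it. As a hypothetical example, a request for 912500uv against entries at 900000uv and 925000uv now selects 925000uv instead of returning -EINVAL.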
-static int
-nvkm_volt_map(struct nvkm_volt *volt, u8 id)
+int
+nvkm_volt_map_min(struct nvkm_volt *volt, u8 id)
{
struct nvkm_bios *bios = volt->subdev.device->bios;
struct nvbios_vmap_entry info;
u8 ver, len;
- u16 vmap;
+ u32 vmap;
vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
if (vmap) {
if (info.link != 0xff) {
- int ret = nvkm_volt_map(volt, info.link);
+ int ret = nvkm_volt_map_min(volt, info.link);
if (ret < 0)
return ret;
info.min += ret;
@@ -88,19 +101,79 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
}
int
-nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp)
+{
+ struct nvkm_bios *bios = volt->subdev.device->bios;
+ struct nvbios_vmap_entry info;
+ u8 ver, len;
+ u32 vmap;
+
+ vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+ if (vmap) {
+ s64 result;
+
+ if (volt->speedo < 0)
+ return volt->speedo;
+
+ if (ver == 0x10 || (ver == 0x20 && info.mode == 0)) {
+ result = div64_s64((s64)info.arg[0], 10);
+ result += div64_s64((s64)info.arg[1] * volt->speedo, 10);
+ result += div64_s64((s64)info.arg[2] * volt->speedo * volt->speedo, 100000);
+ } else if (ver == 0x20) {
+ switch (info.mode) {
+ /* 0x0 handled above! */
+ case 0x1:
+ result = ((s64)info.arg[0] * 15625) >> 18;
+ result += ((s64)info.arg[1] * volt->speedo * 15625) >> 18;
+ result += ((s64)info.arg[2] * temp * 15625) >> 10;
+ result += ((s64)info.arg[3] * volt->speedo * temp * 15625) >> 18;
+ result += ((s64)info.arg[4] * volt->speedo * volt->speedo * 15625) >> 30;
+ result += ((s64)info.arg[5] * temp * temp * 15625) >> 18;
+ break;
+ case 0x3:
+ result = (info.min + info.max) / 2;
+ break;
+ case 0x2:
+ default:
+ result = info.min;
+ break;
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ result = min(max(result, (s64)info.min), (s64)info.max);
+
+ if (info.link != 0xff) {
+ int ret = nvkm_volt_map(volt, info.link, temp);
+ if (ret < 0)
+ return ret;
+ result += ret;
+ }
+ return result;
+ }
+
+ return id ? id * 10000 : -ENODEV;
+}
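As a side note on the constants above, (x * 15625) >> 18 is arithmetically the same as (x * 1000000) >> 24, so these terms appear to treat the VBIOS coefficients as fixed-point volts with 24 fractional bits and convert them to microvolts; the terms shifted by 10 and 30 presumably do the same with different fractional widths, a reading inferred from the arithmetic rather than stated by the patch.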
+
+int
+nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, u8 min_id, u8 temp,
+ int condition)
{
int ret;
if (volt->func->set_id)
return volt->func->set_id(volt, id, condition);
- ret = nvkm_volt_map(volt, id);
+ ret = nvkm_volt_map(volt, id, temp);
if (ret >= 0) {
int prev = nvkm_volt_get(volt);
if (!condition || prev < 0 ||
(condition < 0 && ret < prev) ||
(condition > 0 && ret > prev)) {
+ int min = nvkm_volt_map(volt, min_id, temp);
+ if (min >= 0)
+ ret = max(min, ret);
ret = nvkm_volt_set(volt, ret);
} else {
ret = 0;
@@ -112,14 +185,16 @@ nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
static void
nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_volt_entry ivid;
struct nvbios_volt info;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
int i;
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
- if (data && info.vidmask && info.base && info.step) {
+ if (data && info.vidmask && info.base && info.step && info.ranged) {
+ nvkm_debug(subdev, "found range based VIDs\n");
volt->min_uv = info.min;
volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
@@ -132,7 +207,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
info.base += info.step;
}
volt->vid_mask = info.vidmask;
- } else if (data && info.vidmask) {
+ } else if (data && info.vidmask && !info.ranged) {
+ nvkm_debug(subdev, "found entry based VIDs\n");
volt->min_uv = 0xffffffff;
volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
@@ -154,6 +230,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
}
static int
+nvkm_volt_speedo_read(struct nvkm_volt *volt)
+{
+ if (volt->func->speedo_read)
+ return volt->func->speedo_read(volt);
+ return -EINVAL;
+}
+
+static int
nvkm_volt_init(struct nvkm_subdev *subdev)
{
struct nvkm_volt *volt = nvkm_volt(subdev);
@@ -167,6 +251,21 @@ nvkm_volt_init(struct nvkm_subdev *subdev)
return 0;
}
+static int
+nvkm_volt_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_volt *volt = nvkm_volt(subdev);
+
+ volt->speedo = nvkm_volt_speedo_read(volt);
+ if (volt->speedo > 0)
+ nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo);
+
+ if (volt->func->oneinit)
+ return volt->func->oneinit(volt);
+
+ return 0;
+}
+
static void *
nvkm_volt_dtor(struct nvkm_subdev *subdev)
{
@@ -177,6 +276,7 @@ static const struct nvkm_subdev_func
nvkm_volt = {
.dtor = nvkm_volt_dtor,
.init = nvkm_volt_init,
+ .oneinit = nvkm_volt_oneinit,
};
void
@@ -191,9 +291,22 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
/* Assuming the non-bios device should build the voltage table later */
if (bios) {
+ u8 ver, hdr, cnt, len;
+ struct nvbios_vmap vmap;
+
nvkm_volt_parse_bios(bios, volt);
nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
volt->min_uv, volt->max_uv);
+
+ if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {
+ volt->max0_id = vmap.max0;
+ volt->max1_id = vmap.max1;
+ volt->max2_id = vmap.max2;
+ } else {
+ volt->max0_id = 0xff;
+ volt->max1_id = 0xff;
+ volt->max2_id = 0xff;
+ }
}
if (volt->vid_nr) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
new file mode 100644
index 000000000000..d9ed6925ca64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+#include <subdev/fuse.h>
+
+static int
+gf100_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+
+ if (!fuse)
+ return -EINVAL;
+
+ return nvkm_fuse_read(fuse, 0x1cc);
+}
+
+int
+gf100_volt_oneinit(struct nvkm_volt *volt)
+{
+ struct nvkm_subdev *subdev = &volt->subdev;
+ if (volt->speedo <= 0)
+ nvkm_error(subdev, "couldn't find speedo value, volting not "
+ "possible\n");
+ return 0;
+}
+
+static const struct nvkm_volt_func
+gf100_volt = {
+ .oneinit = gf100_volt_oneinit,
+ .vid_get = nvkm_voltgpio_get,
+ .vid_set = nvkm_voltgpio_set,
+ .speedo_read = gf100_volt_speedo_read,
+};
+
+int
+gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+ struct nvkm_volt *volt;
+ int ret;
+
+ ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+ *pvolt = volt;
+ if (ret)
+ return ret;
+
+ return nvkm_voltgpio_init(volt);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 420bd84d8483..1c744e029454 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -27,6 +27,7 @@
#include <subdev/gpio.h>
#include <subdev/bios.h>
#include <subdev/bios/volt.h>
+#include <subdev/fuse.h>
#define gk104_volt(p) container_of((p), struct gk104_volt, base)
struct gk104_volt {
@@ -34,7 +35,7 @@ struct gk104_volt {
struct nvbios_volt bios;
};
-int
+static int
gk104_volt_get(struct nvkm_volt *base)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
@@ -47,7 +48,7 @@ gk104_volt_get(struct nvkm_volt *base)
return bios->base + bios->pwm_range * duty / div;
}
-int
+static int
gk104_volt_set(struct nvkm_volt *base, u32 uv)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
@@ -64,13 +65,33 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
return 0;
}
+static int
+gk104_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+ int ret;
+
+ if (!fuse)
+ return -EINVAL;
+
+ nvkm_wr32(device, 0x122634, 0x0);
+ ret = nvkm_fuse_read(fuse, 0x3a8);
+ nvkm_wr32(device, 0x122634, 0x41);
+ return ret;
+}
+
static const struct nvkm_volt_func
gk104_volt_pwm = {
+ .oneinit = gf100_volt_oneinit,
.volt_get = gk104_volt_get,
.volt_set = gk104_volt_set,
+ .speedo_read = gk104_volt_speedo_read,
}, gk104_volt_gpio = {
+ .oneinit = gf100_volt_oneinit,
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
+ .speedo_read = gk104_volt_speedo_read,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 74db4d28930f..2925b9cae681 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -25,7 +25,7 @@
#include <core/tegra.h>
-const struct cvb_coef gm20b_cvb_coef[] = {
+static const struct cvb_coef gm20b_cvb_coef[] = {
/* KHz, c0, c1, c2 */
/* 76800 */ { 1786666, -85625, 1632 },
/* 153600 */ { 1846729, -87525, 1632 },
@@ -58,7 +58,7 @@ static const struct cvb_coef gm20b_na_cvb_coef[] = {
/* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
};
-const u32 speedo_to_vmin[] = {
+static const u32 speedo_to_vmin[] = {
/* 0, 1, 2, 3, 4, */
950000, 840000, 818750, 840000, 810000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index d2bac1d77819..443c031b966b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
+#include "priv.h"
static const u8 tags[] = {
DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index d5140d991161..354bafe4b4e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -9,11 +9,13 @@ int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
int index, struct nvkm_volt **);
struct nvkm_volt_func {
+ int (*oneinit)(struct nvkm_volt *);
int (*volt_get)(struct nvkm_volt *);
int (*volt_set)(struct nvkm_volt *, u32 uv);
int (*vid_get)(struct nvkm_volt *);
int (*vid_set)(struct nvkm_volt *, u8 vid);
int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+ int (*speedo_read)(struct nvkm_volt *);
};
int nvkm_voltgpio_init(struct nvkm_volt *);
@@ -23,4 +25,6 @@ int nvkm_voltgpio_set(struct nvkm_volt *, u8);
int nvkm_voltpwm_init(struct nvkm_volt *volt);
int nvkm_voltpwm_get(struct nvkm_volt *volt);
int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv);
+
+int gf100_volt_oneinit(struct nvkm_volt *);
#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 3485d1ecd655..aaa8a58390f1 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -24,23 +24,24 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
bool invert_polarity;
};
-static const struct omap_video_timings tvc_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+static const struct videomode tvc_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW,
};
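This conversion from struct omap_video_timings to the generic struct videomode, repeated for every connector, encoder and panel driver below, follows a fixed mapping: x_res/y_res become hactive/vactive, hsw/hfp/hbp become hsync_len/hfront_porch/hback_porch, vsw/vfp/vbp become vsync_len/vfront_porch/vback_porch, and the separate interlace, sync-level, data-enable and pixel-clock-edge fields collapse into DISPLAY_FLAGS_* bits in .flags.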
static const struct of_device_id tvc_of_match[];
@@ -92,7 +93,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
if (!ddata->dev->of_node) {
in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
@@ -126,32 +127,32 @@ static void tvc_disable(struct omap_dss_device *dssdev)
}
static void tvc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void tvc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tvc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static u32 tvc_get_wss(struct omap_dss_device *dssdev)
@@ -253,14 +254,14 @@ static int tvc_probe(struct platform_device *pdev)
return -ENODEV;
}
- ddata->timings = tvc_pal_timings;
+ ddata->vm = tvc_pal_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &tvc_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = tvc_pal_timings;
+ dssdev->panel.vm = tvc_pal_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 684b7aeda411..d6875d9fcefa 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -19,32 +19,30 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings dvic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode dvic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 23500000,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
+ .hfront_porch = 48,
+ .hsync_len = 32,
+ .hback_porch = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
+ .vfront_porch = 3,
+ .vsync_len = 4,
+ .vback_porch = 7,
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
+ DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct i2c_adapter *i2c_adapter;
};
@@ -90,7 +88,7 @@ static int dvic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dvi->set_timings(in, &ddata->timings);
+ in->ops.dvi->set_timings(in, &ddata->vm);
r = in->ops.dvi->enable(in);
if (r)
@@ -115,32 +113,32 @@ static void dvic_disable(struct omap_dss_device *dssdev)
}
static void dvic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dvi->set_timings(in, timings);
+ in->ops.dvi->set_timings(in, vm);
}
static void dvic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int dvic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dvi->check_timings(in, timings);
+ return in->ops.dvi->check_timings(in, vm);
}
static int dvic_ddc_read(struct i2c_adapter *adapter,
@@ -287,14 +285,14 @@ static int dvic_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings = dvic_default_timings;
+ ddata->vm = dvic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &dvic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = dvic_default_timings;
+ dssdev->panel.vm = dvic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 7bdf83af9797..1ef130641bae 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -21,21 +21,18 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings hdmic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode hdmic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 25175000,
- .hsw = 96,
- .hfp = 16,
- .hbp = 48,
- .vsw = 2,
- .vfp = 11,
- .vbp = 31,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .interlace = false,
+ .hsync_len = 96,
+ .hfront_porch = 16,
+ .hback_porch = 48,
+ .vsync_len = 2,
+ .vfront_porch = 11,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
struct panel_drv_data {
@@ -44,7 +41,7 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
int hpd_gpio;
};
@@ -96,7 +93,7 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -123,32 +120,32 @@ static void hdmic_disable(struct omap_dss_device *dssdev)
}
static void hdmic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void hdmic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int hdmic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.hdmi->check_timings(in, timings);
+ return in->ops.hdmi->check_timings(in, vm);
}
static int hdmic_read_edid(struct omap_dss_device *dssdev,
@@ -259,14 +256,14 @@ static int hdmic_probe(struct platform_device *pdev)
goto err_reg;
}
- ddata->timings = hdmic_default_timings;
+ ddata->vm = hdmic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &hdmic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = hdmic_default_timings;
+ dssdev->panel.vm = hdmic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index fe4e7ec3bab0..f7a5731492d0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -27,7 +27,7 @@ struct panel_drv_data {
struct gpio_desc *enable_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -90,7 +90,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
r = in->ops.atv->enable(in);
if (r)
@@ -123,38 +123,38 @@ static void opa362_disable(struct omap_dss_device *dssdev)
}
static void opa362_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "set_timings\n");
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void opa362_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
dev_dbg(dssdev->dev, "get_timings\n");
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int opa362_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "check_timings\n");
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static void opa362_set_type(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index d768217cefe0..13e32d02c884 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -24,7 +24,7 @@ struct panel_drv_data {
int pd_gpio;
int data_lines;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -81,7 +81,7 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->timings);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
@@ -113,44 +113,43 @@ static void tfp410_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tfp410_fix_timings(struct omap_video_timings *timings)
+static void tfp410_fix_timings(struct videomode *vm)
{
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE;
}
static void tfp410_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tfp410_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tfp410_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static const struct omapdss_dvi_ops tfp410_dvi_ops = {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 46855c8f5cbf..6d8f79b29af6 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
struct gpio_desc *ls_oe_gpio;
struct gpio_desc *hpd_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -80,7 +80,7 @@ static int tpd_enable(struct omap_dss_device *dssdev)
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -105,33 +105,33 @@ static void tpd_disable(struct omap_dss_device *dssdev)
}
static void tpd_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void tpd_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tpd_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
- r = in->ops.hdmi->check_timings(in, timings);
+ r = in->ops.hdmi->check_timings(in, vm);
return r;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 7f16f985ab22..38003208d9ca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -28,7 +28,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
/* used for non-DT boot, to be removed */
int backlight_gpio;
@@ -80,7 +80,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -122,32 +122,32 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
}
static void panel_dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int panel_dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver panel_dpi_ops = {
@@ -169,7 +169,6 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
const struct panel_dpi_platform_data *pdata;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev, *in;
- struct videomode vm;
int r;
pdata = dev_get_platdata(&pdev->dev);
@@ -185,8 +184,7 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
ddata->data_lines = pdata->data_lines;
- videomode_from_timing(pdata->display_timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(pdata->display_timing, &ddata->vm);
dssdev = &ddata->dssdev;
dssdev->name = pdata->name;
@@ -214,7 +212,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
struct omap_dss_device *in;
int r;
struct display_timing timing;
- struct videomode vm;
struct gpio_desc *gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
@@ -245,8 +242,7 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
return r;
}
- videomode_from_timing(&timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(&timing, &ddata->vm);
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
@@ -295,7 +291,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->driver = &panel_dpi_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index b1f3b818edf4..dc026a843712 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -42,7 +42,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct platform_device *pdev;
@@ -382,8 +382,8 @@ static const struct backlight_ops dsicm_bl_ops = {
static void dsicm_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
static ssize_t dsicm_num_errors_show(struct device *dev,
@@ -589,7 +589,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
struct omap_dss_dsi_config dsi_config = {
.mode = OMAP_DSS_DSI_CMD_MODE,
.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .timings = &ddata->timings,
+ .vm = &ddata->vm,
.hs_clk_min = 150000000,
.hs_clk_max = 300000000,
.lp_clk_min = 7000000,
@@ -892,8 +892,8 @@ static int dsicm_update(struct omap_dss_device *dssdev,
/* XXX no need to send this every frame, but dsi break if not done */
r = dsicm_set_update_window(ddata, 0, 0,
- dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
+ dssdev->panel.vm.hactive,
+ dssdev->panel.vm.vactive);
if (r)
goto err;
@@ -1023,9 +1023,8 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
goto err1;
}
- size = min(w * h * 3,
- dssdev->panel.timings.x_res *
- dssdev->panel.timings.y_res * 3);
+ size = min((u32)w * h * 3,
+ dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3);
in->ops.dsi->bus_lock(in);
@@ -1186,14 +1185,14 @@ static int dsicm_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings.x_res = 864;
- ddata->timings.y_res = 480;
- ddata->timings.pixelclock = 864 * 480 * 60;
+ ddata->vm.hactive = 864;
+ ddata->vm.vactive = 480;
+ ddata->vm.pixelclock = 864 * 480 * 60;
dssdev = &ddata->dssdev;
dssdev->dev = dev;
dssdev->driver = &dsicm_ops;
- dssdev->panel.timings = ddata->timings;
+ dssdev->panel.vm = ddata->vm;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->owner = THIS_MODULE;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 6dfb96cea293..43d21edb51f5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -19,25 +19,28 @@
#include "../dss/omapdss.h"
-static struct omap_video_timings lb035q02_timings = {
- .x_res = 320,
- .y_res = 240,
+static struct videomode lb035q02_vm = {
+ .hactive = 320,
+ .vactive = 240,
.pixelclock = 6500000,
- .hsw = 2,
- .hfp = 20,
- .hbp = 68,
-
- .vsw = 2,
- .vfp = 4,
- .vbp = 18,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 20,
+ .hback_porch = 68,
+
+ .vsync_len = 2,
+ .vfront_porch = 4,
+ .vback_porch = 18,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DE is active LOW
+ * DATA needs to be driven on the FALLING edge
+ */
};
struct panel_drv_data {
@@ -48,7 +51,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *enable_gpio;
};
@@ -158,7 +161,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -189,32 +192,32 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
}
static void lb035q02_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void lb035q02_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int lb035q02_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver lb035q02_ops = {
@@ -278,14 +281,14 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = lb035q02_timings;
+ ddata->vm = lb035q02_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 9f3d6f48f3e1..2de27ba01552 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -23,7 +23,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -65,22 +65,20 @@ static const struct {
{ 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
};
-static const struct omap_video_timings nec_8048_panel_timings = {
- .x_res = LCD_XRES,
- .y_res = LCD_YRES,
+static const struct videomode nec_8048_panel_vm = {
+ .hactive = LCD_XRES,
+ .vactive = LCD_YRES,
.pixelclock = LCD_PIXEL_CLOCK,
- .hfp = 6,
- .hsw = 1,
- .hbp = 4,
- .vfp = 3,
- .vsw = 1,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 6,
+ .hsync_len = 1,
+ .hback_porch = 4,
+ .vfront_porch = 3,
+ .vsync_len = 1,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -157,7 +155,7 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -188,32 +186,32 @@ static void nec_8048_disable(struct omap_dss_device *dssdev)
}
static void nec_8048_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void nec_8048_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int nec_8048_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver nec_8048_ops = {
@@ -306,14 +304,14 @@ static int nec_8048_probe(struct spi_device *spi)
goto err_gpio;
}
- ddata->videomode = nec_8048_panel_timings;
+ ddata->vm = nec_8048_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 3d3efc561ea9..04fe235b7cac 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
struct gpio_desc *ini_gpio; /* high = power on */
@@ -35,25 +35,27 @@ struct panel_drv_data {
struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
};
-static const struct omap_video_timings sharp_ls_timings = {
- .x_res = 480,
- .y_res = 640,
+static const struct videomode sharp_ls_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 19200000,
- .hsw = 2,
- .hfp = 1,
- .hbp = 28,
-
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 1,
+ .hback_porch = 28,
+
+ .vsync_len = 1,
+ .vfront_porch = 1,
+ .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DATA needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -99,7 +101,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
@@ -154,32 +156,32 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
}
static void sharp_ls_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int sharp_ls_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver sharp_ls_ops = {
@@ -279,14 +281,14 @@ static int sharp_ls_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->videomode = sharp_ls_timings;
+ ddata->vm = sharp_ls_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
dssdev->driver = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 3557a4c7dd7b..746cb8d9cba1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -71,7 +71,7 @@ struct panel_drv_data {
int reset_gpio;
int datapairs;
- struct omap_video_timings videomode;
+ struct videomode vm;
char *name;
int enabled;
@@ -92,23 +92,20 @@ struct panel_drv_data {
struct backlight_device *bl_dev;
};
-static const struct omap_video_timings acx565akm_panel_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode acx565akm_panel_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 24000000,
- .hfp = 28,
- .hsw = 4,
- .hbp = 24,
- .vfp = 3,
- .vsw = 3,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hfront_porch = 28,
+ .hsync_len = 4,
+ .hback_porch = 24,
+ .vfront_porch = 3,
+ .vsync_len = 3,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -548,7 +545,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- in->ops.sdi->set_timings(in, &ddata->videomode);
+ in->ops.sdi->set_timings(in, &ddata->vm);
if (ddata->datapairs > 0)
in->ops.sdi->set_datapairs(in, ddata->datapairs);
@@ -662,32 +659,32 @@ static void acx565akm_disable(struct omap_dss_device *dssdev)
}
static void acx565akm_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.sdi->set_timings(in, timings);
+ in->ops.sdi->set_timings(in, vm);
}
static void acx565akm_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int acx565akm_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.sdi->check_timings(in, timings);
+ return in->ops.sdi->check_timings(in, vm);
}
static struct omap_dss_driver acx565akm_ops = {
@@ -845,14 +842,14 @@ static int acx565akm_probe(struct spi_device *spi)
acx565akm_bl_update_status(bldev);
- ddata->videomode = acx565akm_panel_timings;
+ ddata->vm = acx565akm_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index e859b3f893f7..f313dbfcbacb 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -37,28 +37,29 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct spi_device *spi_dev;
};
-static struct omap_video_timings td028ttec1_panel_timings = {
- .x_res = 480,
- .y_res = 640,
+static struct videomode td028ttec1_panel_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 22153000,
- .hfp = 24,
- .hsw = 8,
- .hbp = 8,
- .vfp = 4,
- .vsw = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 24,
+ .hsync_len = 8,
+ .hback_porch = 8,
+ .vfront_porch = 4,
+ .vsync_len = 2,
+ .vback_porch = 2,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define JBT_COMMAND 0x000
@@ -208,7 +209,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -325,32 +326,32 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
}
static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver td028ttec1_ops = {
@@ -414,14 +415,14 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = td028ttec1_panel_timings;
+ ddata->vm = td028ttec1_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 66c6bbe6472b..0787dba44faa 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -56,7 +56,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -72,25 +72,27 @@ struct panel_drv_data {
u32 power_on_resume:1;
};
-static const struct omap_video_timings tpo_td043_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode tpo_td043_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 36000000,
- .hsw = 1,
- .hfp = 68,
- .hbp = 214,
+ .hsync_len = 1,
+ .hfront_porch = 68,
+ .hback_porch = 214,
- .vsw = 1,
- .vfp = 39,
- .vbp = 34,
+ .vsync_len = 1,
+ .vfront_porch = 39,
+ .vback_porch = 34,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -378,7 +380,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -418,32 +420,32 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
}
static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver tpo_td043_ops = {
@@ -546,14 +548,14 @@ static int tpo_td043_probe(struct spi_device *spi)
goto err_sysfs;
}
- ddata->videomode = tpo_td043_timings;
+ ddata->vm = tpo_td043_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
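
For reference, a minimal sketch (not part of the patch) of the field mapping applied by hand in each panel conversion above, using the Sharp LS037V7DW01 values from the sharp_ls hunk earlier as the example. Field and flag names are the generic ones from include/video/videomode.h (which pulls in the DISPLAY_FLAGS_* definitions); the "was ..." comments name the old omap_video_timings members they replace.

#include <video/videomode.h>

static const struct videomode example_sharp_ls_vm = {
	.hactive       = 480,        /* was .x_res */
	.vactive       = 640,        /* was .y_res */
	.pixelclock    = 19200000,

	.hsync_len     = 2,          /* was .hsw */
	.hfront_porch  = 1,          /* was .hfp */
	.hback_porch   = 28,         /* was .hbp */

	.vsync_len     = 1,          /* was .vsw */
	.vfront_porch  = 1,          /* was .vfp */
	.vback_porch   = 1,          /* was .vbp */

	/* sync/DE levels and clock edges become flag bits */
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
		 DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
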
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 535240fba671..c839f6456db2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -75,7 +75,7 @@ struct dispc_features {
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -1679,7 +1679,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
- bool chroma_upscale = plane != OMAP_DSS_WB ? true : false;
+ bool chroma_upscale = plane != OMAP_DSS_WB;
if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
@@ -2179,7 +2179,7 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
* undocumented horizontal position and timing related limitations.
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *t, u16 pos_x,
+ const struct videomode *vm, u16 pos_x,
u16 width, u16 height, u16 out_width, u16 out_height,
bool five_taps)
{
@@ -2189,14 +2189,16 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
u64 val, blank;
int i;
- nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+ nonactive = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch - out_width;
i = 0;
if (out_height < height)
i++;
if (out_width < width)
i++;
- blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+ blank = div_u64((u64)(vm->hback_porch + vm->hsync_len + vm->hfront_porch) *
+ lclk, pclk);
DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
if (blank <= limits[i])
return -EINVAL;
@@ -2231,7 +2233,7 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
}
static unsigned long calc_core_clk_five_taps(unsigned long pclk,
- const struct omap_video_timings *mgr_timings, u16 width,
+ const struct videomode *vm, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
@@ -2242,7 +2244,7 @@ static unsigned long calc_core_clk_five_taps(unsigned long pclk,
return (unsigned long) pclk;
if (height > out_height) {
- unsigned int ppl = mgr_timings->x_res;
+ unsigned int ppl = vm->hactive;
tmp = (u64)pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
@@ -2324,7 +2326,7 @@ static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
}
static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2370,7 +2372,7 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
}
static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2392,7 +2394,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
again:
if (*five_taps)
- *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
+ *core_clk = calc_core_clk_five_taps(pclk, vm,
in_width, in_height, out_width,
out_height, color_mode);
else
@@ -2400,7 +2402,7 @@ again:
in_height, out_width, out_height,
mem_to_mem);
- error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
+ error = check_horiz_timing_omap3(pclk, lclk, vm,
pos_x, in_width, in_height, out_width,
out_height, *five_taps);
if (error && *five_taps) {
@@ -2435,7 +2437,7 @@ again:
return -EINVAL;
}
- if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, in_width,
+ if (check_horiz_timing_omap3(pclk, lclk, vm, pos_x, in_width,
in_height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
@@ -2455,7 +2457,7 @@ again:
}
static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2501,7 +2503,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, u16 pos_x,
@@ -2515,7 +2517,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (width == out_width && height == out_height)
return 0;
- if (!mem_to_mem && (pclk == 0 || mgr_timings->pixelclock == 0)) {
+ if (!mem_to_mem && (pclk == 0 || vm->pixelclock == 0)) {
DSSERR("cannot calculate scaling settings: pclk is zero\n");
return -EINVAL;
}
@@ -2551,7 +2553,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(pclk, lclk, mgr_timings, width, height,
+ ret = dispc.feat->calc_scaling(pclk, lclk, vm, width, height,
out_width, out_height, color_mode, five_taps,
x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
@@ -2591,7 +2593,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 out_width, u16 out_height, enum omap_color_mode color_mode,
u8 rotation, bool mirror, u8 zorder, u8 pre_mult_alpha,
u8 global_alpha, enum omap_dss_rotation_type rotation_type,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
bool five_taps = true;
@@ -2605,7 +2607,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 in_height = height;
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
- bool ilace = mgr_timings->interlace;
+ bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
unsigned long pclk = dispc_plane_pclk_rate(plane);
unsigned long lclk = dispc_plane_lclk_rate(plane);
@@ -2647,7 +2649,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (!dss_feat_color_mode_supported(plane, color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(pclk, lclk, caps, mgr_timings, in_width,
+ r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
in_height, out_width, out_height, color_mode,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
@@ -2784,7 +2786,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
}
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
int r;
@@ -2803,14 +2805,14 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, mgr_timings, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem);
return r;
}
EXPORT_SYMBOL(dispc_ovl_setup);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *mgr_timings)
+ bool mem_to_mem, const struct videomode *vm)
{
int r;
u32 l;
@@ -2819,8 +2821,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
const u8 zorder = 0, global_alpha = 0;
const bool replication = false;
bool truncation;
- int in_width = mgr_timings->x_res;
- int in_height = mgr_timings->y_res;
+ int in_width = vm->hactive;
+ int in_height = vm->vactive;
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
@@ -2833,7 +2835,7 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, mgr_timings, mem_to_mem);
+ replication, vm, mem_to_mem);
switch (wi->color_mode) {
case OMAP_DSS_COLOR_RGB16:
@@ -2867,8 +2869,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
} else {
int wbdelay;
- wbdelay = min(mgr_timings->vfp + mgr_timings->vsw +
- mgr_timings->vbp, 255);
+ wbdelay = min(vm->vfront_porch +
+ vm->vsync_len + vm->vback_porch, (u32)255);
/* WBDELAYCOUNT */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
@@ -3093,10 +3095,10 @@ static bool _dispc_mgr_size_ok(u16 width, u16 height)
height <= dispc.feat->mgr_height_max;
}
-static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
+static bool _dispc_lcd_timings_ok(int hsync_len, int hfp, int hbp,
int vsw, int vfp, int vbp)
{
- if (hsw < 1 || hsw > dispc.feat->sw_max ||
+ if (hsync_len < 1 || hsync_len > dispc.feat->sw_max ||
hfp < 1 || hfp > dispc.feat->hp_max ||
hbp < 1 || hbp > dispc.feat->hp_max ||
vsw < 1 || vsw > dispc.feat->sw_max ||
@@ -3110,113 +3112,77 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
- return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+ return pclk <= dispc.feat->max_lcd_pclk;
else
- return pclk <= dispc.feat->max_tv_pclk ? true : false;
+ return pclk <= dispc.feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
{
- if (!_dispc_mgr_size_ok(timings->x_res, timings->y_res))
+ if (!_dispc_mgr_size_ok(vm->hactive, vm->vactive))
return false;
- if (!_dispc_mgr_pclk_ok(channel, timings->pixelclock))
+ if (!_dispc_mgr_pclk_ok(channel, vm->pixelclock))
return false;
if (dss_mgr_is_lcd(channel)) {
/* TODO: OMAP4+ supports interlace for LCD outputs */
- if (timings->interlace)
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
return false;
- if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw, timings->vfp,
- timings->vbp))
+ if (!_dispc_lcd_timings_ok(vm->hsync_len,
+ vm->hfront_porch, vm->hback_porch,
+ vm->vsync_len, vm->vfront_porch,
+ vm->vback_porch))
return false;
}
return true;
}
-static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
- int hfp, int hbp, int vsw, int vfp, int vbp,
- enum omap_dss_signal_level vsync_level,
- enum omap_dss_signal_level hsync_level,
- enum omap_dss_signal_edge data_pclk_edge,
- enum omap_dss_signal_level de_level,
- enum omap_dss_signal_edge sync_pclk_edge)
-
+static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
+ const struct videomode *vm)
{
u32 timing_h, timing_v, l;
bool onoff, rf, ipc, vs, hs, de;
- timing_h = FLD_VAL(hsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(hfp-1, dispc.feat->fp_start, 8) |
- FLD_VAL(hbp-1, dispc.feat->bp_start, 20);
- timing_v = FLD_VAL(vsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(vfp, dispc.feat->fp_start, 8) |
- FLD_VAL(vbp, dispc.feat->bp_start, 20);
+ timing_h = FLD_VAL(vm->hsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->hfront_porch - 1, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->hback_porch - 1, dispc.feat->bp_start, 20);
+ timing_v = FLD_VAL(vm->vsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->vfront_porch, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->vback_porch, dispc.feat->bp_start, 20);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- switch (vsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- vs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
vs = false;
- break;
- default:
- BUG();
- }
+ else
+ vs = true;
- switch (hsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- hs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
hs = false;
- break;
- default:
- BUG();
- }
+ else
+ hs = true;
- switch (de_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- de = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
de = false;
- break;
- default:
- BUG();
- }
+ else
+ de = true;
- switch (data_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
ipc = false;
- break;
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
+ else
ipc = true;
- break;
- default:
- BUG();
- }
/* always use the 'rf' setting */
onoff = true;
- switch (sync_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
- rf = false;
- break;
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
rf = true;
- break;
- default:
- BUG();
- }
+ else
+ rf = false;
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
@@ -3253,13 +3219,13 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
/* change name to mode? */
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
unsigned xtot, ytot;
unsigned long ht, vt;
- struct omap_video_timings t = *timings;
+ struct videomode t = *vm;
- DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
+ DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
if (!dispc_mgr_timings_ok(channel, &t)) {
BUG();
@@ -3267,34 +3233,37 @@ void dispc_mgr_set_timings(enum omap_channel channel,
}
if (dss_mgr_is_lcd(channel)) {
- _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
- t.vfp, t.vbp, t.vsync_level, t.hsync_level,
- t.data_pclk_edge, t.de_level, t.sync_pclk_edge);
+ _dispc_mgr_set_lcd_timings(channel, &t);
- xtot = t.x_res + t.hfp + t.hsw + t.hbp;
- ytot = t.y_res + t.vfp + t.vsw + t.vbp;
+ xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
+ ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
- ht = timings->pixelclock / xtot;
- vt = timings->pixelclock / xtot / ytot;
+ ht = vm->pixelclock / xtot;
+ vt = vm->pixelclock / xtot / ytot;
- DSSDBG("pck %u\n", timings->pixelclock);
- DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
- t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
+ DSSDBG("pck %lu\n", vm->pixelclock);
+ DSSDBG("hsync_len %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+ t.hsync_len, t.hfront_porch, t.hback_porch,
+ t.vsync_len, t.vfront_porch, t.vback_porch);
DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n",
- t.vsync_level, t.hsync_level, t.data_pclk_edge,
- t.de_level, t.sync_pclk_edge);
+ !!(t.flags & DISPLAY_FLAGS_VSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_HSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE),
+ !!(t.flags & DISPLAY_FLAGS_DE_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_SYNC_POSEDGE));
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
} else {
- if (t.interlace)
- t.y_res /= 2;
+ if (t.flags & DISPLAY_FLAGS_INTERLACED)
+ t.vactive /= 2;
if (dispc.feat->supports_double_pixel)
- REG_FLD_MOD(DISPC_CONTROL, t.double_pixel ? 1 : 0,
- 19, 17);
+ REG_FLD_MOD(DISPC_CONTROL,
+ !!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
+ 19, 17);
}
- dispc_mgr_set_size(channel, t.x_res, t.y_res);
+ dispc_mgr_set_size(channel, t.hactive, t.vactive);
}
EXPORT_SYMBOL(dispc_mgr_set_timings);
@@ -4214,23 +4183,20 @@ EXPORT_SYMBOL(dispc_free_irq);
*/
static const struct dispc_errata_i734_data {
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_overlay_info ovli;
struct omap_overlay_manager_info mgri;
struct dss_lcd_mgr_config lcd_conf;
} i734 = {
- .timings = {
- .x_res = 8, .y_res = 1,
+ .vm = {
+ .hactive = 8, .vactive = 1,
.pixelclock = 16000000,
- .hsw = 8, .hfp = 4, .hbp = 4,
- .vsw = 1, .vfp = 1, .vbp = 1,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .interlace = false,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .double_pixel = false,
+ .hsync_len = 8, .hfront_porch = 4, .hback_porch = 4,
+ .vsync_len = 1, .vfront_porch = 1, .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
},
.ovli = {
.screen_width = 1,
@@ -4320,7 +4286,7 @@ static void dispc_errata_i734_wa(void)
/* Setup and enable GFX plane */
dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
- dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
+ dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.vm, false);
dispc_ovl_enable(OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
@@ -4328,7 +4294,7 @@ static void dispc_errata_i734_wa(void)
dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
&lcd_conf.clock_info);
dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
- dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
+ dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.vm);
dispc_clear_irqstatus(framedone_irq);
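
A standalone sketch (not part of the patch; only <video/videomode.h> is assumed) of the total-line/total-frame arithmetic that dispc_mgr_set_timings() above now performs on struct videomode, which can be handy when sanity-checking a mode:

#include <video/videomode.h>

/*
 * Rough horizontal/vertical sync rates for a mode, mirroring the
 * xtot/ytot computation in dispc_mgr_set_timings(). No guard against
 * zero totals; callers are assumed to pass validated timings.
 */
static void videomode_sync_rates(const struct videomode *vm,
				 unsigned long *hsync_hz,
				 unsigned long *vsync_hz)
{
	unsigned int xtot = vm->hactive + vm->hfront_porch +
			    vm->hsync_len + vm->hback_porch;
	unsigned int ytot = vm->vactive + vm->vfront_porch +
			    vm->vsync_len + vm->vback_porch;

	*hsync_hz = vm->pixelclock / xtot;
	*vsync_hz = vm->pixelclock / xtot / ytot;
}
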
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8dcdd7cf9937..425a5a8dff8b 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -35,8 +35,8 @@
void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
EXPORT_SYMBOL(omapdss_default_get_resolution);
@@ -72,9 +72,9 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = dssdev->panel.timings;
+ *vm = dssdev->panel.vm;
}
EXPORT_SYMBOL(omapdss_default_get_timings);
@@ -217,73 +217,3 @@ struct omap_dss_device *omap_dss_find_device(void *data,
return NULL;
}
EXPORT_SYMBOL(omap_dss_find_device);
-
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt)
-{
- memset(ovt, 0, sizeof(*ovt));
-
- ovt->pixelclock = vm->pixelclock;
- ovt->x_res = vm->hactive;
- ovt->hbp = vm->hback_porch;
- ovt->hfp = vm->hfront_porch;
- ovt->hsw = vm->hsync_len;
- ovt->y_res = vm->vactive;
- ovt->vbp = vm->vback_porch;
- ovt->vfp = vm->vfront_porch;
- ovt->vsw = vm->vsync_len;
-
- ovt->vsync_level = vm->flags & DISPLAY_FLAGS_VSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->hsync_level = vm->flags & DISPLAY_FLAGS_HSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
- OMAPDSS_DRIVE_SIG_RISING_EDGE :
- OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- ovt->sync_pclk_edge = ovt->data_pclk_edge;
-}
-EXPORT_SYMBOL(videomode_to_omap_video_timings);
-
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm)
-{
- memset(vm, 0, sizeof(*vm));
-
- vm->pixelclock = ovt->pixelclock;
-
- vm->hactive = ovt->x_res;
- vm->hback_porch = ovt->hbp;
- vm->hfront_porch = ovt->hfp;
- vm->hsync_len = ovt->hsw;
- vm->vactive = ovt->y_res;
- vm->vback_porch = ovt->vbp;
- vm->vfront_porch = ovt->vfp;
- vm->vsync_len = ovt->vsw;
-
- if (ovt->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
-
- if (ovt->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
-
- if (ovt->de_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_DE_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_DE_LOW;
-
- if (ovt->data_pclk_edge == OMAPDSS_DRIVE_SIG_RISING_EDGE)
- vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- else
- vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
-}
-EXPORT_SYMBOL(omap_video_timings_to_videomode);
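
With the two omapdss-specific conversion helpers above removed, a caller that starts from a DRM mode can fill a struct videomode with the stock DRM helper instead of round-tripping through omap_video_timings. A hypothetical usage sketch follows; drm_display_mode_to_videomode() is the existing helper from drm_modes.c, while the check_timings call is only an illustration of the reworked omap_dss_driver ops seen elsewhere in this diff, not code from the patch.

#include <drm/drm_modes.h>
#include <video/videomode.h>
#include "omapdss.h"	/* local omapdrm header, assumed in scope */

static int example_check_drm_mode(struct omap_dss_device *dssdev,
				  const struct drm_display_mode *mode)
{
	struct videomode vm;

	/* translate hdisplay/hsync_start/... into hactive/porches/flags */
	drm_display_mode_to_videomode(mode, &vm);

	/* ask the display driver whether it can do these timings */
	return dssdev->driver->check_timings(dssdev, &vm);
}
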
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b268295b76cf..e75162d26ac0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -47,7 +47,7 @@ struct dpi_data {
struct mutex lock;
- struct omap_video_timings timings;
+ struct videomode vm;
struct dss_lcd_mgr_config mgr_config;
int data_lines;
@@ -333,31 +333,31 @@ static int dpi_set_mode(struct dpi_data *dpi)
{
struct omap_dss_device *out = &dpi->output;
enum omap_channel channel = out->dispc_channel;
- struct omap_video_timings *t = &dpi->timings;
+ struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
unsigned long pck;
int r = 0;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
+ r = dpi_set_pll_clk(dpi, channel, vm->pixelclock, &fck,
&lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
pck = fck / lck_div / pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
return 0;
}
@@ -476,7 +476,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -484,25 +484,25 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&dpi->lock);
- dpi->timings = *timings;
+ dpi->vm = *vm;
mutex_unlock(&dpi->lock);
}
static void dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
mutex_lock(&dpi->lock);
- *timings = dpi->timings;
+ *vm = dpi->vm;
mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
enum omap_channel channel = dpi->output.dispc_channel;
@@ -512,23 +512,23 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (timings->x_res % 8 != 0)
+ if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
@@ -540,7 +540,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
pck = fck / lck_div / pck_div;
- timings->pixelclock = pck;
+ vm->pixelclock = pck;
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index e1be5e795cd8..f060bda31235 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -289,7 +289,7 @@ struct dsi_clk_calc_ctx {
struct dss_pll_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
- struct omap_video_timings dispc_vm;
+ struct videomode vm;
struct omap_dss_dsi_videomode_timings dsi_vm;
};
@@ -383,7 +383,7 @@ struct dsi_data {
unsigned scp_clk_refcount;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format pix_fmt;
enum omap_dss_dsi_mode mode;
struct omap_dss_dsi_videomode_timings vm_timings;
@@ -3321,12 +3321,12 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
*/
- if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
+ if (dsi->line_buffer_size <= vm->hactive * bpp / 8)
num_line_buffers = 0;
else
num_line_buffers = 2;
@@ -3453,7 +3453,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
@@ -3494,7 +3494,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
exiths_clk = ths_exit + tclk_trail;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
if (!hsa_blanking_mode) {
@@ -3705,7 +3705,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int tl, t_he, width_bytes;
@@ -3713,7 +3713,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
t_he = hsync_end ?
((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
@@ -3722,7 +3722,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
hfp, hsync_end ? hsa : 0, tl);
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
- vsa, timings->y_res);
+ vsa, vm->vactive);
r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
@@ -3738,7 +3738,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
- r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */
+ r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
}
@@ -3856,7 +3856,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
/* MODE, 1 = video mode */
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
- word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
+ word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
dsi_vc_write_long_header(dsidev, channel, data_type,
word_count, 0);
@@ -3918,8 +3918,8 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
int r;
const unsigned channel = dsi->update_channel;
const unsigned line_buf_size = dsi->line_buffer_size;
- u16 w = dsi->timings.x_res;
- u16 h = dsi->timings.y_res;
+ u16 w = dsi->vm.hactive;
+ u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
@@ -3969,7 +3969,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(dispc_channel, &dsi->timings);
+ dss_mgr_set_timings(dispc_channel, &dsi->vm);
dss_mgr_start_update(dispc_channel);
@@ -4056,8 +4056,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dw = dsi->timings.x_res;
- dh = dsi->timings.y_res;
+ dw = dsi->vm.hactive;
+ dh = dsi->vm.vactive;
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dw * dh *
@@ -4120,16 +4120,21 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
/*
* override interlace, logic level and edge related parameters in
- * omap_video_timings with default values
+ * videomode with default values
*/
- dsi->timings.interlace = false;
- dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(channel, &dsi->timings);
+ dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(channel, &dsi->vm);
r = dsi_configure_dispc_clocks(dsidev);
if (r)
@@ -4331,7 +4336,7 @@ static void print_dsi_vm(const char *str,
wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
- bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
+ bl = t->hss + t->hsa + t->hse + t->hbp + t->hfront_porch;
tot = bl + pps;
#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
@@ -4340,14 +4345,14 @@ static void print_dsi_vm(const char *str,
"%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
str,
byteclk,
- t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
+ t->hss, t->hsa, t->hse, t->hbp, pps, t->hfront_porch,
bl, pps, tot,
TO_DSI_T(t->hss),
TO_DSI_T(t->hsa),
TO_DSI_T(t->hse),
TO_DSI_T(t->hbp),
TO_DSI_T(pps),
- TO_DSI_T(t->hfp),
+ TO_DSI_T(t->hfront_porch),
TO_DSI_T(bl),
TO_DSI_T(pps),
@@ -4356,13 +4361,13 @@ static void print_dsi_vm(const char *str,
#undef TO_DSI_T
}
-static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
+static void print_dispc_vm(const char *str, const struct videomode *vm)
{
- unsigned long pck = t->pixelclock;
+ unsigned long pck = vm->pixelclock;
int hact, bl, tot;
- hact = t->x_res;
- bl = t->hsw + t->hbp + t->hfp;
+ hact = vm->hactive;
+ bl = vm->hsync_len + vm->hbp + vm->hfront_porch;
tot = hact + bl;
#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
@@ -4371,12 +4376,12 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
"%u/%u/%u/%u = %u + %u = %u\n",
str,
pck,
- t->hsw, t->hbp, hact, t->hfp,
+ vm->hsync_len, vm->hbp, hact, vm->hfront_porch,
bl, hact, tot,
- TO_DISPC_T(t->hsw),
- TO_DISPC_T(t->hbp),
+ TO_DISPC_T(vm->hsync_len),
+ TO_DISPC_T(vm->hbp),
TO_DISPC_T(hact),
- TO_DISPC_T(t->hfp),
+ TO_DISPC_T(vm->hfront_porch),
TO_DISPC_T(bl),
TO_DISPC_T(hact),
TO_DISPC_T(tot));
@@ -4387,7 +4392,7 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
static void print_dsi_dispc_vm(const char *str,
const struct omap_dss_dsi_videomode_timings *t)
{
- struct omap_video_timings vm = { 0 };
+ struct videomode vm = { 0 };
unsigned long byteclk = t->hsclk / 4;
unsigned long pck;
u64 dsi_tput;
@@ -4396,13 +4401,13 @@ static void print_dsi_dispc_vm(const char *str,
dsi_tput = (u64)byteclk * t->ndl * 8;
pck = (u32)div64_u64(dsi_tput, t->bitspp);
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
- dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
+ dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfront_porch;
vm.pixelclock = pck;
- vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
+ vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
- vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
- vm.x_res = t->hact;
+ vm.hfront_porch = div64_u64((u64)t->hfront_porch * pck, byteclk);
+ vm.hactive = t->hact;
print_dispc_vm(str, &vm);
}
@@ -4412,19 +4417,19 @@ static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct omap_video_timings *t = &ctx->dispc_vm;
+ struct videomode *vm = &ctx->vm;
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
- *t = *ctx->config->timings;
- t->pixelclock = pck;
- t->x_res = ctx->config->timings->x_res;
- t->y_res = ctx->config->timings->y_res;
- t->hsw = t->hfp = t->hbp = t->vsw = 1;
- t->vfp = t->vbp = 0;
+ *vm = *ctx->config->vm;
+ vm->pixelclock = pck;
+ vm->hactive = ctx->config->vm->hactive;
+ vm->vactive = ctx->config->vm->vactive;
+ vm->hsync_len = vm->hfront_porch = vm->hback_porch = vm->vsync_len = 1;
+ vm->vfront_porch = vm->vback_porch = 0;
return true;
}
@@ -4475,7 +4480,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
* especially as we go to LP between each pixel packet due to HW
* "feature". So let's just estimate very roughly and multiply by 1.5.
*/
- pck = cfg->timings->pixelclock;
+ pck = cfg->vm->pixelclock;
pck = pck * 3 / 2;
txbyteclk = pck * bitspp / 8 / ndl;
@@ -4510,14 +4515,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
int dispc_htot, dispc_hbl; /* pixels */
int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
int hfp, hsa, hbp;
- const struct omap_video_timings *req_vm;
- struct omap_video_timings *dispc_vm;
+ const struct videomode *req_vm;
+ struct videomode *dispc_vm;
struct omap_dss_dsi_videomode_timings *dsi_vm;
u64 dsi_tput, dispc_tput;
dsi_tput = (u64)byteclk * ndl * 8;
- req_vm = cfg->timings;
+ req_vm = cfg->vm;
req_pck_min = ctx->req_pck_min;
req_pck_max = ctx->req_pck_max;
req_pck_nom = ctx->req_pck_nom;
@@ -4525,9 +4530,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dispc_pck = ctx->dispc_cinfo.pck;
dispc_tput = (u64)dispc_pck * bitspp;
- xres = req_vm->x_res;
+ xres = req_vm->hactive;
- panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
+ panel_hbl = req_vm->hfront_porch + req_vm->hback_porch +
+ req_vm->hsync_len;
panel_htot = xres + panel_hbl;
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
@@ -4557,7 +4563,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
hss = DIV_ROUND_UP(4, ndl);
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- if (ndl == 3 && req_vm->hsw == 0)
+ if (ndl == 3 && req_vm->hsync_len == 0)
hse = 1;
else
hse = DIV_ROUND_UP(4, ndl);
@@ -4596,14 +4602,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
hsa = 0;
- } else if (ndl == 3 && req_vm->hsw == 0) {
+ } else if (ndl == 3 && req_vm->hsync_len == 0) {
hsa = 0;
} else {
- hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
+ hsa = div64_u64((u64)req_vm->hsync_len * byteclk, req_pck_nom);
hsa = max(hsa - hse, 1);
}
- hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom);
hbp = max(hbp, 1);
hfp = dsi_hbl - (hss + hsa + hse + hbp);
@@ -4633,10 +4639,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dsi_vm->hact = xres;
dsi_vm->hfp = hfp;
- dsi_vm->vsa = req_vm->vsw;
- dsi_vm->vbp = req_vm->vbp;
- dsi_vm->vact = req_vm->y_res;
- dsi_vm->vfp = req_vm->vfp;
+ dsi_vm->vsa = req_vm->vsync_len;
+ dsi_vm->vbp = req_vm->vback_porch;
+ dsi_vm->vact = req_vm->vactive;
+ dsi_vm->vfp = req_vm->vfront_porch;
dsi_vm->trans_mode = cfg->trans_mode;
@@ -4650,19 +4656,19 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
/* setup DISPC videomode */
- dispc_vm = &ctx->dispc_vm;
+ dispc_vm = &ctx->vm;
*dispc_vm = *req_vm;
dispc_vm->pixelclock = dispc_pck;
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
+ hsa = div64_u64((u64)req_vm->hsync_len * dispc_pck,
req_pck_nom);
hsa = max(hsa, 1);
} else {
hsa = 1;
}
- hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * dispc_pck, req_pck_nom);
hbp = max(hbp, 1);
hfp = dispc_hbl - hsa - hbp;
@@ -4685,9 +4691,9 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (hfp < 1)
return false;
- dispc_vm->hfp = hfp;
- dispc_vm->hsw = hsa;
- dispc_vm->hbp = hbp;
+ dispc_vm->hfront_porch = hfp;
+ dispc_vm->hsync_len = hsa;
+ dispc_vm->hback_porch = hbp;
return true;
}
@@ -4707,9 +4713,9 @@ static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return false;
#ifdef PRINT_VERBOSE_VM_TIMINGS
- print_dispc_vm("dispc", &ctx->dispc_vm);
+ print_dispc_vm("dispc", &ctx->vm);
print_dsi_vm("dsi ", &ctx->dsi_vm);
- print_dispc_vm("req ", ctx->config->timings);
+ print_dispc_vm("req ", ctx->config->vm);
print_dsi_dispc_vm("act ", &ctx->dsi_vm);
#endif
@@ -4758,7 +4764,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
const struct omap_dss_dsi_config *cfg,
struct dsi_clk_calc_ctx *ctx)
{
- const struct omap_video_timings *t = cfg->timings;
+ const struct videomode *vm = cfg->vm;
unsigned long clkin;
unsigned long pll_min;
unsigned long pll_max;
@@ -4774,9 +4780,9 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
ctx->config = cfg;
/* these limits should come from the panel driver */
- ctx->req_pck_min = t->pixelclock - 1000;
- ctx->req_pck_nom = t->pixelclock;
- ctx->req_pck_max = t->pixelclock + 1000;
+ ctx->req_pck_min = vm->pixelclock - 1000;
+ ctx->req_pck_nom = vm->pixelclock;
+ ctx->req_pck_max = vm->pixelclock + 1000;
byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
@@ -4833,7 +4839,7 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
- dsi->timings = ctx.dispc_vm;
+ dsi->vm = ctx.vm;
dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
@@ -5342,7 +5348,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->phy_base) {
DSSERR("can't ioremap DSI PHY\n");
return -ENOMEM;
}
@@ -5362,7 +5368,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->pll_base) {
DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 4fd06dc41cb3..56493b290731 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -366,8 +366,7 @@ bool dispc_div_calc(unsigned long dispc,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data);
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
@@ -390,7 +389,7 @@ void dispc_wb_enable(bool enable);
bool dispc_wb_is_enabled(void);
void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *timings);
+ bool mem_to_mem, const struct videomode *vm);
/* VENC */
int venc_init_platform_driver(void) __init;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 63e711545865..fb6cccd02374 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -181,7 +181,7 @@ struct hdmi_video_format {
};
struct hdmi_config {
- struct omap_video_timings timings;
+ struct videomode vm;
struct hdmi_avi_infoframe infoframe;
enum hdmi_core_hdmi_dvi hdmi_dvi_mode;
};
@@ -298,11 +298,11 @@ int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
struct hdmi_video_format *video_fmt);
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param);
+ struct videomode *vm, struct hdmi_config *param);
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cbd28dfdb86a..e7162c16de2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -155,7 +155,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct hdmi_wp_data *wp = &hdmi.wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
@@ -169,12 +169,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_wp_clear_irqenable(wp, 0xffffffff);
hdmi_wp_set_irqstatus(wp, 0xffffffff);
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -209,7 +210,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -255,30 +256,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -352,7 +353,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -643,7 +644,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
hd->audio_config = *dss_audio;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ef3afe99e487..e05b7ac4f7dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -310,7 +310,7 @@ void hdmi4_configure(struct hdmi_core_data *core,
struct hdmi_wp_data *wp, struct hdmi_config *cfg)
{
/* HDMI */
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
/* HDMI core */
struct hdmi_core_video_config v_core_cfg;
@@ -318,16 +318,16 @@ void hdmi4_configure(struct hdmi_core_data *core,
hdmi_core_init(&v_core_cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/*
* configure core video part
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 0c0a5139a301..678dfb02764a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -172,7 +172,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned pc;
@@ -181,12 +181,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
return r;
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -226,7 +227,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -272,30 +273,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -378,7 +379,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -669,7 +670,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 8ab2093daa12..8de1d7b2ae55 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -292,35 +292,35 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
{
DSSDBG("hdmi_core_init\n");
- video_cfg->v_fc_config.timings = cfg->timings;
+ video_cfg->v_fc_config.vm = cfg->vm;
/* video core */
video_cfg->data_enable_pol = 1; /* It is always 1*/
- video_cfg->hblank = cfg->timings.hfp +
- cfg->timings.hbp + cfg->timings.hsw;
+ video_cfg->hblank = cfg->vm.hfront_porch +
+ cfg->vm.hback_porch + cfg->vm.hsync_len;
video_cfg->vblank_osc = 0;
- video_cfg->vblank = cfg->timings.vsw +
- cfg->timings.vfp + cfg->timings.vbp;
+ video_cfg->vblank = cfg->vm.vsync_len + cfg->vm.vfront_porch +
+ cfg->vm.vback_porch;
video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
- if (cfg->timings.interlace) {
+ if (cfg->vm.flags & DISPLAY_FLAGS_INTERLACED) {
/* set vblank_osc if vblank is fractional */
if (video_cfg->vblank % 2 != 0)
video_cfg->vblank_osc = 1;
- video_cfg->v_fc_config.timings.y_res /= 2;
+ video_cfg->v_fc_config.vm.vactive /= 2;
video_cfg->vblank /= 2;
- video_cfg->v_fc_config.timings.vfp /= 2;
- video_cfg->v_fc_config.timings.vsw /= 2;
- video_cfg->v_fc_config.timings.vbp /= 2;
+ video_cfg->v_fc_config.vm.vfront_porch /= 2;
+ video_cfg->v_fc_config.vm.vsync_len /= 2;
+ video_cfg->v_fc_config.vm.vback_porch /= 2;
}
- if (cfg->timings.double_pixel) {
- video_cfg->v_fc_config.timings.x_res *= 2;
+ if (cfg->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
+ video_cfg->v_fc_config.vm.hactive *= 2;
video_cfg->hblank *= 2;
- video_cfg->v_fc_config.timings.hfp *= 2;
- video_cfg->v_fc_config.timings.hsw *= 2;
- video_cfg->v_fc_config.timings.hbp *= 2;
+ video_cfg->v_fc_config.vm.hfront_porch *= 2;
+ video_cfg->v_fc_config.vm.hsync_len *= 2;
+ video_cfg->v_fc_config.vm.hback_porch *= 2;
}
}
@@ -329,13 +329,12 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
struct hdmi_core_vid_config *cfg)
{
void __iomem *base = core->base;
+ struct videomode *vm = &cfg->v_fc_config.vm;
unsigned char r = 0;
bool vsync_pol, hsync_pol;
- vsync_pol =
- cfg->v_fc_config.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol =
- cfg->v_fc_config.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
/* Set hsync, vsync and data-enable polarity */
r = hdmi_read_reg(base, HDMI_CORE_FC_INVIDCONF);
@@ -343,20 +342,16 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
r = FLD_MOD(r, hsync_pol, 5, 5);
r = FLD_MOD(r, cfg->data_enable_pol, 4, 4);
r = FLD_MOD(r, cfg->vblank_osc, 1, 1);
- r = FLD_MOD(r, cfg->v_fc_config.timings.interlace, 0, 0);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 0, 0);
hdmi_write_reg(base, HDMI_CORE_FC_INVIDCONF, r);
/* set x resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1,
- cfg->v_fc_config.timings.x_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0,
- cfg->v_fc_config.timings.x_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1, vm->hactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0, vm->hactive & 0xFF, 7, 0);
/* set y resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1,
- cfg->v_fc_config.timings.y_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0,
- cfg->v_fc_config.timings.y_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1, vm->vactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0, vm->vactive & 0xFF, 7, 0);
/* set horizontal blanking pixels */
REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK1, cfg->hblank >> 8, 4, 0);
@@ -366,30 +361,28 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_INVBLANK, cfg->vblank, 7, 0);
/* set horizontal sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1,
- cfg->v_fc_config.timings.hfp >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0,
- cfg->v_fc_config.timings.hfp & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1, vm->hfront_porch >> 8,
+ 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0, vm->hfront_porch & 0xFF,
+ 7, 0);
/* set vertical sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY,
- cfg->v_fc_config.timings.vfp, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY, vm->vfront_porch, 7, 0);
/* set horizontal sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1,
- (cfg->v_fc_config.timings.hsw >> 8), 1, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0,
- cfg->v_fc_config.timings.hsw & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1, (vm->hsync_len >> 8),
+ 1, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0, vm->hsync_len & 0xFF,
+ 7, 0);
/* set vertical sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH,
- cfg->v_fc_config.timings.vsw, 5, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH, vm->vsync_len, 5, 0);
/* select DVI mode */
REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
- cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
+ cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
- if (cfg->v_fc_config.timings.double_pixel)
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 2, 7, 4);
else
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 1, 7, 4);
@@ -616,7 +609,7 @@ int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
@@ -624,16 +617,16 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_init(&v_core_cfg, cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/* support limited range with 24 bit color depth for now */
hdmi_core_configure_range(core);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 203694a52d18..b783d5a0750e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -144,87 +144,84 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 r;
bool vsync_pol, hsync_pol;
DSSDBG("Enter hdmi_wp_video_config_interface\n");
- vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
r = FLD_MOD(r, vsync_pol, 7, 7);
r = FLD_MOD(r, hsync_pol, 6, 6);
- r = FLD_MOD(r, timings->interlace, 3, 3);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);
r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
}
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 timing_h = 0;
u32 timing_v = 0;
- unsigned hsw_offset = 1;
+ unsigned hsync_len_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
/*
* On OMAP4 and OMAP5 ES1 the HSW field is programmed as is. On OMAP5
- * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsw-1.
+ * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsync_len-1.
* However, we don't support OMAP5 ES1 at all, so we can just check for
* OMAP4 here.
*/
if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4)
- hsw_offset = 0;
+ hsync_len_offset = 0;
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw - hsw_offset, 7, 0);
+ timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
+ timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);
+ timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ timing_v |= FLD_VAL(vm->vback_porch, 31, 20);
+ timing_v |= FLD_VAL(vm->vfront_porch, 19, 8);
+ timing_v |= FLD_VAL(vm->vsync_len, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
}
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
+ struct videomode *vm, struct hdmi_config *param)
{
DSSDBG("Enter hdmi_wp_video_init_format\n");
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = param->timings.y_res;
- video_fmt->x_res = param->timings.x_res;
-
- timings->hbp = param->timings.hbp;
- timings->hfp = param->timings.hfp;
- timings->hsw = param->timings.hsw;
- timings->vbp = param->timings.vbp;
- timings->vfp = param->timings.vfp;
- timings->vsw = param->timings.vsw;
-
- timings->vsync_level = param->timings.vsync_level;
- timings->hsync_level = param->timings.hsync_level;
- timings->interlace = param->timings.interlace;
- timings->double_pixel = param->timings.double_pixel;
-
- if (param->timings.interlace) {
+ video_fmt->y_res = param->vm.vactive;
+ video_fmt->x_res = param->vm.hactive;
+
+ vm->hback_porch = param->vm.hback_porch;
+ vm->hfront_porch = param->vm.hfront_porch;
+ vm->hsync_len = param->vm.hsync_len;
+ vm->vback_porch = param->vm.vback_porch;
+ vm->vfront_porch = param->vm.vfront_porch;
+ vm->vsync_len = param->vm.vsync_len;
+
+ vm->flags = param->vm.flags;
+
+ if (param->vm.flags & DISPLAY_FLAGS_INTERLACED) {
video_fmt->y_res /= 2;
- timings->vbp /= 2;
- timings->vfp /= 2;
- timings->vsw /= 2;
+ vm->vback_porch /= 2;
+ vm->vfront_porch /= 2;
+ vm->vsync_len /= 2;
}
- if (param->timings.double_pixel) {
+ if (param->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
video_fmt->x_res *= 2;
- timings->hfp *= 2;
- timings->hsw *= 2;
- timings->hbp *= 2;
+ vm->hfront_porch *= 2;
+ vm->hsync_len *= 2;
+ vm->hback_porch *= 2;
}
}
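
Throughout the WP and core code above, the old hsync_level/vsync_level/interlace
members become tests on vm->flags; a tiny sketch of that pattern as standalone
helpers (the helper names are hypothetical, not something this patch adds):

#include <video/videomode.h>

static inline bool vm_hsync_active_high(const struct videomode *vm)
{
	return !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
}

static inline bool vm_is_interlaced(const struct videomode *vm)
{
	return !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
}
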
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 6eaf1adbd606..b420dde8c0fb 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -290,7 +290,7 @@ struct omap_dss_dsi_videomode_timings {
struct omap_dss_dsi_config {
enum omap_dss_dsi_mode mode;
enum omap_dss_dsi_pixel_format pixel_format;
- const struct omap_video_timings *timings;
+ const struct videomode *vm;
unsigned long hs_clk_min, hs_clk_max;
unsigned long lp_clk_min, lp_clk_max;
@@ -299,48 +299,12 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-struct omap_video_timings {
- /* Unit: pixels */
- u16 x_res;
- /* Unit: pixels */
- u16 y_res;
- /* Unit: Hz */
- u32 pixelclock;
- /* Unit: pixel clocks */
- u16 hsw; /* Horizontal synchronization pulse width */
- /* Unit: pixel clocks */
- u16 hfp; /* Horizontal front porch */
- /* Unit: pixel clocks */
- u16 hbp; /* Horizontal back porch */
- /* Unit: line clocks */
- u16 vsw; /* Vertical synchronization pulse width */
- /* Unit: line clocks */
- u16 vfp; /* Vertical front porch */
- /* Unit: line clocks */
- u16 vbp; /* Vertical back porch */
-
- /* Vsync logic level */
- enum omap_dss_signal_level vsync_level;
- /* Hsync logic level */
- enum omap_dss_signal_level hsync_level;
- /* Interlaced or Progressive timings */
- bool interlace;
- /* Pixel clock edge to drive LCD data */
- enum omap_dss_signal_edge data_pclk_edge;
- /* Data enable logic level */
- enum omap_dss_signal_level de_level;
- /* Pixel clock edges to drive HSYNC and VSYNC signals */
- enum omap_dss_signal_edge sync_pclk_edge;
-
- bool double_pixel;
-};
-
-/* Hardcoded timings for tv modes. Venc only uses these to
+/* Hardcoded videomodes for TV. The VENC only uses these to
* identify the mode, and does not actually use the configs
* itself. However, the configs should be something that
* a normal monitor can also show */
-extern const struct omap_video_timings omap_dss_pal_timings;
-extern const struct omap_video_timings omap_dss_ntsc_timings;
+extern const struct videomode omap_dss_pal_vm;
+extern const struct videomode omap_dss_ntsc_vm;
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
@@ -502,11 +466,11 @@ struct omapdss_dpi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
};
@@ -521,11 +485,11 @@ struct omapdss_sdi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
};
@@ -540,11 +504,11 @@ struct omapdss_dvi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
};
struct omapdss_atv_ops {
@@ -557,11 +521,11 @@ struct omapdss_atv_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_type)(struct omap_dss_device *dssdev,
enum omap_dss_venc_type type);
@@ -582,11 +546,11 @@ struct omapdss_hdmi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
@@ -692,7 +656,7 @@ struct omap_dss_device {
} phy;
struct {
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format dsi_pix_fmt;
enum omap_dss_dsi_mode dsi_mode;
@@ -785,11 +749,11 @@ struct omap_dss_driver {
int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
@@ -819,11 +783,6 @@ struct omap_dss_device *omap_dss_find_device(void *data,
int (*match)(struct omap_dss_device *dssdev, void *data));
const char *omapdss_get_default_display_name(void);
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt);
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm);
-
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
@@ -852,7 +811,7 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres);
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -906,7 +865,7 @@ void dispc_mgr_go(enum omap_channel channel);
void dispc_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dispc_mgr_setup(enum omap_channel channel,
const struct omap_overlay_manager_info *info);
u32 dispc_mgr_gamma_size(enum omap_channel channel);
@@ -919,8 +878,7 @@ bool dispc_ovl_enabled(enum omap_plane plane);
void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel);
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
- bool mem_to_mem);
+ bool replication, const struct videomode *vm, bool mem_to_mem);
enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel);
@@ -934,7 +892,7 @@ struct dss_mgr_ops {
int (*enable)(enum omap_channel channel);
void (*disable)(enum omap_channel channel);
void (*set_timings)(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void (*set_lcd_config)(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int (*register_framedone_handler)(enum omap_channel channel,
@@ -951,7 +909,7 @@ int dss_mgr_connect(enum omap_channel channel,
void dss_mgr_disconnect(enum omap_channel channel,
struct omap_dss_device *dst);
void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dss_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int dss_mgr_enable(enum omap_channel channel);
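
For orientation, a minimal sketch (not part of the patch) of how the fields of
the removed struct omap_video_timings map onto struct videomode from
<video/videomode.h>. The values are the PAL timings converted in venc.c further
down; the helper name is purely illustrative.

#include <video/videomode.h>

/* illustrative only -- mirrors the omap_dss_pal_vm table in venc.c */
static void example_fill_pal_videomode(struct videomode *vm)
{
	vm->hactive      = 720;			/* was x_res */
	vm->vactive      = 574;			/* was y_res */
	vm->pixelclock   = 13500000;		/* unchanged, in Hz */
	vm->hsync_len    = 64;			/* was hsw */
	vm->hfront_porch = 12;			/* was hfp */
	vm->hback_porch  = 68;			/* was hbp */
	vm->vsync_len    = 5;			/* was vsw */
	vm->vfront_porch = 5;			/* was vfp */
	vm->vback_porch  = 41;			/* was vbp */

	/*
	 * hsync_level, vsync_level, de_level, data_pclk_edge,
	 * sync_pclk_edge, interlace and double_pixel all collapse into
	 * bits of vm->flags (enum display_flags).
	 */
	vm->flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		    DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
		    DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_INTERLACED;
}
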
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 24f859488201..a901af5a9bc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -201,10 +201,9 @@ void dss_mgr_disconnect(enum omap_channel channel,
}
EXPORT_SYMBOL(dss_mgr_disconnect);
-void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+void dss_mgr_set_timings(enum omap_channel channel, const struct videomode *vm)
{
- dss_mgr_ops->set_timings(channel, timings);
+ dss_mgr_ops->set_timings(channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index cd53566d75eb..09724757366a 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -113,7 +113,7 @@ static struct {
struct semaphore bus_lock;
- struct omap_video_timings timings;
+ struct videomode vm;
int pixel_size;
int data_lines;
struct rfbi_timings intf_timings;
@@ -308,15 +308,15 @@ static int rfbi_transfer_area(struct omap_dss_device *dssdev,
u32 l;
int r;
struct omap_overlay_manager *mgr = rfbi.output.manager;
- u16 width = rfbi.timings.x_res;
- u16 height = rfbi.timings.y_res;
+ u16 width = rfbi.vm.hactive;
+ u16 height = rfbi.vm.vactive;
/*BUG_ON(callback == 0);*/
BUG_ON(rfbi.framedone_callback != NULL);
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ dss_mgr_set_timings(mgr, &rfbi.vm);
r = dss_mgr_enable(mgr);
if (r)
@@ -777,8 +777,8 @@ static int rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
{
- rfbi.timings.x_res = w;
- rfbi.timings.y_res = h;
+ rfbi.vm.hactive = w;
+ rfbi.vm.vactive = h;
}
static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
@@ -854,25 +854,30 @@ static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
dss_mgr_set_lcd_config(mgr, &mgr_config);
/*
- * Set rfbi.timings with default values, the x_res and y_res fields
+ * Set rfbi.vm with default values, the hactive and vactive fields
* are expected to be already configured by the panel driver via
* omapdss_rfbi_set_size()
*/
- rfbi.timings.hsw = 1;
- rfbi.timings.hfp = 1;
- rfbi.timings.hbp = 1;
- rfbi.timings.vsw = 1;
- rfbi.timings.vfp = 0;
- rfbi.timings.vbp = 0;
-
- rfbi.timings.interlace = false;
- rfbi.timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ rfbi.vm.hsync_len = 1;
+ rfbi.vm.hfront_porch = 1;
+ rfbi.vm.hback_porch = 1;
+ rfbi.vm.vsync_len = 1;
+ rfbi.vm.vfront_porch = 0;
+ rfbi.vm.vback_porch = 0;
+
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(mgr, &rfbi.vm);
}
static int rfbi_display_enable(struct omap_dss_device *dssdev)
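
The rfbi defaults above clear one polarity flag and set its complement for each
signal; a short sketch of that pattern factored into a helper (the helper is
hypothetical and only illustrates the flag handling):

static inline void vm_set_polarity(struct videomode *vm,
				   enum display_flags set,
				   enum display_flags clear)
{
	vm->flags &= ~clear;
	vm->flags |= set;
}

/* e.g. the HSYNC assignment above would read:
 *	vm_set_polarity(&rfbi.vm, DISPLAY_FLAGS_HSYNC_HIGH,
 *			DISPLAY_FLAGS_HSYNC_LOW);
 */
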
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 0a96c321ce62..b3bda2d3c08d 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -39,7 +39,7 @@ static struct {
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
int datapairs;
struct omap_dss_device output;
@@ -131,7 +131,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
enum omap_channel channel = dssdev->dispc_channel;
- struct omap_video_timings *t = &sdi.timings;
+ struct videomode *vm = &sdi.vm;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
@@ -151,10 +151,9 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
goto err_get_dispc;
/* 15.5.9.1.2 */
- t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
- r = sdi_calc_clock_div(t->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(vm->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -162,15 +161,15 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
r = dss_set_fck_rate(fck);
if (r)
@@ -229,26 +228,26 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- sdi.timings = *timings;
+ sdi.vm = *vm;
}
static void sdi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = sdi.timings;
+ *vm = sdi.vm;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
enum omap_channel channel = dssdev->dispc_channel;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 6eedf2118708..d74f7fcc2e46 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -262,47 +262,41 @@ static const struct venc_config venc_config_pal_bdghi = {
.fid_ext_start_y__fid_ext_offset_y = 0x01380005,
};
-const struct omap_video_timings omap_dss_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+const struct videomode omap_dss_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_pal_timings);
+EXPORT_SYMBOL(omap_dss_pal_vm);
-const struct omap_video_timings omap_dss_ntsc_timings = {
- .x_res = 720,
- .y_res = 482,
+const struct videomode omap_dss_ntsc_vm = {
+ .hactive = 720,
+ .vactive = 482,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 16,
- .hbp = 58,
- .vsw = 6,
- .vfp = 6,
- .vbp = 31,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 16,
+ .hback_porch = 58,
+ .vsync_len = 6,
+ .vfront_porch = 6,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_ntsc_timings);
+EXPORT_SYMBOL(omap_dss_ntsc_vm);
static struct {
struct platform_device *pdev;
@@ -313,7 +307,7 @@ static struct {
struct clk *tv_dac_clk;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_venc_type type;
bool invert_polarity;
@@ -427,13 +421,12 @@ static void venc_runtime_put(void)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(
- struct omap_video_timings *timings)
+static const struct venc_config *venc_timings_to_config(struct videomode *vm)
{
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return &venc_config_pal_trm;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return &venc_config_ntsc_trm;
BUG();
@@ -451,7 +444,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
goto err0;
venc_reset();
- venc_write_config(venc_timings_to_config(&venc.timings));
+ venc_write_config(venc_timings_to_config(&venc.vm));
dss_set_venc_output(venc.type);
dss_set_dac_pwrdn_bgz(1);
@@ -468,7 +461,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(channel, &venc.timings);
+ dss_mgr_set_timings(channel, &venc.vm);
r = regulator_enable(venc.vdda_dac_reg);
if (r)
@@ -546,17 +539,17 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
}
static void venc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_set_timings\n");
mutex_lock(&venc.venc_lock);
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.timings, timings, sizeof(*timings)))
+ if (memcmp(&venc.vm, vm, sizeof(*vm)))
venc.wss_data = 0;
- venc.timings = *timings;
+ venc.vm = *vm;
dispc_set_tv_pclk(13500000);
@@ -564,25 +557,25 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
static int venc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_check_timings\n");
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return 0;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return 0;
return -EINVAL;
}
static void venc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&venc.venc_lock);
- *timings = venc.timings;
+ *vm = venc.vm;
mutex_unlock(&venc.venc_lock);
}
@@ -602,7 +595,7 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
mutex_lock(&venc.venc_lock);
- config = venc_timings_to_config(&venc.timings);
+ config = venc_timings_to_config(&venc.vm);
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
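
Because venc_timings_to_config() and venc_check_timings() compare the whole
struct with memcmp(), callers must pass a videomode identical to one of the two
exported tables; a hedged sketch of a caller selecting PAL under that
constraint (dssdev is assumed to be a connected VENC output device):

struct videomode vm = omap_dss_pal_vm;	/* must match byte-for-byte */

if (dssdev->driver->check_timings(dssdev, &vm) == 0)
	dssdev->driver->set_timings(dssdev, &vm);
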
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 137fe690a0da..2580e8673908 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -42,73 +42,6 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
return omap_connector->hdmi_mode;
}
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings)
-{
- mode->clock = timings->pixelclock / 1000;
-
- mode->hdisplay = timings->x_res;
- mode->hsync_start = mode->hdisplay + timings->hfp;
- mode->hsync_end = mode->hsync_start + timings->hsw;
- mode->htotal = mode->hsync_end + timings->hbp;
-
- mode->vdisplay = timings->y_res;
- mode->vsync_start = mode->vdisplay + timings->vfp;
- mode->vsync_end = mode->vsync_start + timings->vsw;
- mode->vtotal = mode->vsync_end + timings->vbp;
-
- mode->flags = 0;
-
- if (timings->interlace)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (timings->double_pixel)
- mode->flags |= DRM_MODE_FLAG_DBLCLK;
-
- if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NHSYNC;
-
- if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NVSYNC;
-}
-
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode)
-{
- timings->pixelclock = mode->clock * 1000;
-
- timings->x_res = mode->hdisplay;
- timings->hfp = mode->hsync_start - mode->hdisplay;
- timings->hsw = mode->hsync_end - mode->hsync_start;
- timings->hbp = mode->htotal - mode->hsync_end;
-
- timings->y_res = mode->vdisplay;
- timings->vfp = mode->vsync_start - mode->vdisplay;
- timings->vsw = mode->vsync_end - mode->vsync_start;
- timings->vbp = mode->vtotal - mode->vsync_end;
-
- timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
- timings->double_pixel = !!(mode->flags & DRM_MODE_FLAG_DBLCLK);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
@@ -185,11 +118,11 @@ static int omap_connector_get_modes(struct drm_connector *connector)
kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
- dssdrv->get_timings(dssdev, &timings);
+ dssdrv->get_timings(dssdev, &vm);
- copy_timings_omap_to_drm(mode, &timings);
+ drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
@@ -207,12 +140,14 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
int r, ret = MODE_BAD;
- copy_timings_drm_to_omap(&timings, mode);
+ drm_display_mode_to_videomode(mode, &vm);
+ vm.flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
mode->vrefresh = drm_mode_vrefresh(mode);
/*
@@ -221,13 +156,13 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
* panel's timings
*/
if (dssdrv->check_timings) {
- r = dssdrv->check_timings(dssdev, &timings);
+ r = dssdrv->check_timings(dssdev, &vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(&vm, &t, sizeof(struct videomode)))
r = -EINVAL;
else
r = 0;
@@ -236,7 +171,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
if (!r) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
- new_mode->clock = timings.pixelclock / 1000;
+ new_mode->clock = vm.pixelclock / 1000;
new_mode->vrefresh = 0;
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
ret = MODE_OK;
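
The removed copy_timings_* helpers are replaced by the generic DRM converters;
a minimal sketch of the round trip as now used in this file (mode and vm stand
in for the local variables):

/* DRM mode -> videomode, then add the OMAP-specific flags the generic
 * helper does not set: */
drm_display_mode_to_videomode(mode, &vm);
vm.flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
	    DISPLAY_FLAGS_SYNC_NEGEDGE;

/* videomode -> DRM mode, when building a mode from panel timings: */
drm_display_mode_from_videomode(&vm, mode);
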
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 180f644e861e..8dea89030e66 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -34,7 +34,7 @@ struct omap_crtc {
const char *name;
enum omap_channel channel;
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_drm_irq vblank_irq;
struct omap_drm_irq error_irq;
@@ -56,10 +56,10 @@ uint32_t pipe2vbl(struct drm_crtc *crtc)
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
}
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return &omap_crtc->timings;
+ return &omap_crtc->vm;
}
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
@@ -201,7 +201,7 @@ static int omap_crtc_dss_enable(enum omap_channel channel)
dispc_mgr_setup(omap_crtc->channel, &info);
dispc_mgr_set_timings(omap_crtc->channel,
- &omap_crtc->timings);
+ &omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
@@ -215,11 +215,11 @@ static void omap_crtc_dss_disable(enum omap_channel channel)
}
static void omap_crtc_dss_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
DBG("%s", omap_crtc->name);
- omap_crtc->timings = *timings;
+ omap_crtc->vm = *vm;
}
static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
@@ -369,7 +369,10 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+ drm_display_mode_to_videomode(mode, &omap_crtc->vm);
+ omap_crtc->vm.flags |= DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -411,19 +414,6 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
}
- if (crtc->state->color_mgmt_changed) {
- struct drm_color_lut *lut = NULL;
- uint length = 0;
-
- if (crtc->state->gamma_lut) {
- lut = (struct drm_color_lut *)
- crtc->state->gamma_lut->data;
- length = crtc->state->gamma_lut->length /
- sizeof(*lut);
- }
- dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
- }
-
if (dispc_mgr_is_enabled(omap_crtc->channel)) {
DBG("%s: GO", omap_crtc->name);
@@ -438,13 +428,14 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static bool omap_crtc_is_plane_prop(struct drm_device *dev,
+static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
struct drm_property *property)
{
+ struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
return property == priv->zorder_prop ||
- property == dev->mode_config.rotation_property;
+ property == crtc->primary->rotation_property;
}
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
@@ -452,9 +443,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
- struct drm_device *dev = crtc->dev;
-
- if (omap_crtc_is_plane_prop(dev, property)) {
+ if (omap_crtc_is_plane_prop(crtc, property)) {
struct drm_plane_state *plane_state;
struct drm_plane *plane = crtc->primary;
@@ -479,9 +468,7 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t *val)
{
- struct drm_device *dev = crtc->dev;
-
- if (omap_crtc_is_plane_prop(dev, property)) {
+ if (omap_crtc_is_plane_prop(crtc, property)) {
/*
* Delegate property get to the primary plane. The
* drm_atomic_plane_get_property() function isn't exported, but
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e1cfba51cff6..39c5312b466c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -105,7 +105,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
dispc_runtime_put();
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&priv->commit.lock);
@@ -176,6 +176,7 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -292,16 +293,6 @@ static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- if (priv->has_dmm) {
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_ROTATE_90 |
- DRM_ROTATE_180 | DRM_ROTATE_270 |
- DRM_REFLECT_X | DRM_REFLECT_Y);
- if (!dev->mode_config.rotation_property)
- return -ENOMEM;
- }
-
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
if (!priv->zorder_prop)
return -ENOMEM;
@@ -752,22 +743,32 @@ static void dev_lastclose(struct drm_device *dev)
DBG("lastclose: dev=%p", dev);
- if (dev->mode_config.rotation_property) {
- /* need to restore default rotation state.. not sure
- * if there is a cleaner way to restore properties to
- * default state? Maybe a flag that properties should
- * automatically be restored to default state on
- * lastclose?
- */
- for (i = 0; i < priv->num_crtcs; i++) {
- drm_object_property_set_value(&priv->crtcs[i]->base,
- dev->mode_config.rotation_property, 0);
- }
+ /* need to restore default rotation state.. not sure
+ * if there is a cleaner way to restore properties to
+ * default state? Maybe a flag that properties should
+ * automatically be restored to default state on
+ * lastclose?
+ */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ struct drm_crtc *crtc = priv->crtcs[i];
- for (i = 0; i < priv->num_planes; i++) {
- drm_object_property_set_value(&priv->planes[i]->base,
- dev->mode_config.rotation_property, 0);
- }
+ if (!crtc->primary->rotation_property)
+ continue;
+
+ drm_object_property_set_value(&crtc->base,
+ crtc->primary->rotation_property,
+ DRM_ROTATE_0);
+ }
+
+ for (i = 0; i < priv->num_planes; i++) {
+ struct drm_plane *plane = priv->planes[i];
+
+ if (!plane->rotation_property)
+ continue;
+
+ drm_object_property_set_value(&plane->base,
+ plane->rotation_property,
+ DRM_ROTATE_0);
}
if (priv->fbdev) {
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index dcc30a98b9d4..4c51135eb9a6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -148,7 +148,7 @@ static inline void omap_fbdev_free(struct drm_device *dev)
}
#endif
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
void omap_crtc_pre_init(void);
void omap_crtc_pre_uninit(void);
@@ -171,11 +171,6 @@ struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings);
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode);
-
uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0bbb9c59622e..a20f30039aee 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -102,7 +102,7 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
static int omap_encoder_update(struct drm_encoder *encoder,
enum omap_channel channel,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -111,13 +111,13 @@ static int omap_encoder_update(struct drm_encoder *encoder,
int ret;
if (dssdrv->check_timings) {
- ret = dssdrv->check_timings(dssdev, timings);
+ ret = dssdrv->check_timings(dssdev, vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(vm, &t, sizeof(struct videomode)))
ret = -EINVAL;
else
ret = 0;
@@ -129,7 +129,7 @@ static int omap_encoder_update(struct drm_encoder *encoder,
}
if (dssdrv->set_timings)
- dssdrv->set_timings(dssdev, timings);
+ dssdrv->set_timings(dssdev, vm);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index adb10fbe918d..8d8ac173f55d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -82,6 +82,7 @@ fallback:
static struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
@@ -92,11 +93,7 @@ static struct fb_ops omap_fb_ops = {
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_pan_display = omap_fbdev_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 505dee0db973..d4e1e11466f8 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -336,8 +336,10 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
int i, npages = obj->size >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
- dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (omap_obj->addrs[i])
+ dma_unmap_page(obj->dev->dev,
+ omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 66ac8c40db26..9c43cb481e62 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -108,16 +108,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
win.src_x = state->src_x >> 16;
win.src_y = state->src_y >> 16;
- switch (state->rotation & DRM_ROTATE_MASK) {
- case DRM_ROTATE_90:
- case DRM_ROTATE_270:
+ if (drm_rotation_90_or_270(state->rotation)) {
win.src_w = state->src_h >> 16;
win.src_h = state->src_w >> 16;
- break;
- default:
+ } else {
win.src_w = state->src_w >> 16;
win.src_h = state->src_h >> 16;
- break;
}
/* update scanout: */
@@ -135,7 +131,9 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
/* and finally, update omapdss: */
ret = dispc_ovl_setup(omap_plane->id, &info, false,
omap_crtc_timings(state->crtc), false);
- if (WARN_ON(ret)) {
+ if (ret) {
+ dev_err(plane->dev->dev, "Failed to setup plane %s\n",
+ omap_plane->name);
dispc_ovl_enable(omap_plane->id, false);
return;
}
@@ -161,12 +159,20 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ if (!state->fb)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ /* crtc should only be NULL when disabling (i.e., !state->fb) */
+ if (WARN_ON(!state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ /* we should have a crtc state if the plane is attached to a crtc */
+ if (WARN_ON(!crtc_state))
+ return 0;
+
+ if (!crtc_state->enable)
+ return 0;
if (state->crtc_x < 0 || state->crtc_y < 0)
return -EINVAL;
@@ -177,11 +183,9 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
- if (state->fb) {
- if (state->rotation != DRM_ROTATE_0 &&
- !omap_framebuffer_supports_rotation(state->fb))
- return -EINVAL;
- }
+ if (state->rotation != DRM_ROTATE_0 &&
+ !omap_framebuffer_supports_rotation(state->fb))
+ return -EINVAL;
return 0;
}
@@ -215,9 +219,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
struct omap_drm_private *priv = dev->dev_private;
if (priv->has_dmm) {
- struct drm_property *prop = dev->mode_config.rotation_property;
-
- drm_object_attach_property(obj, prop, 0);
+ if (!plane->rotation_property)
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270 |
+ DRM_REFLECT_X | DRM_REFLECT_Y);
+
+ /* Attach the rotation property also to the crtc object */
+ if (plane->rotation_property && obj != &plane->base)
+ drm_object_attach_property(obj, plane->rotation_property,
+ DRM_ROTATE_0);
}
drm_object_attach_property(obj, priv->zorder_prop, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 04270f5d110c..74fc9362ecf9 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -578,7 +578,7 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
return 0;
}
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
struct qxl_rect rect;
int ret;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index a61c0d460ec2..4b5eab8a47b3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -36,7 +36,7 @@ static bool qxl_head_enabled(struct qxl_head *head)
return head->width && head->height;
}
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
{
if (qdev->client_monitors_config &&
count > qdev->client_monitors_config->count) {
@@ -57,11 +57,18 @@ void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
qdev->client_monitors_config->count = count;
}
+enum {
+ MONITORS_CONFIG_MODIFIED,
+ MONITORS_CONFIG_UNCHANGED,
+ MONITORS_CONFIG_BAD_CRC,
+};
+
static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
{
int i;
int num_monitors;
uint32_t crc;
+ int status = MONITORS_CONFIG_UNCHANGED;
num_monitors = qdev->rom->client_monitors_config.count;
crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
@@ -70,7 +77,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
sizeof(qdev->rom->client_monitors_config),
qdev->rom->client_monitors_config_crc);
- return 1;
+ return MONITORS_CONFIG_BAD_CRC;
}
if (num_monitors > qdev->monitors_config->max_allowed) {
DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
@@ -79,6 +86,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
} else {
num_monitors = qdev->rom->client_monitors_config.count;
}
+ if (qdev->client_monitors_config
+ && (num_monitors != qdev->client_monitors_config->count)) {
+ status = MONITORS_CONFIG_MODIFIED;
+ }
qxl_alloc_client_monitors_config(qdev, num_monitors);
/* we copy max from the client but it isn't used */
qdev->client_monitors_config->max_allowed =
@@ -88,17 +99,39 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
&qdev->rom->client_monitors_config.heads[i];
struct qxl_head *client_head =
&qdev->client_monitors_config->heads[i];
- client_head->x = c_rect->left;
- client_head->y = c_rect->top;
- client_head->width = c_rect->right - c_rect->left;
- client_head->height = c_rect->bottom - c_rect->top;
- client_head->surface_id = 0;
- client_head->id = i;
- client_head->flags = 0;
+ if (client_head->x != c_rect->left) {
+ client_head->x = c_rect->left;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->y != c_rect->top) {
+ client_head->y = c_rect->top;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->width != c_rect->right - c_rect->left) {
+ client_head->width = c_rect->right - c_rect->left;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->height != c_rect->bottom - c_rect->top) {
+ client_head->height = c_rect->bottom - c_rect->top;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->surface_id != 0) {
+ client_head->surface_id = 0;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->id != i) {
+ client_head->id = i;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->flags != 0) {
+ client_head->flags = 0;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
client_head->x, client_head->y);
}
- return 0;
+
+ return status;
}
static void qxl_update_offset_props(struct qxl_device *qdev)
@@ -124,9 +157,18 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
struct drm_device *dev = qdev->ddev;
- while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+ int status;
+
+ status = qxl_display_copy_rom_client_monitors_config(qdev);
+ while (status == MONITORS_CONFIG_BAD_CRC) {
qxl_io_log(qdev, "failed crc check for client_monitors_config,"
" retrying\n");
+ status = qxl_display_copy_rom_client_monitors_config(qdev);
+ }
+ if (status == MONITORS_CONFIG_UNCHANGED) {
+ qxl_io_log(qdev, "config unchanged\n");
+ DRM_DEBUG("ignoring unchanged client monitors config");
+ return;
}
drm_modeset_lock_all(dev);
@@ -157,6 +199,9 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
false);
mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->hdisplay = head->width;
+ mode->vdisplay = head->height;
+ drm_mode_set_name(mode);
*pwidth = head->width;
*pheight = head->height;
drm_mode_probed_add(connector, mode);
@@ -607,7 +652,7 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-void
+static void
qxl_send_monitors_config(struct qxl_device *qdev)
{
int i;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 5f3e5ad99de7..785aad42e9bb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,7 +31,7 @@
* Definitions taken from spice-protocol, plus kernel driver specific bits.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -190,7 +190,7 @@ enum {
* spice-protocol/qxl_dev.h */
#define QXL_MAX_RES 96
struct qxl_release {
- struct fence base;
+ struct dma_fence base;
int id;
int type;
@@ -395,16 +395,11 @@ qxl_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj,
const struct drm_framebuffer_funcs *funcs);
void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
-void qxl_send_monitors_config(struct qxl_device *qdev);
int qxl_create_monitors_object(struct qxl_device *qdev);
int qxl_destroy_monitors_object(struct qxl_device *qdev);
-/* used by qxl_debugfs only */
-void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
-
/* qxl_gem.c */
-int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_init(struct qxl_device *qdev);
void qxl_gem_fini(struct qxl_device *qdev);
int qxl_gem_object_create(struct qxl_device *qdev, int size,
int alignment, int initial_domain,
@@ -574,6 +569,5 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
struct qxl_drv_surface *
qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
#endif
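
qxl follows the tree-wide fence -> dma_fence rename here; a minimal
consumer-side sketch of the renamed API (the helper and the 100 ms timeout are
illustrative assumptions, not code from this patch):

#include <linux/dma-fence.h>
#include <linux/jiffies.h>

static void example_wait_release(struct dma_fence *fence)
{
	if (!dma_fence_is_signaled(fence))
		dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	dma_fence_put(fence);
}
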
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 2cd879a4ae15..fd7e5e94be5b 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -81,16 +81,10 @@ static struct fb_deferred_io qxl_defio = {
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -197,7 +191,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
/*
* we are using a shadow draw buffer, at qdev->surface0_shadow
*/
- qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+ qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2,
clips->y1, clips->y2);
image->dx = clips->x1;
image->dy = clips->y1;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index d9746e904ef1..3f185c4da5b7 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -111,10 +111,9 @@ void qxl_gem_object_close(struct drm_gem_object *obj,
{
}
-int qxl_gem_init(struct qxl_device *qdev)
+void qxl_gem_init(struct qxl_device *qdev)
{
INIT_LIST_HEAD(&qdev->gem.objects);
- return 0;
}
void qxl_gem_fini(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e642242728c0..af685f1d91f8 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -131,7 +131,7 @@ static int qxl_device_init(struct qxl_device *qdev,
mutex_init(&qdev->update_area_mutex);
mutex_init(&qdev->release_mutex);
mutex_init(&qdev->surf_evict_mutex);
- INIT_LIST_HEAD(&qdev->gem.objects);
+ qxl_gem_init(qdev);
qdev->rom_base = pci_resource_start(pdev, 2);
qdev->rom_size = pci_resource_len(pdev, 2);
@@ -273,6 +273,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
+ qxl_gem_fini(qdev);
qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cd83f050cf3e..50b4e522f05f 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,7 +21,7 @@
*/
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -40,23 +40,24 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-static const char *qxl_get_driver_name(struct fence *fence)
+static const char *qxl_get_driver_name(struct dma_fence *fence)
{
return "qxl";
}
-static const char *qxl_get_timeline_name(struct fence *fence)
+static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
return "release";
}
-static bool qxl_nop_signaling(struct fence *fence)
+static bool qxl_nop_signaling(struct dma_fence *fence)
{
/* fences are always automatically signaled, so just pretend we did this.. */
return true;
}
-static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
+static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct qxl_device *qdev;
struct qxl_release *release;
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
retry:
sc++;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
qxl_io_notify_oom(qdev);
@@ -80,11 +81,11 @@ retry:
if (!qxl_queue_garbage_collect(qdev, true))
break;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
}
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
if (have_drawable_releases || sc < 4) {
@@ -96,9 +97,9 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %llu "
- "after spincount %d\n",
- fence->context & ~0xf0000000, sc);
+ DMA_FENCE_WARN(fence, "failed to wait on release %llu "
+ "after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
goto signaled;
}
goto retry;
@@ -115,7 +116,7 @@ signaled:
return end - cur;
}
-static const struct fence_ops qxl_fence_ops = {
+static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
.enable_signaling = qxl_nop_signaling,
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev,
WARN_ON(list_empty(&release->bos));
qxl_release_free_list(release);
- fence_signal(&release->base);
- fence_put(&release->base);
+ dma_fence_signal(&release->base);
+ dma_fence_put(&release->base);
} else {
qxl_release_free_list(release);
kfree(release);
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
* Since we never really allocated a context and we don't want to conflict,
* set the highest bits. This will break if we really allow exporting of dma-bufs.
*/
- fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
- release->id | 0xf0000000, release->base.seqno);
- trace_fence_emit(&release->base);
+ dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+ release->id | 0xf0000000, release->base.seqno);
+ trace_dma_fence_emit(&release->base);
driver = bdev->driver;
glob = bo->glob;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index e26c82db948b..11761330a6b8 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
.invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
.verify_access = &qxl_verify_access,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 74f99bac08b1..05f4ebe31ce2 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1156,7 +1156,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1260,9 +1260,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -1473,7 +1472,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1563,9 +1562,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758f4e33..fa4f8f008e4d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_audio.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index d960d3915408..f8b05090232a 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -27,6 +27,7 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 103fc8650197..a0d4a0522fdc 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl)
{
- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(SRBM_GFX_CNTL, RINGID(ring));
WREG32(CP_INT_CNTL, cp_int_cntl);
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b69c8de35bd3..595a19736458 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "r600d.h"
#include "r600_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1b0dcad916b0..44e0c5ed6418 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -66,7 +66,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -367,7 +367,7 @@ struct radeon_fence_driver {
};
struct radeon_fence {
- struct fence base;
+ struct dma_fence base;
struct radeon_device *rdev;
uint64_t seq;
@@ -746,7 +746,7 @@ struct radeon_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct fence *fence;
+ struct dma_fence *fence;
bool async;
};
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
*/
-extern const struct fence_ops radeon_fence_ops;
+extern const struct dma_fence_ops radeon_fence_ops;
-static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
{
struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5df3ec73021b..4134759a6823 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -29,6 +29,7 @@
#include "atom.h"
#include "atom-bits.h"
+#include "radeon_asic.h"
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 2fdcd04bc93f..0ae13cd2adda 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -34,6 +34,7 @@ struct radeon_atpx {
static struct radeon_atpx_priv {
bool atpx_detected;
+ bool bridge_pm_usable;
/* handle for device - and atpx */
acpi_handle dhandle;
struct radeon_atpx atpx;
@@ -203,7 +204,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
- atpx->functions.power_cntl = false;
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
atpx->is_hybrid = true;
}
@@ -548,11 +553,16 @@ static bool radeon_atpx_detect(void)
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
+ bool d3_supported = false;
+ struct pci_dev *parent_pdev;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
/* some newer PX laptops mark the dGPU as a non-VGA display device */
@@ -560,6 +570,9 @@ static bool radeon_atpx_detect(void)
vga_count++;
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
if (has_atpx && vga_count == 2) {
@@ -567,6 +580,7 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ radeon_atpx_priv.bridge_pm_usable = d3_supported;
radeon_atpx_init();
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 38e396dae0a9..c1135feb93c1 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/* 10 khz */
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e18839d52e3e..27affbde058c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->ddc_bus->has_aux) {
+ if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
radeon_connector->ddc_bus->has_aux = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb92aef46e3c..60a8920fa0b9 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
rdev->flags &= ~RADEON_IS_PX;
+
+ /* disable PX if the system doesn't support dGPU power control or hybrid gfx */
+ if (!radeon_is_atpx_hybrid() &&
+ !radeon_has_atpx_dgpu_power_cntl())
+ rdev->flags &= ~RADEON_IS_PX;
}
/**
@@ -1320,7 +1333,7 @@ int radeon_device_init(struct radeon_device *rdev,
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
- rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
+ rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -1651,7 +1664,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
- /* evict remaining vram memory */
+ /* evict remaining vram memory
+ * This second call to evict vram is to evict the gart page table
+ * using the CPU.
+ */
radeon_bo_evict_vram(rdev);
radeon_agp_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index cdb8cb568c15..e7409e8a9f87 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
down_read(&rdev->exclusive_lock);
}
} else
- r = fence_wait(work->fence, false);
+ r = dma_fence_wait(work->fence, false);
if (r)
DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* confused about which BO the CRTC is scanning out
*/
- fence_put(work->fence);
+ dma_fence_put(work->fence);
work->fence = NULL;
}
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -617,7 +617,7 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- fence_put(work->fence);
+ dma_fence_put(work->fence);
kfree(work);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 2d465648856a..474a8a1886f7 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ tmp |= AUX_EN | AUX_LS_READ_EN;
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index de504ea29c06..6d1237d6e1b8 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+static struct
+drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = {
.hotplug = radeon_dp_mst_hotplug,
};
-struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+static struct
+radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
.commit = radeon_mst_encoder_commit,
};
-void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0daad446d2c7..899b6a1644bd 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -74,28 +74,22 @@ radeonfb_release(struct fb_info *info, int user)
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = radeonfb_open,
.fb_release = radeonfb_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
-int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
int pitch_mask = 0;
- switch (bpp / 8) {
+ switch (cpp) {
case 1:
pitch_mask = align_large ? 255 : 127;
break;
@@ -110,7 +104,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
- return aligned;
+ return aligned * cpp;
}
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,13 +133,13 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
+ u32 cpp;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
+ fb_tiled);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
@@ -165,11 +159,11 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (bpp) {
- case 32:
+ switch (cpp) {
+ case 4:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
- case 16:
+ case 2:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
default:
break;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ef075acde9c..ef09f0a63754 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
(*fence)->is_vm_update = false;
- fence_init(&(*fence)->base, &radeon_fence_ops,
- &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
+ dma_fence_init(&(*fence)->base, &radeon_fence_ops,
+ &rdev->fence_queue.lock,
+ rdev->fence_context + ring,
+ seq);
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
radeon_fence_schedule_check(rdev, ring);
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
- int ret = fence_signal_locked(&fence->base);
+ int ret = dma_fence_signal_locked(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
- FENCE_TRACE(&fence->base, "pending\n");
+ DMA_FENCE_TRACE(&fence->base, "pending\n");
return 0;
}
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
-static bool radeon_fence_is_signaled(struct fence *f)
+static bool radeon_fence_is_signaled(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool radeon_fence_enable_signaling(struct fence *f)
+static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f)
fence->fence_wake.private = NULL;
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
- fence_get(f);
+ dma_fence_get(f);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
int ret;
- ret = fence_signal(&fence->base);
+ ret = dma_fence_signal(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
return true;
}
return false;
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
* exclusive_lock is not held in that case.
*/
if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
- return fence_wait(&fence->base, intr);
+ return dma_fence_wait(&fence->base, intr);
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
return r;
}
- r_sig = fence_signal(&fence->base);
+ r_sig = dma_fence_signal(&fence->base);
if (!r_sig)
- FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
return r;
}
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
*fence = NULL;
if (tmp) {
- fence_put(&tmp->base);
+ dma_fence_put(&tmp->base);
}
}
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
#endif
}
-static const char *radeon_fence_get_driver_name(struct fence *fence)
+static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
return "radeon";
}
-static const char *radeon_fence_get_timeline_name(struct fence *f)
+static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
switch (fence->ring) {
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f)
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
- return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
struct radeon_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct radeon_wait_cb *wait =
container_of(cb, struct radeon_wait_cb, base);
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
wake_up_process(wait->task);
}
-static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
signed long t)
{
struct radeon_fence *fence = to_radeon_fence(f);
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
cb.task = current;
- if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+ if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
return t;
while (t > 0) {
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
}
__set_current_state(TASK_RUNNING);
- fence_remove_callback(f, &cb.base);
+ dma_fence_remove_callback(f, &cb.base);
return t;
}
-const struct fence_ops radeon_fence_ops = {
+const struct dma_fence_ops radeon_fence_ops = {
.get_driver_name = radeon_fence_get_driver_name,
.get_timeline_name = radeon_fence_get_timeline_name,
.enable_signaling = radeon_fence_enable_signaling,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index deb9511725c9..0bcffd8a7bd3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -745,7 +745,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
- args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->pitch = radeon_align_pitch(rdev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 868c3ba2efaa..222a1fa41d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -27,6 +27,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
#ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b6542538ff9..326ad068c15a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
+ /* allow new DPM state to be picked */
+ radeon_pm_compute_clocks_dpm(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ dpm_state = rdev->pm.dpm.ac_power ?
+ POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
restart_search:
/* Pick the best power state based on current conditions */
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 02ac8a1de4ff..be5d7a38d3aa 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
bool shared)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
struct radeon_fence *fence;
unsigned i;
int r = 0;
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
flist = reservation_object_get_list(resv);
if (shared || !flist || r)
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
if (r)
break;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e21662..0cf03ccbf0a7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0cd0e7bdee55..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
- struct fence *f;
+ struct dma_fence *f;
void *ptr;
int i, r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e402be8821c4..143280dc0851 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7858,7 +7858,7 @@ static void si_program_aspm(struct radeon_device *rdev)
}
}
-int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
{
unsigned i;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 89bdf20344ae..c49934527a87 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
+ /* limit all SI kickers */
+ if (rdev->family == CHIP_PITCAIRN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811) ||
+ (rdev->pdev->device == 0x6816) ||
+ (rdev->pdev->device == 0x6817) ||
+ (rdev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (rdev->family == CHIP_VERDE) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6820) ||
+ (rdev->pdev->device == 0x6821) ||
+ (rdev->pdev->device == 0x6822) ||
+ (rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682A) ||
+ (rdev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_HAINAN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6664) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
- /* limit mclk on all R7 370 parts for stability */
- if (rdev->pdev->device == 0x6811 &&
- rdev->pdev->revision == 0x81)
- max_mclk = 120000;
- /* limit sclk/mclk on Jet parts for stability */
- if (rdev->pdev->device == 0x6665 &&
- rdev->pdev->revision == 0xc3) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 7316fc7fa0bd..a2ec6d8796a0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -149,8 +149,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
+ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
| DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
@@ -172,7 +172,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
mode->crtc_vsync_start - 1);
rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
+ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 73c971e39b1c..c05e00872778 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -110,6 +110,27 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
.num_lvds = 1,
};
+static const struct rcar_du_device_info rcar_du_r8a7792_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ .num_crtcs = 2,
+ .routes = {
+ /* R8A7792 has two RGB outputs. */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 1,
+ },
+ },
+ .num_lvds = 0,
+};
+
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -157,13 +178,39 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.num_lvds = 1,
};
+static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_VSP1_SOURCE,
+ .num_crtcs = 3,
+ .routes = {
+ /* R8A7796 has one RGB output, one LVDS output and one
+ * (currently unsupported) HDMI output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
+ { .compatible = "renesas,du-r8a7792", .data = &rcar_du_r8a7792_info },
{ .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
+ { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
{ }
};
@@ -201,9 +248,7 @@ static const struct file_operations rcar_du_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -285,7 +330,6 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
- drm_vblank_cleanup(ddev);
drm_dev_unref(ddev);
@@ -294,18 +338,12 @@ static int rcar_du_remove(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct rcar_du_device *rcdu;
struct drm_device *ddev;
struct resource *mem;
int ret;
- if (np == NULL) {
- dev_err(&pdev->dev, "no device tree node\n");
- return -ENODEV;
- }
-
- /* Allocate and initialize the DRM and R-Car device structures. */
+ /* Allocate and initialize the R-Car device structure. */
rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
if (rcdu == NULL)
return -ENOMEM;
@@ -315,31 +353,22 @@ static int rcar_du_probe(struct platform_device *pdev)
rcdu->dev = &pdev->dev;
rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
- ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- rcdu->ddev = ddev;
- ddev->dev_private = rcdu;
-
platform_set_drvdata(pdev, rcdu);
/* I/O resources */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(rcdu->mmio)) {
- ret = PTR_ERR(rcdu->mmio);
- goto error;
- }
-
- /* Initialize vertical blanking interrupts handling. Start with vblank
- * disabled for all CRTCs.
- */
- ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0)
- goto error;
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
+ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ rcdu->ddev = ddev;
+ ddev->dev_private = rcdu;
+
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 33b2fc53da3e..64738fca96d0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -105,16 +105,20 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
rcar_du_group_setup_defr8(rgrp);
- /* Configure input dot clock routing. We currently hardcode the
- * configuration to routing DOTCLKINn to DUn.
+ /*
+ * Configure input dot clock routing. We currently hardcode the
+ * configuration to routing DOTCLKINn to DUn. Register fields
+ * depend on the DU generation, but the resulting value is 0 in
+ * all cases.
+ *
+ * On Gen2 a single register in the first group controls dot
+ * clock selection for all channels, while on Gen3 dot clocks
+ * are set up through per-group registers, only available when
+ * the group has two channels.
*/
- rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE |
- DIDSR_LCDS_DCLKIN(2) |
- DIDSR_LCDS_DCLKIN(1) |
- DIDSR_LCDS_DCLKIN(0) |
- DIDSR_PDCS_CLK(2, 0) |
- DIDSR_PDCS_CLK(1, 0) |
- DIDSR_PDCS_CLK(0, 0));
+ if ((rcdu->info->gen < 3 && rgrp->index == 0) ||
+ (rcdu->info->gen == 3 && rgrp->num_crtcs > 1))
+ rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE);
}
if (rcdu->info->gen >= 3)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index e03004f4588d..f9515f53cc5b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -108,7 +108,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
if (hdmienc == NULL)
return -ENOMEM;
- /* Locate drm bridge from the hdmi encoder DT node */
+ /* Locate the DRM bridge from the HDMI encoder DT node. */
bridge = of_drm_find_bridge(np);
if (!bridge)
return -EPROBE_DEFER;
@@ -123,7 +123,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
renc->hdmi = hdmienc;
hdmienc->renc = renc;
- /* Link drm_bridge to encoder */
+ /* Link the bridge to the encoder. */
bridge->encoder = encoder;
encoder->bridge = bridge;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index bd9c3bb9252c..b5d3f16cfa12 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -231,8 +231,16 @@ static int rcar_du_atomic_check(struct drm_device *dev,
struct rcar_du_device *rcdu = dev->dev_private;
int ret;
- ret = drm_atomic_helper_check(dev, state);
- if (ret < 0)
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
return ret;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
@@ -264,7 +272,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&rcdu->commit.wait.lock);
@@ -330,6 +338,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -445,13 +454,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
}
ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
- of_node_put(encoder);
- of_node_put(connector);
-
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
- "failed to initialize encoder %s (%d), skipping\n",
- encoder->full_name, ret);
+ "failed to initialize encoder %s on output %u (%d), skipping\n",
+ of_node_full_name(encoder), output, ret);
+
+ of_node_put(encoder);
+ of_node_put(connector);
return ret;
}
@@ -559,6 +568,13 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
+ /* Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
+ if (ret < 0)
+ return ret;
+
/* Initialize the groups. */
num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 6afd0af312ba..64e9f0b86e58 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -79,7 +79,7 @@ static const struct drm_connector_funcs connector_funcs = {
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- /* TODO const */ struct device_node *np)
+ const struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
struct rcar_du_lvds_connector *lvdscon;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index d4881ee0be7e..639071dd235c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -19,6 +19,6 @@ struct rcar_du_encoder;
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- struct device_node *np);
+ const struct device_node *np);
#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index ef3a50321ecc..e3a4985f6f3f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -104,7 +104,14 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
rcar_lvds_write(lvds, LVDPLLCR, pllcr);
- /* Turn the PLL on, set it to LVDS normal mode, wait for the startup
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1,
+ LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
+ LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
+ LVDCR1_CLKSTBY_GEN3);
+
+ /*
+ * Turn the PLL on, set it to LVDS normal mode, wait for the startup
* delay and turn the output on.
*/
lvdcr0 = LVDCR0_PLLON;
@@ -117,12 +124,6 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
lvdcr0 |= LVDCR0_LVRES;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- /* Turn all the channels on. */
- rcar_lvds_write(lvds, LVDCR1,
- LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
- LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
- LVDCR1_CLKSTBY_GEN3);
}
static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
@@ -241,10 +242,8 @@ int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
for (i = 0; i < rcdu->info->num_lvds; ++i) {
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL) {
- dev_err(&pdev->dev, "failed to allocate private data\n");
+ if (lvds == NULL)
return -ENOMEM;
- }
lvds->dev = rcdu;
lvds->index = i;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 3c58669a06ce..6f7f9c59f05b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -1,7 +1,6 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
- depends on RESET_CONTROLLER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8c8cbe837e61..2390c8577617 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
@@ -274,9 +275,7 @@ static const struct file_operations rockchip_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
@@ -388,7 +387,7 @@ static void rockchip_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
@@ -437,7 +436,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
}
of_node_put(iommu);
- component_match_add(dev, &match, compare_of, port->parent);
+ drm_of_component_match_add(dev, &match, compare_of,
+ port->parent);
of_node_put(port);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index a16c69f96ed5..8f639c8597a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,15 +37,11 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
static struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = rockchip_fbdev_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 3b807135a5cd..78c6d8e9b42c 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,9 +42,7 @@ static const struct file_operations savage_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 3dc0d8ff95ec..2db89bed52e8 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1004,6 +1004,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
if (IS_ERR(kvb_addr)) {
ret = PTR_ERR(kvb_addr);
+ kvb_addr = NULL;
goto done;
}
cmdbuf->vb_addr = kvb_addr;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index f0492603ea88..38dd55f4af81 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -245,9 +245,7 @@ static const struct file_operations shmob_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index ae9839886c4d..a836451920f0 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,9 +72,7 @@ static const struct file_operations sis_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 2784919a7366..ff71e25ab5bf 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_of.h>
#include "sti_crtc.h"
#include "sti_drv.h"
@@ -184,7 +185,7 @@ static void sti_atomic_complete(struct sti_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void sti_atomic_work(struct work_struct *work)
@@ -195,6 +196,26 @@ static void sti_atomic_work(struct work_struct *work)
sti_atomic_complete(private, private->commit.state);
}
+static int sti_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int sti_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state, bool nonblock)
{
@@ -217,6 +238,7 @@ static int sti_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
sti_atomic_schedule(private, state);
else
@@ -248,7 +270,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = sti_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = sti_atomic_check,
.atomic_commit = sti_atomic_commit,
};
@@ -275,9 +297,7 @@ static const struct file_operations sti_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
@@ -423,8 +443,8 @@ static int sti_platform_probe(struct platform_device *pdev)
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
- component_match_add(dev, &match, compare_of, child_np);
- of_node_put(child_np);
+ drm_of_component_match_add(dev, &match, compare_of,
+ child_np);
child_np = of_get_next_available_child(node, child_np);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 32c0584e3c35..2e08f969bb64 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -95,6 +95,22 @@ static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
break;
+ case DRM_FORMAT_ARGB4444:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB1555:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
+ break;
+
+ case DRM_FORMAT_RGBA5551:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
+ break;
+
+ case DRM_FORMAT_RGBA4444:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
+ break;
+
case DRM_FORMAT_XRGB8888:
*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
break;
@@ -103,6 +119,10 @@ static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
break;
+ case DRM_FORMAT_RGB565:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
+ break;
+
default:
return -EINVAL;
}
@@ -389,7 +409,7 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
reset_control_assert(backend->reset);
}
-static struct component_ops sun4i_backend_ops = {
+static const struct component_ops sun4i_backend_ops = {
.bind = sun4i_backend_bind,
.unbind = sun4i_backend_unbind,
};
@@ -408,6 +428,7 @@ static int sun4i_backend_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_backend_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-backend" },
+ { .compatible = "allwinner,sun6i-a31-display-backend" },
{ .compatible = "allwinner,sun8i-a33-display-backend" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 0da9862ad8ed..4ce665349f6b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_of.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -52,9 +53,7 @@ static const struct file_operations sun4i_drv_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -142,9 +141,9 @@ static int sun4i_drv_bind(struct device *dev)
/* Create our layers */
drv->layers = sun4i_layers_init(drm);
- if (!drv->layers) {
+ if (IS_ERR(drv->layers)) {
dev_err(drm->dev, "Couldn't create the planes\n");
- ret = -EINVAL;
+ ret = PTR_ERR(drv->layers);
goto free_drm;
}
@@ -201,12 +200,15 @@ static const struct component_master_ops sun4i_drv_master_ops = {
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
}
@@ -239,7 +241,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
/* Add current component */
DRM_DEBUG_DRIVER("Adding component %s\n",
of_node_full_name(node));
- component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, compare_of, node);
count++;
}
@@ -322,6 +324,8 @@ static int sun4i_drv_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-engine" },
+ { .compatible = "allwinner,sun6i-a31-display-engine" },
+ { .compatible = "allwinner,sun6i-a31s-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ }
};
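The layers_init error handling above switches from a NULL check to the kernel's error-pointer idiom (IS_ERR()/PTR_ERR()). Below is a simplified userspace model of that idiom under made-up helper names (my_err_ptr() and friends); the real macros live in <linux/err.h>.

/*
 * Simplified userspace model of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom:
 * a small negative errno is encoded in the pointer value, so a single
 * return channel carries either a valid object or an error code.
 * Helper names are invented for this sketch.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *my_err_ptr(long error)
{
	return (void *)error;
}

static inline int my_is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long my_ptr_err(const void *ptr)
{
	return (long)ptr;
}

static void *layers_init(int fail)
{
	static int layer;	/* stand-in for a real allocation */

	if (fail)
		return my_err_ptr(-EINVAL);
	return &layer;
}

int main(void)
{
	void *layers = layers_init(1);

	if (my_is_err(layers))
		printf("init failed: %ld\n", my_ptr_err(layers));
	else
		printf("init ok\n");
	return 0;
}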
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index f0035bf5efea..5d53c977bca5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -73,12 +73,18 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
static const uint32_t sun4i_backend_layer_formats_primary[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
static const uint32_t sun4i_backend_layer_formats_overlay[] = {
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index c3ff10f559cc..d198ad7e5323 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -152,15 +152,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (!IS_ERR(tcon->panel))
drm_panel_prepare(tcon->panel);
- drm_panel_enable(tcon->panel);
- }
-
- /* encoder->bridge can be NULL; drm_bridge_enable checks for it */
- drm_bridge_enable(encoder->bridge);
sun4i_tcon_channel_enable(tcon, 0);
+
+ if (!IS_ERR(tcon->panel))
+ drm_panel_enable(tcon->panel);
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
@@ -171,15 +169,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- sun4i_tcon_channel_disable(tcon, 0);
+ if (!IS_ERR(tcon->panel))
+ drm_panel_disable(tcon->panel);
- /* encoder->bridge can be NULL; drm_bridge_disable checks for it */
- drm_bridge_disable(encoder->bridge);
+ sun4i_tcon_channel_disable(tcon, 0);
- if (!IS_ERR(tcon->panel)) {
- drm_panel_disable(tcon->panel);
+ if (!IS_ERR(tcon->panel))
drm_panel_unprepare(tcon->panel);
- }
}
static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index cadacb517f95..ea2906f87cb9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -20,6 +20,7 @@
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
@@ -62,7 +63,7 @@ void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE, 0);
clk_disable_unprepare(tcon->sclk1);
@@ -80,7 +81,7 @@ void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE,
SUN4I_TCON1_CTL_TCON_ENABLE);
@@ -202,7 +203,7 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
u8 clk_delay;
u32 val;
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
@@ -266,7 +267,7 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
/*
* FIXME: Undocumented bits
*/
- if (tcon->has_mux)
+ if (tcon->quirks->has_unknown_mux)
regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, 1);
}
EXPORT_SYMBOL(sun4i_tcon1_mode_set);
@@ -327,7 +328,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
return PTR_ERR(tcon->sclk0);
}
- if (tcon->has_channel_1) {
+ if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
if (IS_ERR(tcon->sclk1)) {
dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
@@ -487,14 +488,7 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
drv->tcon = tcon;
tcon->drm = drm;
tcon->dev = dev;
-
- if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
- tcon->has_mux = true;
- tcon->has_channel_1 = true;
- } else {
- tcon->has_mux = false;
- tcon->has_channel_1 = false;
- }
+ tcon->quirks = of_device_get_match_data(dev);
tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
if (IS_ERR(tcon->lcd_rst)) {
@@ -551,7 +545,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
sun4i_tcon_free_clocks(tcon);
}
-static struct component_ops sun4i_tcon_ops = {
+static const struct component_ops sun4i_tcon_ops = {
.bind = sun4i_tcon_bind,
.unbind = sun4i_tcon_unbind,
};
@@ -588,9 +582,28 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
+ .has_unknown_mux = true,
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
+ /* nothing is supported */
+};
+
static const struct of_device_id sun4i_tcon_of_table[] = {
- { .compatible = "allwinner,sun5i-a13-tcon" },
- { .compatible = "allwinner,sun8i-a33-tcon" },
+ { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
+ { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
+ { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
+ { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
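The TCON changes above replace per-compatible if/else checks with a quirks struct hung off each of_device_id entry and fetched via of_device_get_match_data(). A standalone sketch of the same table-driven lookup follows; the struct layout and helper name are stand-ins for illustration.

/*
 * Standalone sketch of the table-driven quirks pattern introduced for
 * sun4i_tcon: each "compatible" string carries a pointer to a quirks
 * struct, and the probe path simply looks the entry up instead of
 * open-coding per-SoC if/else checks. Names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct tcon_quirks {
	bool has_unknown_mux;
	bool has_channel_1;
};

struct match_entry {
	const char *compatible;
	const struct tcon_quirks *data;
};

static const struct tcon_quirks sun5i_quirks = { true, true };
static const struct tcon_quirks sun6i_quirks = { false, true };
static const struct tcon_quirks sun8i_quirks = { false, false };

static const struct match_entry match_table[] = {
	{ "allwinner,sun5i-a13-tcon", &sun5i_quirks },
	{ "allwinner,sun6i-a31-tcon", &sun6i_quirks },
	{ "allwinner,sun8i-a33-tcon", &sun8i_quirks },
	{ NULL, NULL },
};

/* Rough stand-in for of_device_get_match_data() */
static const struct tcon_quirks *get_match_data(const char *compatible)
{
	const struct match_entry *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct tcon_quirks *q = get_match_data("allwinner,sun6i-a31-tcon");

	if (q)
		printf("channel 1: %s\n", q->has_channel_1 ? "yes" : "no");
	return 0;
}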
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 12bd48925f4d..166064bafe2e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -142,6 +142,11 @@
#define SUN4I_TCON_MAX_CHANNELS 2
+struct sun4i_tcon_quirks {
+ bool has_unknown_mux; /* sun5i has undocumented mux */
+ bool has_channel_1; /* a33 does not have channel 1 */
+};
+
struct sun4i_tcon {
struct device *dev;
struct drm_device *drm;
@@ -160,12 +165,10 @@ struct sun4i_tcon {
/* Reset control */
struct reset_control *lcd_rst;
- /* Platform adjustments */
- bool has_mux;
-
struct drm_panel *panel;
- bool has_channel_1;
+ /* Platform adjustments */
+ const struct sun4i_tcon_quirks *quirks;
};
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 1dd3d9eabf2e..d430b331fed5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -667,7 +667,7 @@ static void sun4i_tv_unbind(struct device *dev, struct device *master,
clk_disable_unprepare(tv->clk);
}
-static struct component_ops sun4i_tv_ops = {
+static const struct component_ops sun4i_tv_ops = {
.bind = sun4i_tv_bind,
.unbind = sun4i_tv_unbind,
};
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index bf6d846d8132..09bba853e2a4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -80,7 +80,7 @@ static void sun6i_drc_unbind(struct device *dev, struct device *master,
reset_control_assert(drc->reset);
}
-static struct component_ops sun6i_drc_ops = {
+static const struct component_ops sun6i_drc_ops = {
.bind = sun6i_drc_bind,
.unbind = sun6i_drc_unbind,
};
@@ -98,6 +98,8 @@ static int sun6i_drc_remove(struct platform_device *pdev)
}
static const struct of_device_id sun6i_drc_of_table[] = {
+ { .compatible = "allwinner,sun6i-a31-drc" },
+ { .compatible = "allwinner,sun6i-a31s-drc" },
{ .compatible = "allwinner,sun8i-a33-drc" },
{ }
};
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index f418892b0c71..c54138c3a376 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -49,9 +49,7 @@ static const struct file_operations tdfx_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 63ebb154b9b5..bbf5a4b7e0b6 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -3,7 +3,6 @@ config DRM_TEGRA
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on COMMON_CLK
depends on DRM
- depends on RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8ab47b502d83..b8be3ee4d3b8 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -63,7 +63,7 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void tegra_atomic_work(struct work_struct *work)
@@ -96,6 +96,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
tegra_atomic_schedule(tegra, state);
else
@@ -801,9 +802,7 @@ static const struct file_operations tegra_drm_fops = {
.mmap = tegra_drm_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
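The tegra commit path above moves from drm_atomic_state_free() to reference counting: an extra drm_atomic_state_get() before handing the state to (possibly asynchronous) completion, matched by a put in tegra_atomic_complete(). Here is a minimal standalone refcount sketch of that handoff, with invented type and helper names.

/*
 * Minimal sketch of the reference-count handoff: the committer takes an
 * extra reference before passing the state to asynchronous completion,
 * and the completion drops it, so the state stays alive no matter which
 * side runs last. Types and helpers are made up for the sketch.
 */
#include <stdio.h>
#include <stdlib.h>

struct state {
	int refcount;
};

static struct state *state_alloc(void)
{
	struct state *s = calloc(1, sizeof(*s));

	if (s)
		s->refcount = 1;	/* caller's initial reference */
	return s;
}

static void state_get(struct state *s)
{
	s->refcount++;
}

static void state_put(struct state *s)
{
	if (--s->refcount == 0) {
		printf("state freed\n");
		free(s);
	}
}

static void complete_commit(struct state *s)
{
	/* ... program hardware from s ... */
	state_put(s);		/* drop the commit path's reference */
}

int main(void)
{
	struct state *s = state_alloc();

	if (!s)
		return 1;

	state_get(s);		/* extra ref for the async completion */
	complete_commit(s);	/* would normally run from a worker */
	state_put(s);		/* caller drops its own reference */
	return 0;
}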
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e6d71fa4028e..e4a5ab0a9677 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -186,14 +186,10 @@ unreference:
#ifdef CONFIG_DRM_FBDEV_EMULATION
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 52ebe8fc1784..9942b0577d6e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -21,11 +21,15 @@
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
-#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
+#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
+#define TILCDC_PALETTE_SIZE 32
+#define TILCDC_PALETTE_FIRST_ENTRY 0x4000
struct tilcdc_crtc {
struct drm_crtc base;
@@ -33,7 +37,9 @@ struct tilcdc_crtc {
struct drm_plane primary;
const struct tilcdc_panel_info *info;
struct drm_pending_vblank_event *event;
+ struct mutex enable_lock;
bool enabled;
+ bool shutdown;
wait_queue_head_t frame_done_wq;
bool frame_done;
spinlock_t irq_lock;
@@ -53,6 +59,11 @@ struct tilcdc_crtc {
int sync_lost_count;
bool frame_intact;
+ struct work_struct recover_work;
+
+ dma_addr_t palette_dma_handle;
+ u16 *palette_base;
+ struct completion palette_loaded;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
@@ -71,17 +82,16 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_gem_cma_object *gem;
- unsigned int depth, bpp;
dma_addr_t start, end;
u64 dma_base_and_ceiling;
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
gem = drm_fb_cma_get_gem_obj(fb, 0);
start = gem->paddr + fb->offsets[0] +
crtc->y * fb->pitches[0] +
- crtc->x * bpp / 8;
+ crtc->x * drm_format_plane_cpp(fb->pixel_format, 0);
end = start + (crtc->mode.vdisplay * fb->pitches[0]);
@@ -90,7 +100,10 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
* unlikely that LCDC would fetch the DMA addresses in the middle of
* an update.
*/
- dma_base_and_ceiling = (u64)(end - 1) << 32 | start;
+ if (priv->rev == 1)
+ end -= 1;
+
+ dma_base_and_ceiling = (u64)end << 32 | start;
tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
if (tilcdc_crtc->curr_fb)
@@ -100,6 +113,56 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
tilcdc_crtc->curr_fb = fb;
}
+/*
+ * The driver currently supports only true color formats. For
+ * true color the palette block is bypassed, but a 32 byte palette
+ * should still be loaded. The first 16-bit entry must be 0x4000 while
+ * all other entries must be zeroed.
+ */
+static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int ret;
+
+ reinit_completion(&tilcdc_crtc->palette_loaded);
+
+ /* Tell the LCDC where the palette is located. */
+ tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
+ tilcdc_crtc->palette_dma_handle);
+ tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
+ (u32) tilcdc_crtc->palette_dma_handle +
+ TILCDC_PALETTE_SIZE - 1);
+
+ /* Set dma load mode for palette loading only. */
+ tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
+ LCDC_PALETTE_LOAD_MODE_MASK);
+
+ /* Enable DMA Palette Loaded Interrupt */
+ if (priv->rev == 1)
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);
+
+ /* Enable LCDC DMA and wait for palette to be loaded. */
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: Palette loading timeout", __func__);
+
+ /* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
+}
+
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
@@ -108,6 +171,7 @@ static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
if (priv->rev == 1) {
tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
LCDC_V1_UNDERFLOW_INT_ENA);
tilcdc_set(dev, LCDC_DMA_CTRL_REG,
LCDC_V1_END_OF_FRAME_INT_ENA);
@@ -126,6 +190,7 @@ static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
/* disable irqs that we might have enabled: */
if (priv->rev == 1) {
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
LCDC_V1_END_OF_FRAME_INT_ENA);
@@ -150,193 +215,68 @@ static void reset(struct drm_crtc *crtc)
tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
-static void tilcdc_crtc_enable(struct drm_crtc *crtc)
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate resulting from calculating the clock divider value.
+ */
+static unsigned int tilcdc_pclk_diff(unsigned long rate,
+ unsigned long real_rate)
{
- struct drm_device *dev = crtc->dev;
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (tilcdc_crtc->enabled)
- return;
-
- pm_runtime_get_sync(dev->dev);
-
- reset(crtc);
-
- tilcdc_crtc_enable_irqs(dev);
-
- tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
- tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
- tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
-
- drm_crtc_vblank_on(crtc);
+ int r = rate / 100, rr = real_rate / 100;
- tilcdc_crtc->enabled = true;
+ return (unsigned int)(abs(((rr - r) * 100) / r));
}
-void tilcdc_crtc_disable(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (!tilcdc_crtc->enabled)
- return;
-
- tilcdc_crtc->frame_done = false;
- tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
-
- /*
- * if necessary wait for framedone irq which will still come
- * before putting things to sleep..
- */
- if (priv->rev == 2) {
- int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
- tilcdc_crtc->frame_done,
- msecs_to_jiffies(500));
- if (ret == 0)
- dev_err(dev->dev, "%s: timeout waiting for framedone\n",
- __func__);
- }
-
- drm_crtc_vblank_off(crtc);
-
- tilcdc_crtc_disable_irqs(dev);
-
- pm_runtime_put_sync(dev->dev);
-
- if (tilcdc_crtc->next_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->next_fb);
- tilcdc_crtc->next_fb = NULL;
- }
-
- if (tilcdc_crtc->curr_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->curr_fb);
- tilcdc_crtc->curr_fb = NULL;
- }
-
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- tilcdc_crtc->last_vblank = ktime_set(0, 0);
-
- tilcdc_crtc->enabled = false;
-}
-
-static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
-{
- return crtc->state && crtc->state->enable && crtc->state->active;
-}
-
-static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
-{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct tilcdc_drm_private *priv = crtc->dev->dev_private;
-
- drm_modeset_lock_crtc(crtc, NULL);
- tilcdc_crtc_disable(crtc);
- drm_modeset_unlock_crtc(crtc);
-
- flush_workqueue(priv->wq);
-
- of_node_put(crtc->port);
- drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
-}
-
-int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
-{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- unsigned long flags;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (tilcdc_crtc->event) {
- dev_err(dev->dev, "already pending page flip!\n");
- return -EBUSY;
- }
-
- drm_framebuffer_reference(fb);
-
- crtc->primary->fb = fb;
-
- spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
-
- if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
- ktime_t next_vblank;
- s64 tdiff;
-
- next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
-
- tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
-
- if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
- tilcdc_crtc->next_fb = fb;
- }
-
- if (tilcdc_crtc->next_fb != fb)
- set_scanout(crtc, fb);
-
- tilcdc_crtc->event = event;
-
- spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
-
- return 0;
-}
-
-static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ unsigned long clk_rate, real_rate, req_rate;
+ unsigned int clkdiv;
+ int ret;
- if (!tilcdc_crtc->simulate_vesa_sync)
- return true;
+ clkdiv = 2; /* first try using a standard divider of 2 */
- /*
- * tilcdc does not generate VESA-compliant sync but aligns
- * VS on the second edge of HS instead of first edge.
- * We use adjusted_mode, to fixup sync by aligning both rising
- * edges and add HSKEW offset to fix the sync.
- */
- adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
- adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+ /* mode.clock is in KHz, set_rate wants parameter in Hz */
+ req_rate = crtc->mode.clock * 1000;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
- adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
- adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
- } else {
- adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
- adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
- }
+ ret = clk_set_rate(priv->clk, req_rate * clkdiv);
+ clk_rate = clk_get_rate(priv->clk);
+ if (ret < 0) {
+ /*
+ * If we fail to set the clock rate (some architectures don't
+ * use the common clock framework yet and may not implement
+ * all the clk API calls for every clock), try the next best
+ * thing: adjusting the clock divider, unless clk_get_rate()
+ * failed as well.
+ */
+ if (!clk_rate) {
+ /* Nothing more we can do. Just bail out. */
+ dev_err(dev->dev,
+ "failed to set the pixel clock - unable to read current lcdc clock rate\n");
+ return;
+ }
- return true;
-}
+ clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);
-static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- const unsigned clkdiv = 2; /* using a fixed divider of 2 */
- int ret;
+ /*
+ * Emit a warning if the real clock rate resulting from the
+ * calculated divider differs much from the requested rate.
+ *
+ * 5% is an arbitrary value - LCDs are usually quite tolerant
+ * about pixel clock rates.
+ */
+ real_rate = clkdiv * req_rate;
- /* mode.clock is in KHz, set_rate wants parameter in Hz */
- ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
- if (ret < 0) {
- dev_err(dev->dev, "failed to set display clock rate to: %d\n",
- crtc->mode.clock);
- return;
+ if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
+ dev_warn(dev->dev,
+ "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
+ clk_rate, real_rate);
+ }
}
- tilcdc_crtc->lcd_fck_rate = clk_get_rate(priv->clk);
+ tilcdc_crtc->lcd_fck_rate = clk_rate;
DBG("lcd_clk=%u, mode clock=%d, div=%u",
tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);
@@ -351,7 +291,7 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
LCDC_V2_CORE_CLK_EN);
}
-static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -361,8 +301,6 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct drm_framebuffer *fb = crtc->primary->state->fb;
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
if (WARN_ON(!info))
return;
@@ -461,16 +399,16 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (info->tft_alt_mode)
reg |= LCDC_TFT_ALT_ENABLE;
if (priv->rev == 2) {
- unsigned int depth, bpp;
-
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
- switch (bpp) {
- case 16:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB565:
break;
- case 32:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
reg |= LCDC_V2_TFT_24BPP_UNPACK;
/* fallthrough */
- case 24:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_RGB888:
reg |= LCDC_V2_TFT_24BPP_MODE;
break;
default:
@@ -511,15 +449,226 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
else
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
- drm_framebuffer_reference(fb);
+ tilcdc_crtc_set_clk(crtc);
+
+ tilcdc_crtc_load_palette(crtc);
set_scanout(crtc, fb);
- tilcdc_crtc_set_clk(crtc);
+ drm_framebuffer_reference(fb);
crtc->hwmode = crtc->state->adjusted_mode;
}
+static void tilcdc_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ mutex_lock(&tilcdc_crtc->enable_lock);
+ if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+ return;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+
+ reset(crtc);
+
+ tilcdc_crtc_set_mode(crtc);
+
+ tilcdc_crtc_enable_irqs(dev);
+
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+ tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
+ LCDC_PALETTE_LOAD_MODE_MASK);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ drm_crtc_vblank_on(crtc);
+
+ tilcdc_crtc->enabled = true;
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+}
+
+static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int ret;
+
+ mutex_lock(&tilcdc_crtc->enable_lock);
+ if (shutdown)
+ tilcdc_crtc->shutdown = true;
+ if (!tilcdc_crtc->enabled) {
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+ return;
+ }
+ tilcdc_crtc->frame_done = false;
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ /*
+ * Wait for framedone irq which will still come before putting
+ * things to sleep..
+ */
+ ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: timeout waiting for framedone\n",
+ __func__);
+
+ drm_crtc_vblank_off(crtc);
+
+ tilcdc_crtc_disable_irqs(dev);
+
+ pm_runtime_put_sync(dev->dev);
+
+ if (tilcdc_crtc->next_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->next_fb);
+ tilcdc_crtc->next_fb = NULL;
+ }
+
+ if (tilcdc_crtc->curr_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->curr_fb);
+ tilcdc_crtc->curr_fb = NULL;
+ }
+
+ drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
+ tilcdc_crtc->last_vblank = ktime_set(0, 0);
+
+ tilcdc_crtc->enabled = false;
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+}
+
+static void tilcdc_crtc_disable(struct drm_crtc *crtc)
+{
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ tilcdc_crtc_off(crtc, false);
+}
+
+void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_off(crtc, true);
+}
+
+static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
+{
+ return crtc->state && crtc->state->enable && crtc->state->active;
+}
+
+static void tilcdc_crtc_recover_work(struct work_struct *work)
+{
+ struct tilcdc_crtc *tilcdc_crtc =
+ container_of(work, struct tilcdc_crtc, recover_work);
+ struct drm_crtc *crtc = &tilcdc_crtc->base;
+
+ dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);
+
+ drm_modeset_lock_crtc(crtc, NULL);
+
+ if (!tilcdc_crtc_is_on(crtc))
+ goto out;
+
+ tilcdc_crtc_disable(crtc);
+ tilcdc_crtc_enable(crtc);
+out:
+ drm_modeset_unlock_crtc(crtc);
+}
+
+static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+
+ drm_modeset_lock_crtc(crtc, NULL);
+ tilcdc_crtc_disable(crtc);
+ drm_modeset_unlock_crtc(crtc);
+
+ flush_workqueue(priv->wq);
+
+ of_node_put(crtc->port);
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
+}
+
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ if (tilcdc_crtc->event) {
+ dev_err(dev->dev, "already pending page flip!\n");
+ return -EBUSY;
+ }
+
+ drm_framebuffer_reference(fb);
+
+ crtc->primary->fb = fb;
+
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+ ktime_t next_vblank;
+ s64 tdiff;
+
+ next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+ 1000000 / crtc->hwmode.vrefresh);
+
+ tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+
+ if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+ tilcdc_crtc->next_fb = fb;
+ }
+
+ if (tilcdc_crtc->next_fb != fb)
+ set_scanout(crtc, fb);
+
+ tilcdc_crtc->event = event;
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+
+ return 0;
+}
+
+static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ if (!tilcdc_crtc->simulate_vesa_sync)
+ return true;
+
+ /*
+ * tilcdc does not generate VESA-compliant sync but aligns
+ * VS on the second edge of HS instead of first edge.
+ * We use adjusted_mode, to fixup sync by aligning both rising
+ * edges and add HSKEW offset to fix the sync.
+ */
+ adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
+ adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
+ } else {
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
+ }
+
+ return true;
+}
+
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -560,7 +709,6 @@ static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
.enable = tilcdc_crtc_enable,
.disable = tilcdc_crtc_disable,
.atomic_check = tilcdc_crtc_atomic_check,
- .mode_set_nofb = tilcdc_crtc_mode_set_nofb,
};
int tilcdc_crtc_max_width(struct drm_crtc *crtc)
@@ -756,28 +904,48 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
}
if (stat & LCDC_FIFO_UNDERFLOW)
- dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow",
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
__func__, stat);
- /* For revision 2 only */
- if (priv->rev == 2) {
- if (stat & LCDC_FRAME_DONE) {
- tilcdc_crtc->frame_done = true;
- wake_up(&tilcdc_crtc->frame_done_wq);
- }
+ if (stat & LCDC_PL_LOAD_DONE) {
+ complete(&tilcdc_crtc->palette_loaded);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+ LCDC_V2_PL_INT_ENA);
+ }
- if (stat & LCDC_SYNC_LOST) {
- dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
- __func__, stat);
- tilcdc_crtc->frame_intact = false;
- if (tilcdc_crtc->sync_lost_count++ >
- SYNC_LOST_COUNT_LIMIT) {
- dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, disabling the interrupt", __func__, stat);
+ if (stat & LCDC_SYNC_LOST) {
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
+ __func__, stat);
+ tilcdc_crtc->frame_intact = false;
+ if (tilcdc_crtc->sync_lost_count++ >
+ SYNC_LOST_COUNT_LIMIT) {
+ dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
+ queue_work(system_wq, &tilcdc_crtc->recover_work);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA);
+ else
tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
LCDC_SYNC_LOST);
- }
+ tilcdc_crtc->sync_lost_count = 0;
}
+ }
+
+ if (stat & LCDC_FRAME_DONE) {
+ tilcdc_crtc->frame_done = true;
+ wake_up(&tilcdc_crtc->frame_done_wq);
+ /* rev 1 lcdc appears to hang if irq is not disabled here */
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_FRAME_DONE_INT_ENA);
+ }
+ /* For revision 2 only */
+ if (priv->rev == 2) {
/* Indicate to LCDC that the interrupt service routine has
* completed, see 13.3.6.1.6 in AM335x TRM.
*/
@@ -787,7 +955,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
return IRQ_HANDLED;
}
-struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
+int tilcdc_crtc_create(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_crtc *tilcdc_crtc;
@@ -797,21 +965,33 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
if (!tilcdc_crtc) {
dev_err(dev->dev, "allocation failed\n");
- return NULL;
+ return -ENOMEM;
}
+ init_completion(&tilcdc_crtc->palette_loaded);
+ tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
+ TILCDC_PALETTE_SIZE,
+ &tilcdc_crtc->palette_dma_handle,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tilcdc_crtc->palette_base)
+ return -ENOMEM;
+ *tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;
+
crtc = &tilcdc_crtc->base;
ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
if (ret < 0)
goto fail;
+ mutex_init(&tilcdc_crtc->enable_lock);
+
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
drm_flip_work_init(&tilcdc_crtc->unref_work,
"unref", unref_worker);
spin_lock_init(&tilcdc_crtc->irq_lock);
+ INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);
ret = drm_crtc_init_with_planes(dev, crtc,
&tilcdc_crtc->primary,
@@ -837,13 +1017,15 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
if (!crtc->port) { /* This should never happen */
dev_err(dev->dev, "Port node not found in %s\n",
dev->dev->of_node->full_name);
+ ret = -EINVAL;
goto fail;
}
}
- return crtc;
+ priv->crtc = crtc;
+ return 0;
fail:
tilcdc_crtc_destroy(crtc);
- return NULL;
+ return -ENOMEM;
}
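tilcdc_crtc_set_clk() above now recomputes the divider with DIV_ROUND_CLOSEST() when clk_set_rate() fails and warns if the effective pixel clock deviates more than 5% from the request. The standalone sketch below reproduces that arithmetic with made-up sample rates.

/*
 * Standalone sketch of the pixel-clock fallback math: recompute the
 * divider from the rate the clock actually runs at, then compute the
 * percentage deviation between the effective and requested pixel clock
 * (the driver warns above 5%). Sample rates are made up.
 */
#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* Same formula as tilcdc_pclk_diff() in the patch */
static unsigned int pclk_diff(unsigned long rate, unsigned long real_rate)
{
	int r = rate / 100, rr = real_rate / 100;

	return (unsigned int)abs(((rr - r) * 100) / r);
}

int main(void)
{
	unsigned long req_rate = 33500000;	/* requested pixel clock, Hz */
	unsigned long clk_rate = 126000000;	/* rate the lcdc clock actually runs at */
	unsigned int clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);
	unsigned long real_rate = clkdiv * req_rate;

	printf("div=%u, deviation=%u%%\n", clkdiv, pclk_diff(clk_rate, real_rate));
	return 0;
}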
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index a694977c32f4..bd0a3bd07167 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -127,24 +127,16 @@ static int tilcdc_commit(struct drm_device *dev,
* current layout.
*/
- /* Keep HW on while we commit the state. */
- pm_runtime_get_sync(dev->dev);
-
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
- /* Now HW should remain on if need becase the crtc is enabled */
- pm_runtime_put_sync(dev->dev);
-
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
-
return 0;
}
@@ -155,15 +147,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_commit = tilcdc_commit,
};
-static int modeset_init(struct drm_device *dev)
+static void modeset_init(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_module *mod;
- drm_mode_config_init(dev);
-
- priv->crtc = tilcdc_crtc_create(dev);
-
list_for_each_entry(mod, &module_list, list) {
DBG("loading module: %s", mod->name);
mod->funcs->modeset_init(mod, dev);
@@ -174,8 +162,6 @@ static int modeset_init(struct drm_device *dev)
dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &mode_config_funcs;
-
- return 0;
}
#ifdef CONFIG_CPU_FREQ
@@ -196,22 +182,29 @@ static int cpufreq_transition(struct notifier_block *nb,
* DRM operations:
*/
-static int tilcdc_unload(struct drm_device *dev)
+static void tilcdc_fini(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- tilcdc_remove_external_encoders(dev);
+ if (priv->crtc)
+ tilcdc_crtc_shutdown(priv->crtc);
+
+ if (priv->is_registered)
+ drm_dev_unregister(dev);
- drm_fbdev_cma_fini(priv->fbdev);
drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
+
+ if (priv->fbdev)
+ drm_fbdev_cma_fini(priv->fbdev);
drm_irq_uninstall(dev);
+ drm_mode_config_cleanup(dev);
+ tilcdc_remove_external_device(dev);
#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&priv->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
+ if (priv->freq_transition.notifier_call)
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
#endif
if (priv->clk)
@@ -220,61 +213,71 @@ static int tilcdc_unload(struct drm_device *dev)
if (priv->mmio)
iounmap(priv->mmio);
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ }
dev->dev_private = NULL;
pm_runtime_disable(dev->dev);
- return 0;
+ drm_dev_unref(dev);
}
-static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
{
- struct platform_device *pdev = dev->platformdev;
- struct device_node *node = pdev->dev.of_node;
+ struct drm_device *ddev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
struct tilcdc_drm_private *priv;
struct resource *res;
u32 bpp = 0;
int ret;
- priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(dev->dev, "failed to allocate private data\n");
+ dev_err(dev, "failed to allocate private data\n");
return -ENOMEM;
}
- dev->dev_private = priv;
+ ddev = drm_dev_alloc(ddrv, dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ddev->platformdev = pdev;
+ ddev->dev_private = priv;
+ platform_set_drvdata(pdev, ddev);
+ drm_mode_config_init(ddev);
priv->is_componentized =
- tilcdc_get_external_components(dev->dev, NULL) > 0;
+ tilcdc_get_external_components(dev, NULL) > 0;
priv->wq = alloc_ordered_workqueue("tilcdc", 0);
if (!priv->wq) {
ret = -ENOMEM;
- goto fail_unset_priv;
+ goto init_failed;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(dev->dev, "failed to get memory resource\n");
+ dev_err(dev, "failed to get memory resource\n");
ret = -EINVAL;
- goto fail_free_wq;
+ goto init_failed;
}
priv->mmio = ioremap_nocache(res->start, resource_size(res));
if (!priv->mmio) {
- dev_err(dev->dev, "failed to ioremap\n");
+ dev_err(dev, "failed to ioremap\n");
ret = -ENOMEM;
- goto fail_free_wq;
+ goto init_failed;
}
- priv->clk = clk_get(dev->dev, "fck");
+ priv->clk = clk_get(dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(dev->dev, "failed to get functional clock\n");
+ dev_err(dev, "failed to get functional clock\n");
ret = -ENODEV;
- goto fail_iounmap;
+ goto init_failed;
}
#ifdef CONFIG_CPU_FREQ
@@ -282,8 +285,9 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
ret = cpufreq_register_notifier(&priv->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
- dev_err(dev->dev, "failed to register cpufreq notifier\n");
- goto fail_put_clk;
+ dev_err(dev, "failed to register cpufreq notifier\n");
+ priv->freq_transition.notifier_call = NULL;
+ goto init_failed;
}
#endif
@@ -292,22 +296,22 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
- if (of_property_read_u32(node, "ti,max-width", &priv->max_width))
+ if (of_property_read_u32(node, "max-width", &priv->max_width))
priv->max_width = TILCDC_DEFAULT_MAX_WIDTH;
DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
- if (of_property_read_u32(node, "ti,max-pixelclock",
+ if (of_property_read_u32(node, "max-pixelclock",
&priv->max_pixelclock))
priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(dev);
/* Determine LCD IP Version */
- pm_runtime_get_sync(dev->dev);
- switch (tilcdc_read(dev, LCDC_PID_REG)) {
+ pm_runtime_get_sync(dev);
+ switch (tilcdc_read(ddev, LCDC_PID_REG)) {
case 0x4c100102:
priv->rev = 1;
break;
@@ -316,14 +320,14 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
priv->rev = 2;
break;
default:
- dev_warn(dev->dev, "Unknown PID Reg value 0x%08x, "
- "defaulting to LCD revision 1\n",
- tilcdc_read(dev, LCDC_PID_REG));
+ dev_warn(dev, "Unknown PID Reg value 0x%08x, "
+ "defaulting to LCD revision 1\n",
+ tilcdc_read(ddev, LCDC_PID_REG));
priv->rev = 1;
break;
}
- pm_runtime_put_sync(dev->dev);
+ pm_runtime_put_sync(dev);
if (priv->rev == 1) {
DBG("Revision 1 LCDC supports only RGB565 format");
@@ -356,91 +360,67 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
}
}
- ret = modeset_init(dev);
+ ret = tilcdc_crtc_create(ddev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
- goto fail_cpufreq_unregister;
+ dev_err(dev, "failed to create crtc\n");
+ goto init_failed;
}
-
- platform_set_drvdata(pdev, dev);
+ modeset_init(ddev);
if (priv->is_componentized) {
- ret = component_bind_all(dev->dev, dev);
+ ret = component_bind_all(dev, ddev);
if (ret < 0)
- goto fail_mode_config_cleanup;
+ goto init_failed;
- ret = tilcdc_add_external_encoders(dev);
+ ret = tilcdc_add_component_encoder(ddev);
if (ret < 0)
- goto fail_component_cleanup;
+ goto init_failed;
+ } else {
+ ret = tilcdc_attach_external_device(ddev);
+ if (ret)
+ goto init_failed;
}
- if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
- dev_err(dev->dev, "no encoders/connectors found\n");
+ if (!priv->external_connector &&
+ ((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
+ dev_err(dev, "no encoders/connectors found\n");
ret = -ENXIO;
- goto fail_external_cleanup;
+ goto init_failed;
}
- ret = drm_vblank_init(dev, 1);
+ ret = drm_vblank_init(ddev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
- goto fail_external_cleanup;
+ dev_err(dev, "failed to initialize vblank\n");
+ goto init_failed;
}
- ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+ ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
if (ret < 0) {
- dev_err(dev->dev, "failed to install IRQ handler\n");
- goto fail_vblank_cleanup;
+ dev_err(dev, "failed to install IRQ handler\n");
+ goto init_failed;
}
- drm_mode_config_reset(dev);
+ drm_mode_config_reset(ddev);
- priv->fbdev = drm_fbdev_cma_init(dev, bpp,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
+ priv->fbdev = drm_fbdev_cma_init(ddev, bpp,
+ ddev->mode_config.num_crtc,
+ ddev->mode_config.num_connector);
if (IS_ERR(priv->fbdev)) {
ret = PTR_ERR(priv->fbdev);
- goto fail_irq_uninstall;
+ goto init_failed;
}
- drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_init(ddev);
- return 0;
-
-fail_irq_uninstall:
- drm_irq_uninstall(dev);
-
-fail_vblank_cleanup:
- drm_vblank_cleanup(dev);
-
-fail_component_cleanup:
- if (priv->is_componentized)
- component_unbind_all(dev->dev, dev);
-
-fail_mode_config_cleanup:
- drm_mode_config_cleanup(dev);
-
-fail_external_cleanup:
- tilcdc_remove_external_encoders(dev);
-
-fail_cpufreq_unregister:
- pm_runtime_disable(dev->dev);
-#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&priv->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-
-fail_put_clk:
-#endif
- clk_put(priv->clk);
-
-fail_iounmap:
- iounmap(priv->mmio);
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto init_failed;
-fail_free_wq:
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
+ priv->is_registered = true;
+ return 0;
-fail_unset_priv:
- dev->dev_private = NULL;
+init_failed:
+ tilcdc_fini(ddev);
return ret;
}
@@ -575,9 +555,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -587,8 +565,6 @@ static const struct file_operations fops = {
static struct drm_driver tilcdc_driver = {
.driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
DRIVER_PRIME | DRIVER_ATOMIC),
- .load = tilcdc_load,
- .unload = tilcdc_unload,
.lastclose = tilcdc_lastclose,
.irq_handler = tilcdc_irq,
.get_vblank_counter = drm_vblank_no_hw_counter,
@@ -662,10 +638,9 @@ static const struct dev_pm_ops tilcdc_pm_ops = {
/*
* Platform driver:
*/
-
static int tilcdc_bind(struct device *dev)
{
- return drm_platform_init(&tilcdc_driver, to_platform_device(dev));
+ return tilcdc_init(&tilcdc_driver, dev);
}
static void tilcdc_unbind(struct device *dev)
@@ -676,7 +651,7 @@ static void tilcdc_unbind(struct device *dev)
if (!ddev->dev_private)
return;
- drm_put_dev(dev_get_drvdata(dev));
+ tilcdc_fini(dev_get_drvdata(dev));
}
static const struct component_master_ops tilcdc_comp_ops = {
@@ -699,7 +674,7 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
else if (ret == 0)
- return drm_platform_init(&tilcdc_driver, pdev);
+ return tilcdc_init(&tilcdc_driver, &pdev->dev);
else
return component_master_add_with_match(&pdev->dev,
&tilcdc_comp_ops,
@@ -714,7 +689,7 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
if (ret < 0)
return ret;
else if (ret == 0)
- drm_put_dev(platform_get_drvdata(pdev));
+ tilcdc_fini(platform_get_drvdata(pdev));
else
component_master_del(&pdev->dev, &tilcdc_comp_ops);
@@ -723,6 +698,7 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
static struct of_device_id tilcdc_of_match[] = {
{ .compatible = "ti,am33xx-tilcdc", },
+ { .compatible = "ti,da850-tilcdc", },
{ },
};
MODULE_DEVICE_TABLE(of, tilcdc_of_match);
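The reworked tilcdc init path above funnels every failure through a single init_failed label that calls tilcdc_fini(), which checks each resource before releasing it so it is safe on a partially-initialized device. Below is a standalone sketch of that shape with placeholder resources and failure points.

/*
 * Standalone sketch of the "one error label, tolerant teardown" shape:
 * every failure jumps to init_failed, and the teardown routine checks
 * each resource before releasing it. Resource names are placeholders.
 */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *wq;
	void *mmio;
	void *clk;
};

static void ctx_fini(struct ctx *c)
{
	/* Each free is guarded, so this is safe on a partial init. */
	if (c->clk)
		free(c->clk);
	if (c->mmio)
		free(c->mmio);
	if (c->wq)
		free(c->wq);
	free(c);
}

static int ctx_init(int fail_at)
{
	struct ctx *c = calloc(1, sizeof(*c));
	int ret;

	if (!c)
		return -1;

	c->wq = malloc(16);
	if (!c->wq || fail_at == 1) {
		ret = -1;
		goto init_failed;
	}

	c->mmio = malloc(16);
	if (!c->mmio || fail_at == 2) {
		ret = -1;
		goto init_failed;
	}

	ctx_fini(c);	/* success path teardown, just for the demo */
	return 0;

init_failed:
	ctx_fini(c);
	return ret;
}

int main(void)
{
	printf("fail at step 2 -> %d\n", ctx_init(2));
	printf("no failure     -> %d\n", ctx_init(0));
	return 0;
}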
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 9780c37ec4cd..0e71daf5b5cb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -33,6 +33,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_bridge.h>
/* Defaulting to pixel clock defined on AM335x */
#define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000
@@ -87,8 +88,12 @@ struct tilcdc_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
- const struct drm_connector_helper_funcs *connector_funcs[8];
+ struct drm_encoder *external_encoder;
+ struct drm_connector *external_connector;
+ const struct drm_connector_helper_funcs *connector_funcs;
+
+ bool is_registered;
bool is_componentized;
};
@@ -163,7 +168,7 @@ struct tilcdc_panel_info {
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
+int tilcdc_crtc_create(struct drm_device *dev);
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
@@ -172,7 +177,7 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
int tilcdc_crtc_max_width(struct drm_crtc *crtc);
-void tilcdc_crtc_disable(struct drm_crtc *crtc);
+void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 68e895021005..c67d7cd7d57e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/of_graph.h>
+#include <drm/drm_of.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
@@ -27,44 +28,50 @@ static const struct tilcdc_panel_info panel_info_tda998x = {
.raster_order = 0,
};
+static const struct tilcdc_panel_info panel_info_default = {
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .bpp = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
static int tilcdc_external_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct tilcdc_drm_private *priv = connector->dev->dev_private;
- int ret, i;
+ int ret;
ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
if (ret != MODE_OK)
return ret;
- for (i = 0; i < priv->num_connectors &&
- priv->connectors[i] != connector; i++)
- ;
-
- BUG_ON(priv->connectors[i] != connector);
- BUG_ON(!priv->connector_funcs[i]);
+ BUG_ON(priv->external_connector != connector);
+ BUG_ON(!priv->connector_funcs);
/* If the connector has its own mode_valid call it. */
- if (!IS_ERR(priv->connector_funcs[i]) &&
- priv->connector_funcs[i]->mode_valid)
- return priv->connector_funcs[i]->mode_valid(connector, mode);
+ if (!IS_ERR(priv->connector_funcs) &&
+ priv->connector_funcs->mode_valid)
+ return priv->connector_funcs->mode_valid(connector, mode);
return MODE_OK;
}
-static int tilcdc_add_external_encoder(struct drm_device *dev,
- struct drm_connector *connector)
+static int tilcdc_add_external_connector(struct drm_device *dev,
+ struct drm_connector *connector)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_connector_helper_funcs *connector_funcs;
- priv->connectors[priv->num_connectors] = connector;
- priv->encoders[priv->num_encoders++] = connector->encoder;
-
- /* Only tda998x is supported at the moment. */
- tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
- tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
+ /* There should never be more than one connector */
+ if (WARN_ON(priv->external_connector))
+ return -EINVAL;
+ priv->external_connector = connector;
connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
GFP_KERNEL);
if (!connector_funcs)
@@ -77,56 +84,177 @@ static int tilcdc_add_external_encoder(struct drm_device *dev,
* everything else but use our own mode_valid() (above).
*/
if (connector->helper_private) {
- priv->connector_funcs[priv->num_connectors] =
- connector->helper_private;
- *connector_funcs = *priv->connector_funcs[priv->num_connectors];
+ priv->connector_funcs = connector->helper_private;
+ *connector_funcs = *priv->connector_funcs;
} else {
- priv->connector_funcs[priv->num_connectors] = ERR_PTR(-ENOENT);
+ priv->connector_funcs = ERR_PTR(-ENOENT);
}
connector_funcs->mode_valid = tilcdc_external_mode_valid;
drm_connector_helper_add(connector, connector_funcs);
- priv->num_connectors++;
- dev_dbg(dev->dev, "External encoder '%s' connected\n",
- connector->encoder->name);
+ dev_dbg(dev->dev, "External connector '%s' connected\n",
+ connector->name);
return 0;
}
-int tilcdc_add_external_encoders(struct drm_device *dev)
+static
+struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
+ struct drm_encoder *encoder)
{
- struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_connector *connector;
- int num_internal_connectors = priv->num_connectors;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- bool found = false;
- int i, ret;
-
- for (i = 0; i < num_internal_connectors; i++)
- if (connector == priv->connectors[i])
- found = true;
- if (!found) {
- ret = tilcdc_add_external_encoder(dev, connector);
- if (ret)
- return ret;
- }
+ int i;
+
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+ if (connector->encoder_ids[i] == encoder->base.id)
+ return connector;
+
+ dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n",
+ encoder->name, encoder->base.id);
+
+ return NULL;
+}
+
+int tilcdc_add_component_encoder(struct drm_device *ddev)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
+ if (encoder->possible_crtcs & (1 << priv->crtc->index))
+ break;
+
+ if (!encoder) {
+ dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
+ return -ENODEV;
}
- return 0;
+
+ connector = tilcdc_encoder_find_connector(ddev, encoder);
+
+ if (!connector)
+ return -ENODEV;
+
+ /* Only tda998x is supported at the moment. */
+ tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
+ tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
+
+ return tilcdc_add_external_connector(ddev, connector);
}
-void tilcdc_remove_external_encoders(struct drm_device *dev)
+void tilcdc_remove_external_device(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- int i;
/* Restore the original helper functions, if any. */
- for (i = 0; i < priv->num_connectors; i++)
- if (IS_ERR(priv->connector_funcs[i]))
- drm_connector_helper_add(priv->connectors[i], NULL);
- else if (priv->connector_funcs[i])
- drm_connector_helper_add(priv->connectors[i],
- priv->connector_funcs[i]);
+ if (IS_ERR(priv->connector_funcs))
+ drm_connector_helper_add(priv->external_connector, NULL);
+ else if (priv->connector_funcs)
+ drm_connector_helper_add(priv->external_connector,
+ priv->connector_funcs);
+}
+
+static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static
+int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct drm_connector *connector;
+ int ret;
+
+ priv->external_encoder->possible_crtcs = BIT(0);
+ priv->external_encoder->bridge = bridge;
+ bridge->encoder = priv->external_encoder;
+
+ ret = drm_bridge_attach(ddev, bridge);
+ if (ret) {
+ dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
+ return ret;
+ }
+
+ tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_default);
+
+ connector = tilcdc_encoder_find_connector(ddev, priv->external_encoder);
+ if (!connector)
+ return -ENODEV;
+
+ ret = tilcdc_add_external_connector(ddev, connector);
+
+ return ret;
+}
+
+static int tilcdc_node_has_port(struct device_node *dev_node)
+{
+ struct device_node *node;
+
+ node = of_get_child_by_name(dev_node, "ports");
+ if (!node)
+ node = of_get_child_by_name(dev_node, "port");
+ if (!node)
+ return 0;
+ of_node_put(node);
+
+ return 1;
+}
+
+static
+struct device_node *tilcdc_get_remote_node(struct device_node *node)
+{
+ struct device_node *ep;
+ struct device_node *parent;
+
+ if (!tilcdc_node_has_port(node))
+ return NULL;
+
+ ep = of_graph_get_next_endpoint(node, NULL);
+ if (!ep)
+ return NULL;
+
+ parent = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+
+ return parent;
+}
+
+int tilcdc_attach_external_device(struct drm_device *ddev)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct device_node *remote_node;
+ struct drm_bridge *bridge;
+ int ret;
+
+ remote_node = tilcdc_get_remote_node(ddev->dev->of_node);
+ if (!remote_node)
+ return 0;
+
+ bridge = of_drm_find_bridge(remote_node);
+ of_node_put(remote_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ priv->external_encoder = devm_kzalloc(ddev->dev,
+ sizeof(*priv->external_encoder),
+ GFP_KERNEL);
+ if (!priv->external_encoder)
+ return -ENOMEM;
+
+ ret = drm_encoder_init(ddev, priv->external_encoder,
+ &tilcdc_external_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret) {
+ dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
+ return ret;
+ }
+
+ ret = tilcdc_attach_bridge(ddev, bridge);
+ if (ret)
+ drm_encoder_cleanup(priv->external_encoder);
+
+ return ret;
}
static int dev_match_of(struct device *dev, void *data)
@@ -140,16 +268,10 @@ int tilcdc_get_external_components(struct device *dev,
struct device_node *node;
struct device_node *ep = NULL;
int count = 0;
+ int ret = 0;
- /* Avoid error print by of_graph_get_next_endpoint() if there
- * is no ports present.
- */
- node = of_get_child_by_name(dev->of_node, "ports");
- if (!node)
- node = of_get_child_by_name(dev->of_node, "port");
- if (!node)
+ if (!tilcdc_node_has_port(dev->of_node))
return 0;
- of_node_put(node);
while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
node = of_graph_get_remote_port_parent(ep);
@@ -159,16 +281,20 @@ int tilcdc_get_external_components(struct device *dev,
}
dev_dbg(dev, "Subdevice node '%s' found\n", node->name);
- if (match)
- component_match_add(dev, match, dev_match_of, node);
- of_node_put(node);
- count++;
- }
- if (count > 1) {
- dev_err(dev, "Only one external encoder is supported\n");
- return -EINVAL;
+ if (of_device_is_compatible(node, "nxp,tda998x")) {
+ if (match)
+ drm_of_component_match_add(dev, match,
+ dev_match_of, node);
+ ret = 1;
+ }
+
+ of_node_put(node);
+ if (count++ > 1) {
+ dev_err(dev, "Only one port is supported\n");
+ return -EINVAL;
+ }
}
- return count;
+ return ret;
}
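tilcdc_encoder_find_connector() above locates the connector wired to an encoder by scanning each connector's list of possible encoder IDs. A small standalone sketch of that lookup over plain arrays follows; the array sizes, names and IDs are invented.

/*
 * Standalone sketch of the encoder-to-connector lookup: walk every
 * connector and check whether the encoder's id appears in its list of
 * possible encoders. Sizes and ids below are invented for the example.
 */
#include <stdio.h>

#define MAX_ENCODERS_PER_CONNECTOR 3

struct connector {
	const char *name;
	int encoder_ids[MAX_ENCODERS_PER_CONNECTOR];
};

static const struct connector connectors[] = {
	{ "HDMI-A-1", { 31, 0, 0 } },
	{ "LVDS-1",   { 42, 0, 0 } },
};

static const struct connector *find_connector(int encoder_id)
{
	size_t i;
	int j;

	for (i = 0; i < sizeof(connectors) / sizeof(connectors[0]); i++)
		for (j = 0; j < MAX_ENCODERS_PER_CONNECTOR; j++)
			if (connectors[i].encoder_ids[j] == encoder_id)
				return &connectors[i];
	return NULL;
}

int main(void)
{
	const struct connector *c = find_connector(42);

	printf("encoder 42 -> %s\n", c ? c->name : "no connector");
	return 0;
}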
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h
index c700e0c1623e..763d18f006c7 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h
@@ -18,8 +18,9 @@
#ifndef __TILCDC_EXTERNAL_H__
#define __TILCDC_EXTERNAL_H__
-int tilcdc_add_external_encoders(struct drm_device *dev);
-void tilcdc_remove_external_encoders(struct drm_device *dev);
+int tilcdc_add_component_encoder(struct drm_device *dev);
+void tilcdc_remove_external_device(struct drm_device *dev);
int tilcdc_get_external_components(struct device *dev,
struct component_match **match);
+int tilcdc_attach_external_device(struct drm_device *ddev);
#endif /* __TILCDC_SLAVE_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 2134bb20fbe9..ad7a0e8ea5f4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -240,8 +240,6 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_connector_register(connector);
-
return connector;
fail:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 74c65fa859b2..8a6a50d74aff 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -39,7 +39,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
struct drm_plane_state *old_state = plane->state;
- unsigned int depth, bpp;
+ unsigned int pitch;
if (!state->crtc)
return 0;
@@ -68,8 +68,9 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
- if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
+ pitch = crtc_state->mode.hdisplay *
+ drm_format_plane_cpp(state->fb->pixel_format, 0);
+ if (state->fb->pitches[0] != pitch) {
dev_err(plane->dev->dev,
"Invalid pitch: fb and crtc widths must be the same");
return -EINVAL;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index f57c0d62c76a..9d528c0a67a4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -34,11 +34,14 @@
/* LCDC DMA Control Register */
#define LCDC_DMA_BURST_SIZE(x) ((x) << 4)
+#define LCDC_DMA_BURST_SIZE_MASK ((0x7) << 4)
#define LCDC_DMA_BURST_1 0x0
#define LCDC_DMA_BURST_2 0x1
#define LCDC_DMA_BURST_4 0x2
#define LCDC_DMA_BURST_8 0x3
#define LCDC_DMA_BURST_16 0x4
+#define LCDC_DMA_FIFO_THRESHOLD(x) ((x) << 8)
+#define LCDC_DMA_FIFO_THRESHOLD_MASK ((0x3) << 8)
#define LCDC_V1_END_OF_FRAME_INT_ENA BIT(2)
#define LCDC_V2_END_OF_FRAME0_INT_ENA BIT(8)
#define LCDC_V2_END_OF_FRAME1_INT_ENA BIT(9)
@@ -46,10 +49,12 @@
/* LCDC Control Register */
#define LCDC_CLK_DIVISOR(x) ((x) << 8)
+#define LCDC_CLK_DIVISOR_MASK ((0xFF) << 8)
#define LCDC_RASTER_MODE 0x01
/* LCDC Raster Control Register */
#define LCDC_PALETTE_LOAD_MODE(x) ((x) << 20)
+#define LCDC_PALETTE_LOAD_MODE_MASK ((0x3) << 20)
#define PALETTE_AND_DATA 0x00
#define PALETTE_ONLY 0x01
#define DATA_ONLY 0x02
@@ -61,6 +66,8 @@
#define LCDC_V2_UNDERFLOW_INT_ENA BIT(5)
#define LCDC_V1_PL_INT_ENA BIT(4)
#define LCDC_V2_PL_INT_ENA BIT(6)
+#define LCDC_V1_SYNC_LOST_INT_ENA BIT(5)
+#define LCDC_V1_FRAME_DONE_INT_ENA BIT(3)
#define LCDC_MONOCHROME_MODE BIT(1)
#define LCDC_RASTER_ENABLE BIT(0)
#define LCDC_TFT_ALT_ENABLE BIT(23)
@@ -74,7 +81,9 @@
/* LCDC Raster Timing 2 Register */
#define LCDC_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
+#define LCDC_AC_BIAS_TRANSITIONS_PER_INT_MASK ((0xF) << 16)
#define LCDC_AC_BIAS_FREQUENCY(x) ((x) << 8)
+#define LCDC_AC_BIAS_FREQUENCY_MASK ((0xFF) << 8)
#define LCDC_SYNC_CTRL BIT(25)
#define LCDC_SYNC_EDGE BIT(24)
#define LCDC_INVERT_PIXEL_CLOCK BIT(22)
@@ -139,6 +148,12 @@ static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
return ioread32(priv->mmio + reg);
}
+static inline void tilcdc_write_mask(struct drm_device *dev, u32 reg,
+ u32 val, u32 mask)
+{
+ tilcdc_write(dev, reg, (tilcdc_read(dev, reg) & ~mask) | (val & mask));
+}
+
static inline void tilcdc_set(struct drm_device *dev, u32 reg, u32 mask)
{
tilcdc_write(dev, reg, tilcdc_read(dev, reg) | mask);
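
The new tilcdc_write_mask() helper is a read-modify-write companion to the *_MASK definitions added above. A hedged usage sketch (the register macro is assumed to come from the same header):

    /* Illustrative only: change the palette load mode without
     * disturbing the other raster-control bits.
     */
    tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
                      LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
                      LCDC_PALETTE_LOAD_MODE_MASK);
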
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 458043a53995..aabfad882e23 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -249,8 +249,6 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_connector_register(connector);
-
return connector;
fail:
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217dfe401..d5063618efa7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i;
fobj = reservation_object_get_list(bo->resv);
fence = reservation_object_get_excl(bo->resv);
if (fence && !fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(bo->resv));
if (!fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
}
}
@@ -717,6 +717,20 @@ out:
return ret;
}
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ /* Don't evict this BO if it's outside of the
+ * requested placement range
+ */
+ if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ (place->lpfn && place->lpfn <= bo->mem.start))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
const struct ttm_place *place,
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (!ret) {
- if (place && (place->fpfn || place->lpfn)) {
- /* Don't evict this BO if it's outside of the
- * requested placement range
- */
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
- (place->lpfn && place->lpfn <= bo->mem.start)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
- continue;
- }
- }
+ if (ret)
+ continue;
- break;
+ if (place && !bdev->driver->eviction_valuable(bo, place)) {
+ __ttm_bo_unreserve(bo);
+ ret = -EBUSY;
+ continue;
}
+
+ break;
}
if (ret) {
@@ -792,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
@@ -806,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (unlikely(ret))
return ret;
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = fence;
}
@@ -1286,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
/*
@@ -1309,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_unlock(&glob->lru_lock);
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
- ret = fence_wait(fence, false);
- fence_put(fence);
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (ret) {
if (allow_errors) {
return ret;
@@ -1343,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
mem_type);
return ret;
}
- fence_put(man->move);
+ dma_fence_put(man->move);
man->use_type = false;
man->has_type = false;
@@ -1602,7 +1611,14 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
- long timeout = no_wait ? 0 : 15 * HZ;
+ long timeout = 15 * HZ;
+
+ if (no_wait) {
+ if (reservation_object_test_signaled_rcu(bo->resv, true))
+ return 0;
+ else
+ return -EBUSY;
+ }
timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
interruptible, timeout);
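
Two TTM notes on the hunks above: ttm_bo_wait() with no_wait set now polls the reservation object and returns -EBUSY instead of doing a zero-timeout wait, and ttm_bo_eviction_valuable() is exported as the stock placement-range check so drivers without special requirements can wire it straight into their ttm_bo_driver rather than open-coding the test that used to live in ttm_mem_evict_first(); the virtio-gpu and vmwgfx hunks further down do exactly that. A minimal sketch:

    static struct ttm_bo_driver example_bo_driver = {
        /* ... the driver's other callbacks ... */
        .eviction_valuable = ttm_bo_eviction_valuable,
    };
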
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e21655c57..d0459b392e5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
+ struct dma_fence *fence,
bool evict,
struct ttm_mem_reg *new_mem)
{
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
*/
spin_lock(&from->move_lock);
- if (!from->move || fence_is_later(fence, from->move)) {
- fence_put(from->move);
- from->move = fence_get(fence);
+ if (!from->move || dma_fence_is_later(fence, from->move)) {
+ dma_fence_put(from->move);
+ from->move = dma_fence_get(fence);
}
spin_unlock(&from->move_lock);
ttm_bo_free_old_node(bo);
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
} else {
/**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a6ed9d5e5167..4748aedc933a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* Quick non-stalling check for idle.
*/
- if (fence_is_signaled(bo->moving))
+ if (dma_fence_is_signaled(bo->moving))
goto out_clear;
/*
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) fence_wait(bo->moving, true);
+ (void) dma_fence_wait(bo->moving, true);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = fence_wait(bo->moving, true);
+ ret = dma_fence_wait(bo->moving, true);
if (unlikely(ret != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
}
out_clear:
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = NULL;
out_unlock:
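
All of the fence changes in this pull are the same mechanical rename: struct fence and its helpers become struct dma_fence and dma_fence_*() (with <linux/fence.h> becoming <linux/dma-fence.h>, as the vmwgfx header hunk below shows), with no behavioural change. A hedged before/after sketch:

    #include <linux/dma-fence.h>

    static void example_wait_and_drop(struct dma_fence *fence)
    {
        dma_fence_wait(fence, false);   /* was fence_wait() */
        dma_fence_put(fence);           /* was fence_put()  */
    }
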
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index a80717b35dc6..d35bc491e8de 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, struct fence *fence)
+ struct list_head *list,
+ struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index cc45d98f9bb5..cd8b01727734 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -44,9 +44,7 @@ static const struct file_operations udl_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 611b6b9bb3cb..167f42c67c7c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -254,16 +254,10 @@ static int udl_fb_release(struct fb_info *info, int user)
static struct fb_ops udlfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
.fb_mmap = udl_fb_mmap,
.fb_open = udl_fb_open,
.fb_release = udl_fb_release,
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 29f0207fa677..873f010d9616 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -98,17 +98,23 @@ success:
static int udl_select_std_channel(struct udl_device *udl)
{
int ret;
- u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
- 0x1C, 0x88, 0x5E, 0x15,
- 0x60, 0xFE, 0xC6, 0x97,
- 0x16, 0x3D, 0x47, 0xF2};
+ static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+ 0x1C, 0x88, 0x5E, 0x15,
+ 0x60, 0xFE, 0xC6, 0x97,
+ 0x16, 0x3D, 0x47, 0xF2};
+ void *sendbuf;
+
+ sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
+ if (!sendbuf)
+ return -ENOMEM;
ret = usb_control_msg(udl->udev,
usb_sndctrlpipe(udl->udev, 0),
NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
- set_def_chn, sizeof(set_def_chn),
+ sendbuf, sizeof(set_def_chn),
USB_CTRL_SET_TIMEOUT);
+ kfree(sendbuf);
return ret < 0 ? ret : 0;
}
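
The udl change above copies the fixed channel-selection blob into a heap buffer before the transfer; USB control messages should not be fed const/rodata or stack memory because the data buffer has to be DMA-capable. A hedged sketch of the general pattern (payload, len and request are placeholders):

    void *buf = kmemdup(payload, len, GFP_KERNEL);

    if (!buf)
        return -ENOMEM;
    ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
                          USB_DIR_OUT | USB_TYPE_VENDOR, 0, 0,
                          buf, len, USB_CTRL_SET_TIMEOUT);
    kfree(buf);
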
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8703f56b7947..1dab9e5b3689 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -61,23 +61,28 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT0);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT1);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT2);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+ case DRM_VC4_PARAM_SUPPORTS_ETC1:
+ case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
args->value = true;
break;
default:
@@ -103,9 +108,7 @@ static const struct file_operations vc4_drm_fops = {
.mmap = vc4_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 7c1e4d97486f..fef172804345 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -381,6 +381,8 @@ struct vc4_validated_shader_info {
uint32_t num_uniform_addr_offsets;
uint32_t *uniform_addr_offsets;
+
+ bool is_threaded;
};
/**
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 47a095f392f8..db920771bfb5 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -544,14 +544,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
if (!handles) {
+ ret = -ENOMEM;
DRM_ERROR("Failed to allocate incoming GEM handles\n");
goto fail;
}
- ret = copy_from_user(handles,
- (void __user *)(uintptr_t)args->bo_handles,
- exec->bo_count * sizeof(uint32_t));
- if (ret) {
+ if (copy_from_user(handles,
+ (void __user *)(uintptr_t)args->bo_handles,
+ exec->bo_count * sizeof(uint32_t))) {
+ ret = -EFAULT;
DRM_ERROR("Failed to copy in GEM handles\n");
goto fail;
}
@@ -708,8 +709,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
}
mutex_lock(&vc4->power_lock);
- if (--vc4->power_refcount == 0)
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ if (--vc4->power_refcount == 0) {
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ }
mutex_unlock(&vc4->power_lock);
kfree(exec);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c1f65c6c8e60..f31f72af8551 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -61,7 +61,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
up(&vc4->async_modeset);
@@ -173,6 +173,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* current layout.
*/
+ drm_atomic_state_get(state);
if (nonblock) {
vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
vc4_atomic_complete_commit_seqno_cb);
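
These two vc4_kms.c hunks reflect the switch of struct drm_atomic_state to reference counting: the commit path takes its own reference before queuing asynchronous completion, and the completion handler drops it with drm_atomic_state_put() in place of the removed drm_atomic_state_free(). Sketch of the pairing (call sites illustrative):

    drm_atomic_state_get(state);   /* held by the pending async commit      */
    /* ... queue the commit and apply it to the hardware ... */
    drm_atomic_state_put(state);   /* replaces drm_atomic_state_free(state) */
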
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e6d3c6028341..7cc346ad9b0b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);
return 0;
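
With autosuspend enabled at bind time (40 ms delay, a little over two frames), every place that used to call pm_runtime_put() on the V3D device now uses the autosuspend pair, so the core only powers the block down after it has stayed idle past the delay:

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
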
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 26503e307438..9fd171c361c2 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -644,6 +644,13 @@ reloc_tex(struct vc4_exec_info *exec,
cpp = 1;
break;
case VC4_TEXTURE_TYPE_ETC1:
+ /* ETC1 is arranged as 64-bit blocks, where each block is 4x4
+ * pixels.
+ */
+ cpp = 8;
+ width = (width + 3) >> 2;
+ height = (height + 3) >> 2;
+ break;
case VC4_TEXTURE_TYPE_BW1:
case VC4_TEXTURE_TYPE_A4:
case VC4_TEXTURE_TYPE_A1:
@@ -782,11 +789,6 @@ validate_gl_shader_rec(struct drm_device *dev,
exec->shader_rec_v += roundup(packet_size, 16);
exec->shader_rec_size -= packet_size;
- if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
- DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
- return -EINVAL;
- }
-
for (i = 0; i < shader_reloc_count; i++) {
if (src_handles[i] > exec->bo_count) {
DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
@@ -803,6 +805,18 @@ validate_gl_shader_rec(struct drm_device *dev,
return -EINVAL;
}
+ if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
+ to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
+ DRM_ERROR("Thread mode of CL and FS do not match\n");
+ return -EINVAL;
+ }
+
+ if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
+ to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
+ DRM_ERROR("cs and vs cannot be threaded\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < shader_reloc_count; i++) {
struct vc4_validated_shader_info *validated_shader;
uint32_t o = shader_reloc_offsets[i];
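
For the ETC1 case added above: ETC1 packs 4x4-pixel blocks into 8 bytes, so the validator rounds the dimensions up to whole blocks and then sizes the texture with cpp = 8. A worked example:

    /* 130x66 ETC1 texture */
    width  = (130 + 3) >> 2;          /* 33 blocks wide */
    height = (66 + 3) >> 2;           /* 17 blocks high */
    size   = width * height * 8;      /* 4488 bytes     */
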
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2543cf5b8b51..5dba13dd1e9b 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -83,6 +83,13 @@ struct vc4_shader_validation_state {
* basic blocks.
*/
bool needs_uniform_address_for_loop;
+
+ /* Set when we find an instruction writing the top half of the
+ * register files. If we allowed writing the unusable regs in
+ * a threaded shader, then the other shader running on our
+ * QPU's clamp validation would be invalid.
+ */
+ bool all_registers_used;
};
static uint32_t
@@ -119,6 +126,13 @@ raddr_add_a_to_live_reg_index(uint64_t inst)
}
static bool
+live_reg_is_upper_half(uint32_t lri)
+{
+ return (lri >= 16 && lri < 32) ||
+ (lri >= 32 + 16 && lri < 32 + 32);
+}
+
+static bool
is_tmu_submit(uint32_t waddr)
{
return (waddr == QPU_W_TMU0_S ||
@@ -390,6 +404,9 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
} else {
validation_state->live_immediates[lri] = ~0;
}
+
+ if (live_reg_is_upper_half(lri))
+ validation_state->all_registers_used = true;
}
switch (waddr) {
@@ -598,6 +615,11 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
}
}
+ if ((raddr_a >= 16 && raddr_a < 32) ||
+ (raddr_b >= 16 && raddr_b < 32 && sig != QPU_SIG_SMALL_IMM)) {
+ validation_state->all_registers_used = true;
+ }
+
return true;
}
@@ -608,9 +630,7 @@ static bool
vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
{
uint32_t max_branch_target = 0;
- bool found_shader_end = false;
int ip;
- int shader_end_ip = 0;
int last_branch = -2;
for (ip = 0; ip < validation_state->max_ip; ip++) {
@@ -621,8 +641,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
uint32_t branch_target_ip;
if (sig == QPU_SIG_PROG_END) {
- shader_end_ip = ip;
- found_shader_end = true;
+ /* There are two delay slots after program end is
+ * signaled that are still executed, then we're
+ * finished. validation_state->max_ip is the
+ * instruction after the last valid instruction in the
+ * program.
+ */
+ validation_state->max_ip = ip + 3;
continue;
}
@@ -676,15 +701,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
}
set_bit(after_delay_ip, validation_state->branch_targets);
max_branch_target = max(max_branch_target, after_delay_ip);
-
- /* There are two delay slots after program end is signaled
- * that are still executed, then we're finished.
- */
- if (found_shader_end && ip == shader_end_ip + 2)
- break;
}
- if (max_branch_target > shader_end_ip) {
+ if (max_branch_target > validation_state->max_ip - 3) {
DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
return false;
}
@@ -756,6 +775,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
bool found_shader_end = false;
int shader_end_ip = 0;
+ uint32_t last_thread_switch_ip = -3;
uint32_t ip;
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
@@ -788,6 +808,17 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
if (!vc4_handle_branch_target(&validation_state))
goto fail;
+ if (ip == last_thread_switch_ip + 3) {
+ /* Reset r0-r3 live clamp data */
+ int i;
+
+ for (i = 64; i < LIVE_REG_COUNT; i++) {
+ validation_state.live_min_clamp_offsets[i] = ~0;
+ validation_state.live_max_clamp_regs[i] = false;
+ validation_state.live_immediates[i] = ~0;
+ }
+ }
+
switch (sig) {
case QPU_SIG_NONE:
case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -797,6 +828,8 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_TMU1:
case QPU_SIG_PROG_END:
case QPU_SIG_SMALL_IMM:
+ case QPU_SIG_THREAD_SWITCH:
+ case QPU_SIG_LAST_THREAD_SWITCH:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad write at ip %d\n", ip);
@@ -812,6 +845,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
shader_end_ip = ip;
}
+ if (sig == QPU_SIG_THREAD_SWITCH ||
+ sig == QPU_SIG_LAST_THREAD_SWITCH) {
+ validated_shader->is_threaded = true;
+
+ if (ip < last_thread_switch_ip + 3) {
+ DRM_ERROR("Thread switch too soon after "
+ "last switch at ip %d\n", ip);
+ goto fail;
+ }
+ last_thread_switch_ip = ip;
+ }
+
break;
case QPU_SIG_LOAD_IMM:
@@ -826,6 +871,13 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
if (!check_branch(inst, validated_shader,
&validation_state, ip))
goto fail;
+
+ if (ip < last_thread_switch_ip + 3) {
+ DRM_ERROR("Branch in thread switch at ip %d",
+ ip);
+ goto fail;
+ }
+
break;
default:
DRM_ERROR("Unsupported QPU signal %d at "
@@ -847,6 +899,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
goto fail;
}
+ /* Might corrupt other thread */
+ if (validated_shader->is_threaded &&
+ validation_state.all_registers_used) {
+ DRM_ERROR("Shader uses threading, but uses the upper "
+ "half of the registers, too\n");
+ goto fail;
+ }
+
/* If we did a backwards branch and we haven't emitted a uniforms
* reset since then, we still need the uniforms stream to have the
* uniforms address available so that the backwards branch can do its
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 5c57c1ffa1f9..488909a21ed8 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -28,56 +28,57 @@
#define VGEM_FENCE_TIMEOUT (10*HZ)
struct vgem_fence {
- struct fence base;
+ struct dma_fence base;
struct spinlock lock;
struct timer_list timer;
};
-static const char *vgem_fence_get_driver_name(struct fence *fence)
+static const char *vgem_fence_get_driver_name(struct dma_fence *fence)
{
return "vgem";
}
-static const char *vgem_fence_get_timeline_name(struct fence *fence)
+static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static bool vgem_fence_signaled(struct fence *fence)
+static bool vgem_fence_signaled(struct dma_fence *fence)
{
return false;
}
-static bool vgem_fence_enable_signaling(struct fence *fence)
+static bool vgem_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static void vgem_fence_release(struct fence *base)
+static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
del_timer_sync(&fence->timer);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
-static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
- snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+ snprintf(str, size, "%u",
+ dma_fence_is_signaled(fence) ? fence->seqno : 0);
}
-static const struct fence_ops vgem_fence_ops = {
+static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.enable_signaling = vgem_fence_enable_signaling,
.signaled = vgem_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data)
{
struct vgem_fence *fence = (struct vgem_fence *)data;
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
}
-static struct fence *vgem_fence_create(struct vgem_file *vfile,
- unsigned int flags)
+static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
+ unsigned int flags)
{
struct vgem_fence *fence;
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile,
return NULL;
spin_lock_init(&fence->lock);
- fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
- fence_context_alloc(1), 1);
+ dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+ dma_fence_context_alloc(1), 1);
setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
@@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
struct vgem_file *vfile = file->driver_priv;
struct reservation_object *resv;
struct drm_gem_object *obj;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
if (arg->flags & ~VGEM_FENCE_WRITE)
@@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
}
err_fence:
if (ret) {
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
}
err:
drm_gem_object_unreference_unlocked(obj);
@@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
{
struct vgem_file *vfile = file->driver_priv;
struct drm_vgem_fence_signal *arg = data;
- struct fence *fence;
+ struct dma_fence *fence;
int ret = 0;
if (arg->flags)
@@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
if (IS_ERR(fence))
return PTR_ERR(fence);
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
ret = -ETIMEDOUT;
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
return ret;
}
@@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile)
static int __vgem_fence_idr_fini(int id, void *p, void *data)
{
- fence_signal(p);
- fence_put(p);
+ dma_fence_signal(p);
+ dma_fence_put(p);
return 0;
}
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e5582bab7e3c..9e0e5392b6ec 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,9 +64,7 @@ static const struct file_operations via_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index e1afc3d3f8d9..81d1807ac228 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,10 +1,10 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
- select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_KMS_HELPER
+ select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
- QEMU based VMMs (like KVM or Xen).
+ QEMU based VMMs (like KVM or Xen).
If unsure say M.
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 7cf3678623c3..58048709c34e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -338,8 +338,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_planes(dev, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_hw_done(state);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 49e5996cb9f2..3b97d50fd392 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -28,16 +28,6 @@
#include "virtgpu_drv.h"
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
-{
- struct pci_dev *pdev = dev->pdev;
-
- if (pdev) {
- return drm_pci_set_busid(dev, master);
- }
- return 0;
-}
-
static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
{
struct apertures_struct *ap;
@@ -71,13 +61,22 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ const char *pname = dev_name(&pdev->dev);
bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+ char unique[20];
- DRM_INFO("pci: %s detected\n",
- vga ? "virtio-vga" : "virtio-gpu-pci");
+ DRM_INFO("pci: %s detected at %s\n",
+ vga ? "virtio-vga" : "virtio-gpu-pci",
+ pname);
dev->pdev = pdev;
if (vga)
virtio_pci_kick_out_firmware_fb(pdev);
+
+ snprintf(unique, sizeof(unique), "pci:%s", pname);
+ ret = drm_dev_set_unique(dev, unique);
+ if (ret)
+ goto err_free;
+
}
ret = drm_dev_register(dev, 0);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 5820b7020ae5..d82489815096 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -108,16 +108,13 @@ static const struct file_operations virtio_gpu_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
- .set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ae59080d63d1..08906c8ce3fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -49,7 +49,6 @@
#define DRIVER_PATCHLEVEL 1
/* virtgpu_drm_bus.c */
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
struct virtio_gpu_object {
@@ -82,7 +81,7 @@ struct virtio_gpu_fence_driver {
};
struct virtio_gpu_fence {
- struct fence f;
+ struct dma_fence f;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
uint64_t seq;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 2242a80866a9..dd21f950e129 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -200,16 +200,10 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
static struct fb_ops virtio_gpufb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = virtio_gpu_3d_fillrect,
.fb_copyarea = virtio_gpu_3d_copyarea,
.fb_imageblit = virtio_gpu_3d_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f3f70fa8a4c7..23353521f903 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -26,22 +26,22 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
-static const char *virtio_get_driver_name(struct fence *f)
+static const char *virtio_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
}
-static const char *virtio_get_timeline_name(struct fence *f)
+static const char *virtio_get_timeline_name(struct dma_fence *f)
{
return "controlq";
}
-static bool virtio_enable_signaling(struct fence *f)
+static bool virtio_enable_signaling(struct dma_fence *f)
{
return true;
}
-static bool virtio_signaled(struct fence *f)
+static bool virtio_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f)
return false;
}
-static void virtio_fence_value_str(struct fence *f, char *str, int size)
+static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", fence->seq);
}
-static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
}
-static const struct fence_ops virtio_fence_ops = {
+static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
.enable_signaling = virtio_enable_signaling,
.signaled = virtio_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
spin_lock_irqsave(&drv->lock, irq_flags);
(*fence)->drv = drv;
(*fence)->seq = ++drv->sync_seq;
- fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- drv->context, (*fence)->seq);
- fence_get(&(*fence)->f);
+ dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+ drv->context, (*fence)->seq);
+ dma_fence_get(&(*fence)->f);
list_add_tail(&(*fence)->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
if (last_seq < fence->seq)
continue;
- fence_signal_locked(&fence->f);
+ dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
spin_unlock_irqrestore(&drv->lock, irq_flags);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b4c4f0..61f3a963af95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
drm_free_large(buflist);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
return 0;
out_unresv:
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return ret;
}
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return 0;
fail_unref:
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
//fail_obj:
// drm_gem_object_handle_unreference_unlocked(obj);
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &box, &fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
out_unres:
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 036b0fbae0fb..1235519853f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
- vgdev->fence_drv.context = fence_context_alloc(1);
+ vgdev->fence_drv.context = dma_fence_context_alloc(1);
spin_lock_init(&vgdev->fence_drv.lock);
INIT_LIST_HEAD(&vgdev->fence_drv.fences);
INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ba28c0f6f28a..cb75f0663ba0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!ret) {
reservation_object_add_excl_fence(bo->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
fence = NULL;
virtio_gpu_object_unreserve(bo);
virtio_gpu_object_wait(bo, false);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 80482ac5f95d..4a1de9f81193 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
.invalidate_caches = &virtio_gpu_invalidate_caches,
.init_mem_type = &virtio_gpu_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &virtio_gpu_evict_flags,
.move = &virtio_gpu_bo_move,
.verify_access = &virtio_gpu_verify_access,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..974f9410474b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -75,7 +75,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq)
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_vbuffer *vbuf;
- int i, size, count = 0;
+ int i, size, count = 16;
void *ptr;
INIT_LIST_HEAD(&vgdev->free_vbufs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 78b75ee3c931..c894a48a74a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e80a478..6541dd8b82dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
* objects with actions attached to them.
*/
-static void vmw_fence_obj_destroy(struct fence *f)
+static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f)
fence->destroy(fence);
}
-static const char *vmw_fence_get_driver_name(struct fence *f)
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
return "vmwgfx";
}
-static const char *vmw_fence_get_timeline_name(struct fence *f)
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
return "svga";
}
-static bool vmw_fence_enable_signaling(struct fence *f)
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f)
}
struct vmwgfx_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct vmwgfx_wait_cb *wait =
container_of(cb, struct vmwgfx_wait_cb, base);
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
static void __vmw_fences_update(struct vmw_fence_manager *fman);
-static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
while (ret > 0) {
__vmw_fences_update(fman);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
break;
if (intr)
@@ -225,7 +225,7 @@ out:
return ret;
}
-static struct fence_ops vmw_fence_ops = {
+static struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
- fman->ctx = fence_context_alloc(1);
+ fman->ctx = dma_fence_context_alloc(1);
return fman;
}
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
unsigned long irq_flags;
int ret = 0;
- fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
- fman->ctx, seqno);
+ dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+ fman->ctx, seqno);
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
u32 goal_seqno;
u32 *fifo_mem;
- if (fence_is_signaled_locked(&fence->base))
+ if (dma_fence_is_signaled_locked(&fence->base))
return false;
fifo_mem = fman->dev_priv->mmio_virt;
@@ -459,7 +459,7 @@ rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return 1;
vmw_fences_update(fman);
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
bool interruptible, unsigned long timeout)
{
- long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
+ long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
if (likely(ret > 0))
return 0;
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_unlock_irq(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
}
BUG_ON(!list_empty(&fence->head));
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
spin_lock_irq(&fman->lock);
}
spin_unlock_irq(&fman->lock);
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
spin_lock_irqsave(&fman->lock, irq_flags);
fman->pending_actions[action->type]++;
- if (fence_is_signaled_locked(&fence->base)) {
+ if (dma_fence_is_signaled_locked(&fence->base)) {
struct list_head action_list;
INIT_LIST_HEAD(&action_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 83ae301ee141..d9d85aa6ed20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,7 +27,7 @@
#ifndef _VMWGFX_FENCE_H_
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -52,7 +52,7 @@ struct vmw_fence_action {
};
struct vmw_fence_obj {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
struct list_head seq_passed_actions;
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
*fence_p = NULL;
if (fence)
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
}
static inline struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (fence)
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bf28ccc150df..e3f68cc9bb4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -980,14 +980,23 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
struct drm_mode_fb_cmd mode_cmd;
+ const struct drm_format_info *info;
int ret;
+ info = drm_format_info(mode_cmd2->pixel_format);
+ if (!info || !info->depth) {
+ struct drm_format_name_buf format_name;
+ DRM_ERROR("Unsupported framebuffer format %s\n",
+ drm_get_format_name(mode_cmd2->pixel_format, &format_name));
+ return ERR_PTR(-EINVAL);
+ }
+
mode_cmd.width = mode_cmd2->width;
mode_cmd.height = mode_cmd2->height;
mode_cmd.pitch = mode_cmd2->pitches[0];
mode_cmd.handle = mode_cmd2->handles[0];
- drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
- &mode_cmd.bpp);
+ mode_cmd.depth = info->depth;
+ mode_cmd.bpp = info->cpp[0] * 8;
/**
* This code should be conditioned on Screen Objects not being used.
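
The vmwgfx framebuffer path above is one instance of the tree-wide move from drm_fb_get_bpp_depth() to the drm_format_info() descriptor; the same lookup yields both fields and doubles as the validity check. Sketch of the pattern:

    const struct drm_format_info *info = drm_format_info(pixel_format);

    if (!info || !info->depth)
        return -EINVAL;               /* format not usable here */
    depth = info->depth;
    bpp   = info->cpp[0] * 8;
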
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 52ca1c9d070e..8e86d6d4141b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -575,7 +575,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
long lret;
lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
new file mode 100644
index 000000000000..4065b2840f1c
--- /dev/null
+++ b/drivers/gpu/drm/zte/Kconfig
@@ -0,0 +1,8 @@
+config DRM_ZTE
+ tristate "DRM Support for ZTE SoCs"
+ depends on DRM && ARCH_ZX
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_HELPER
+ help
+ Choose this option to enable DRM on ZTE ZX SoCs.
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
new file mode 100644
index 000000000000..699180bfd57c
--- /dev/null
+++ b/drivers/gpu/drm/zte/Makefile
@@ -0,0 +1,7 @@
+zxdrm-y := \
+ zx_drm_drv.o \
+ zx_hdmi.o \
+ zx_plane.o \
+ zx_vou.o
+
+obj-$(CONFIG_DRM_ZTE) += zxdrm.o
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
new file mode 100644
index 000000000000..3e76f72c92ff
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_vou.h"
+
+struct zx_drm_private {
+ struct drm_fbdev_cma *fbdev;
+};
+
+static void zx_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = zx_drm_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void zx_drm_lastclose(struct drm_device *drm)
+{
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static const struct file_operations zx_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver zx_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .lastclose = zx_drm_lastclose,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = zx_vou_enable_vblank,
+ .disable_vblank = zx_vou_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &zx_drm_fops,
+ .name = "zx-vou",
+ .desc = "ZTE VOU Controller DRM",
+ .date = "20160811",
+ .major = 1,
+ .minor = 0,
+};
+
+static int zx_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ struct zx_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ drm = drm_dev_alloc(&zx_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ drm->dev_private = priv;
+ dev_set_drvdata(dev, drm);
+
+ drm_mode_config_init(drm);
+ drm->mode_config.min_width = 16;
+ drm->mode_config.min_height = 16;
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &zx_drm_mode_config_funcs;
+
+ ret = component_bind_all(dev, drm);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret);
+ goto out_unregister;
+ }
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret);
+ goto out_unbind;
+ }
+
+ /*
+ * We will manage irq handler on our own. In this case, irq_enabled
+ * need to be true for using vblank core support.
+ */
+ drm->irq_enabled = true;
+
+ drm_mode_config_reset(drm);
+ drm_kms_helper_poll_init(drm);
+
+ priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ ret = PTR_ERR(priv->fbdev);
+ DRM_DEV_ERROR(dev, "failed to init cma fbdev: %d\n", ret);
+ priv->fbdev = NULL;
+ goto out_poll_fini;
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto out_fbdev_fini;
+
+ return 0;
+
+out_fbdev_fini:
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+out_poll_fini:
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+out_unbind:
+ component_unbind_all(dev, drm);
+out_unregister:
+ dev_set_drvdata(dev, NULL);
+ drm->dev_private = NULL;
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void zx_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+ component_unbind_all(dev, drm);
+ dev_set_drvdata(dev, NULL);
+ drm->dev_private = NULL;
+ drm_dev_unref(drm);
+}
+
+static const struct component_master_ops zx_drm_master_ops = {
+ .bind = zx_drm_bind,
+ .unbind = zx_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int zx_drm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent = dev->of_node;
+ struct device_node *child;
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = of_platform_populate(parent, NULL, NULL, dev);
+ if (ret)
+ return ret;
+
+ for_each_available_child_of_node(parent, child) {
+ component_match_add(dev, &match, compare_of, child);
+ of_node_put(child);
+ }
+
+ return component_master_add_with_match(dev, &zx_drm_master_ops, match);
+}
+
+static int zx_drm_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &zx_drm_master_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_drm_of_match[] = {
+ { .compatible = "zte,zx296718-vou", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_drm_of_match);
+
+static struct platform_driver zx_drm_platform_driver = {
+ .probe = zx_drm_probe,
+ .remove = zx_drm_remove,
+ .driver = {
+ .name = "zx-drm",
+ .of_match_table = zx_drm_of_match,
+ },
+};
+
+static struct platform_driver *drivers[] = {
+ &zx_crtc_driver,
+ &zx_hdmi_driver,
+ &zx_drm_platform_driver,
+};
+
+static int zx_drm_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(zx_drm_init);
+
+static void zx_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(zx_drm_exit);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("ZTE ZX VOU DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h
new file mode 100644
index 000000000000..e65cd18a6cba
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_drm_drv.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_DRM_DRV_H__
+#define __ZX_DRM_DRV_H__
+
+extern struct platform_driver zx_crtc_driver;
+extern struct platform_driver zx_hdmi_driver;
+
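+/* Relaxed MMIO accessors shared by the ZX DRM sub-drivers */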
+static inline u32 zx_readl(void __iomem *reg)
+{
+ return readl_relaxed(reg);
+}
+
+static inline void zx_writel(void __iomem *reg, u32 val)
+{
+ writel_relaxed(val, reg);
+}
+
+static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = zx_readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ zx_writel(reg, tmp);
+}
+
+#endif /* __ZX_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
new file mode 100644
index 000000000000..6bf6c364811e
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hdmi.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+
+#include "zx_hdmi_regs.h"
+#include "zx_vou.h"
+
+#define ZX_HDMI_INFOFRAME_SIZE 31
+#define DDC_SEGMENT_ADDR 0x30
+
+struct zx_hdmi_i2c {
+ struct i2c_adapter adap;
+ struct mutex lock;
+};
+
+struct zx_hdmi {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct zx_hdmi_i2c *ddc;
+ struct device *dev;
+ struct drm_device *drm;
+ void __iomem *mmio;
+ struct clk *cec_clk;
+ struct clk *osc_clk;
+ struct clk *xclk;
+ bool sink_is_hdmi;
+ bool sink_has_audio;
+ const struct vou_inf *inf;
+};
+
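+/*
+ * Resolve the containing zx_hdmi from an embedded connector or encoder;
+ * this relies on the macro argument matching the struct member name.
+ */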
+#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
+
+static const struct vou_inf vou_inf_hdmi = {
+ .id = VOU_HDMI,
+ .data_sel = VOU_YUV444,
+ .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
+ .clocks_sel_bits = BIT(13) | BIT(2),
+};
+
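+/*
+ * HDMI registers are byte-wide but laid out at a 32-bit stride, hence
+ * the 'offset * 4' in the accessors below.
+ */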
+static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
+{
+ return readl_relaxed(hdmi->mmio + offset * 4);
+}
+
+static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val)
+{
+ writel_relaxed(val, hdmi->mmio + offset * 4);
+}
+
+static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset,
+ u8 mask, u8 val)
+{
+ u8 tmp;
+
+ tmp = hdmi_readb(hdmi, offset);
+ tmp = (tmp & ~mask) | (val & mask);
+ hdmi_writeb(hdmi, offset, tmp);
+}
+
+static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi,
+ union hdmi_infoframe *frame, u8 fsel)
+{
+ u8 buffer[ZX_HDMI_INFOFRAME_SIZE];
+ int num;
+ int i;
+
+ hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel);
+
+ num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE);
+ if (num < 0) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num);
+ return num;
+ }
+
+ for (i = 0; i < num; i++)
+ hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]);
+
+ hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT,
+ TPI_INFO_TRANS_RPT);
+ hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN,
+ TPI_INFO_TRANS_EN);
+
+ return num;
+}
+
+static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ mode);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
+ ret);
+ return ret;
+ }
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF);
+}
+
+static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* We always use YUV444 for HDMI output. */
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI);
+}
+
+static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ if (hdmi->sink_is_hdmi) {
+ zx_hdmi_config_video_avi(hdmi, mode);
+ zx_hdmi_config_video_vsi(hdmi, mode);
+ }
+}
+
+static void zx_hdmi_phy_start(struct zx_hdmi *hdmi)
+{
+ /* The PHY settings below are copied from ZTE BSP code */
+ hdmi_writeb(hdmi, 0x222, 0x0);
+ hdmi_writeb(hdmi, 0x224, 0x4);
+ hdmi_writeb(hdmi, 0x909, 0x0);
+ hdmi_writeb(hdmi, 0x7b0, 0x90);
+ hdmi_writeb(hdmi, 0x7b1, 0x00);
+ hdmi_writeb(hdmi, 0x7b2, 0xa7);
+ hdmi_writeb(hdmi, 0x7b8, 0xaa);
+ hdmi_writeb(hdmi, 0x7b2, 0xa7);
+ hdmi_writeb(hdmi, 0x7b3, 0x0f);
+ hdmi_writeb(hdmi, 0x7b4, 0x0f);
+ hdmi_writeb(hdmi, 0x7b5, 0x55);
+ hdmi_writeb(hdmi, 0x7b7, 0x03);
+ hdmi_writeb(hdmi, 0x7b9, 0x12);
+ hdmi_writeb(hdmi, 0x7ba, 0x32);
+ hdmi_writeb(hdmi, 0x7bc, 0x68);
+ hdmi_writeb(hdmi, 0x7be, 0x40);
+ hdmi_writeb(hdmi, 0x7bf, 0x84);
+ hdmi_writeb(hdmi, 0x7c1, 0x0f);
+ hdmi_writeb(hdmi, 0x7c8, 0x02);
+ hdmi_writeb(hdmi, 0x7c9, 0x03);
+ hdmi_writeb(hdmi, 0x7ca, 0x40);
+ hdmi_writeb(hdmi, 0x7dc, 0x31);
+ hdmi_writeb(hdmi, 0x7e2, 0x04);
+ hdmi_writeb(hdmi, 0x7e0, 0x06);
+ hdmi_writeb(hdmi, 0x7cb, 0x68);
+ hdmi_writeb(hdmi, 0x7f9, 0x02);
+ hdmi_writeb(hdmi, 0x7b6, 0x02);
+ hdmi_writeb(hdmi, 0x7f3, 0x0);
+}
+
+static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi)
+{
+ /* Enable pclk */
+ hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK);
+
+ /* Enable HDMI for TX */
+ hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN);
+
+ /* Enable deep color packet */
+ hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
+
+ /* Enable HDMI/MHL mode for output */
+ hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE,
+ TEST_TXCTRL_HDMI_MODE);
+
+ /* Configure reg_qc_sel */
+ hdmi_writeb(hdmi, HDMICTL4, 0x3);
+
+ /* Enable interrupt */
+ hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT,
+ INTR1_MONITOR_DETECT);
+
+ /* Start up phy */
+ zx_hdmi_phy_start(hdmi);
+}
+
+static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi)
+{
+ /* Disable interrupt */
+ hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0);
+
+ /* Disable deep color packet */
+ hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, 0);
+
+ /* Disable HDMI for TX */
+ hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0);
+
+ /* Disable pclk */
+ hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0);
+}
+
+static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ clk_prepare_enable(hdmi->cec_clk);
+ clk_prepare_enable(hdmi->osc_clk);
+ clk_prepare_enable(hdmi->xclk);
+
+ zx_hdmi_hw_enable(hdmi);
+
+ vou_inf_enable(hdmi->inf, encoder->crtc);
+}
+
+static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ vou_inf_disable(hdmi->inf, encoder->crtc);
+
+ zx_hdmi_hw_disable(hdmi);
+
+ clk_disable_unprepare(hdmi->xclk);
+ clk_disable_unprepare(hdmi->osc_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
+}
+
+static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
+ .enable = zx_hdmi_encoder_enable,
+ .disable = zx_hdmi_encoder_disable,
+ .mode_set = zx_hdmi_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &hdmi->ddc->adap);
+ if (!edid)
+ return 0;
+
+ hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static enum drm_mode_status
+zx_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = {
+ .get_modes = zx_hdmi_connector_get_modes,
+ .mode_valid = zx_hdmi_connector_mode_valid,
+};
+
+static enum drm_connector_status
+zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(connector);
+
+ return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = zx_hdmi_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder;
+
+ encoder->possible_crtcs = VOU_CRTC_MASK;
+
+ drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
+
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm, &hdmi->connector, &zx_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(&hdmi->connector,
+ &zx_hdmi_connector_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+
+ return 0;
+}
+
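+/* Threaded half of the HPD interrupt: report the hotplug event to DRM core */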
+static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id)
+{
+ struct zx_hdmi *hdmi = dev_id;
+
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
+{
+ struct zx_hdmi *hdmi = dev_id;
+ u8 lstat;
+
+ lstat = hdmi_readb(hdmi, L1_INTR_STAT);
+
+ /* Monitor detect/HPD interrupt */
+ if (lstat & L1_INTR_STAT_INTR1) {
+ u8 stat;
+
+ stat = hdmi_readb(hdmi, INTR1_STAT);
+ hdmi_writeb(hdmi, INTR1_STAT, stat);
+
+ if (stat & INTR1_MONITOR_DETECT)
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
+{
+ int len = msg->len;
+ u8 *buf = msg->buf;
+ int retry = 0;
+ int ret = 0;
+
+ /* Bits [9:8] of the byte count */
+ hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff);
+ /* Bits [7:0] of the byte count */
+ hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff);
+
+ /* Clear FIFO */
+ hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO);
+
+ /* Kick off the read */
+ hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK,
+ DDC_CMD_SEQUENTIAL_READ);
+
+ while (len > 0) {
+ int cnt, i;
+
+ /* FIFO needs some time to get ready */
+ usleep_range(500, 1000);
+
+ cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK;
+ if (cnt == 0) {
+ if (++retry > 5) {
+ DRM_DEV_ERROR(hdmi->dev,
+ "DDC FIFO read timed out!");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ for (i = 0; i < cnt; i++)
+ *buf++ = hdmi_readb(hdmi, ZX_DDC_DATA);
+ len -= cnt;
+ }
+
+ return ret;
+}
+
+static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg)
+{
+ /*
+ * The DDC I2C adapter is only for reading EDID data, so we assume
+ * that the write to this adapter must be the EDID data offset.
+ */
+ if ((msg->len != 1) ||
+ ((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR)))
+ return -EINVAL;
+
+ if (msg->addr == DDC_SEGMENT_ADDR)
+ hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1);
+ else if (msg->addr == DDC_ADDR)
+ hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1);
+
+ hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]);
+
+ return 0;
+}
+
+static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct zx_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct zx_hdmi_i2c *ddc = hdmi->ddc;
+ int i, ret = 0;
+
+ mutex_lock(&ddc->lock);
+
+ /* Enable DDC master access */
+ hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER);
+
+ for (i = 0; i < num; i++) {
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = zx_hdmi_i2c_read(hdmi, &msgs[i]);
+ else
+ ret = zx_hdmi_i2c_write(hdmi, &msgs[i]);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Disable DDC master access */
+ hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0);
+
+ mutex_unlock(&ddc->lock);
+
+ return ret;
+}
+
+static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm zx_hdmi_algorithm = {
+ .master_xfer = zx_hdmi_i2c_xfer,
+ .functionality = zx_hdmi_i2c_func,
+};
+
+static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct zx_hdmi_i2c *ddc;
+ int ret;
+
+ ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
+ if (!ddc)
+ return -ENOMEM;
+
+ hdmi->ddc = ddc;
+ mutex_init(&ddc->lock);
+
+ adap = &ddc->adap;
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_DDC;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &zx_hdmi_algorithm;
+ snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c");
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n",
+ ret);
+ return ret;
+ }
+
+ i2c_set_adapdata(adap, hdmi);
+
+ return 0;
+}
+
+static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct resource *res;
+ struct zx_hdmi *hdmi;
+ int irq;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+ hdmi->drm = drm;
+ hdmi->inf = &vou_inf_hdmi;
+
+ dev_set_drvdata(dev, hdmi);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdmi->mmio)) {
+ ret = PTR_ERR(hdmi->mmio);
+ DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec");
+ if (IS_ERR(hdmi->cec_clk)) {
+ ret = PTR_ERR(hdmi->cec_clk);
+ DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk");
+ if (IS_ERR(hdmi->osc_clk)) {
+ ret = PTR_ERR(hdmi->osc_clk);
+ DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->xclk = devm_clk_get(hdmi->dev, "xclk");
+ if (IS_ERR(hdmi->xclk)) {
+ ret = PTR_ERR(hdmi->xclk);
+ DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_hdmi_ddc_register(hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_hdmi_register(drm, hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler,
+ zx_hdmi_irq_thread, IRQF_SHARED,
+ dev_name(dev), hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void zx_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+}
+
+static const struct component_ops zx_hdmi_component_ops = {
+ .bind = zx_hdmi_bind,
+ .unbind = zx_hdmi_unbind,
+};
+
+static int zx_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_hdmi_component_ops);
+}
+
+static int zx_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_hdmi_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_hdmi_of_match[] = {
+ { .compatible = "zte,zx296718-hdmi", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_hdmi_of_match);
+
+struct platform_driver zx_hdmi_driver = {
+ .probe = zx_hdmi_probe,
+ .remove = zx_hdmi_remove,
+ .driver = {
+ .name = "zx-hdmi",
+ .of_match_table = zx_hdmi_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h
new file mode 100644
index 000000000000..de911f66b658
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_hdmi_regs.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_HDMI_REGS_H__
+#define __ZX_HDMI_REGS_H__
+
+#define FUNC_SEL 0x000b
+#define FUNC_HDMI_EN BIT(0)
+#define CLKPWD 0x000d
+#define CLKPWD_PDIDCK BIT(2)
+#define P2T_CTRL 0x0066
+#define P2T_DC_PKT_EN BIT(7)
+#define L1_INTR_STAT 0x007e
+#define L1_INTR_STAT_INTR1 BIT(0)
+#define INTR1_STAT 0x008f
+#define INTR1_MASK 0x0095
+#define INTR1_MONITOR_DETECT (BIT(5) | BIT(6))
+#define ZX_DDC_ADDR 0x00ed
+#define ZX_DDC_SEGM 0x00ee
+#define ZX_DDC_OFFSET 0x00ef
+#define ZX_DDC_DIN_CNT1 0x00f0
+#define ZX_DDC_DIN_CNT2 0x00f1
+#define ZX_DDC_CMD 0x00f3
+#define DDC_CMD_MASK 0xf
+#define DDC_CMD_CLEAR_FIFO 0x9
+#define DDC_CMD_SEQUENTIAL_READ 0x2
+#define ZX_DDC_DATA 0x00f4
+#define ZX_DDC_DOUT_CNT 0x00f5
+#define DDC_DOUT_CNT_MASK 0x1f
+#define TEST_TXCTRL 0x00f7
+#define TEST_TXCTRL_HDMI_MODE BIT(1)
+#define HDMICTL4 0x0235
+#define TPI_HPD_RSEN 0x063b
+#define TPI_HPD_CONNECTION (BIT(1) | BIT(2))
+#define TPI_INFO_FSEL 0x06bf
+#define FSEL_AVI 0
+#define FSEL_GBD 1
+#define FSEL_AUDIO 2
+#define FSEL_SPD 3
+#define FSEL_MPEG 4
+#define FSEL_VSIF 5
+#define TPI_INFO_B0 0x06c0
+#define TPI_INFO_EN 0x06df
+#define TPI_INFO_TRANS_EN BIT(7)
+#define TPI_INFO_TRANS_RPT BIT(6)
+#define TPI_DDC_MASTER_EN 0x06f8
+#define HW_DDC_MASTER BIT(7)
+
+#endif /* __ZX_HDMI_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
new file mode 100644
index 000000000000..546eb92a94e8
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_plane.h"
+#include "zx_plane_regs.h"
+#include "zx_vou.h"
+
+struct zx_plane {
+ struct drm_plane plane;
+ void __iomem *layer;
+ void __iomem *csc;
+ void __iomem *hbsc;
+ void __iomem *rsz;
+};
+
+#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
+
+static const uint32_t gl_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+};
+
+static int zx_gl_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip;
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ /* nothing to check when disabling or disabled */
+ if (!crtc_state->enable)
+ return 0;
+
+ /* plane must be enabled */
+ if (!plane_state->crtc)
+ return -EINVAL;
+
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+ return drm_plane_helper_check_state(plane_state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+}
+
+static int zx_gl_get_fmt(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ return GL_FMT_ARGB8888;
+ case DRM_FORMAT_RGB888:
+ return GL_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ return GL_FMT_RGB565;
+ case DRM_FORMAT_ARGB1555:
+ return GL_FMT_ARGB1555;
+ case DRM_FORMAT_ARGB4444:
+ return GL_FMT_ARGB4444;
+ default:
+ WARN_ONCE(1, "invalid pixel format %d\n", format);
+ return -EINVAL;
+ }
+}
+
+static inline void zx_gl_set_update(struct zx_plane *zplane)
+{
+ void __iomem *layer = zplane->layer;
+
+ zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE);
+}
+
+static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
+{
+ zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
+}
+
+void zx_plane_set_update(struct drm_plane *plane)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+
+ zx_gl_rsz_set_update(zplane);
+ zx_gl_set_update(zplane);
+}
+
+static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
+ u32 dst_w, u32 dst_h)
+{
+ void __iomem *rsz = zplane->rsz;
+
+ zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
+ zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
+
+ zx_gl_rsz_set_update(zplane);
+}
+
+static void zx_gl_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ void __iomem *layer = zplane->layer;
+ void __iomem *csc = zplane->csc;
+ void __iomem *hbsc = zplane->hbsc;
+ u32 src_x, src_y, src_w, src_h;
+ u32 dst_x, dst_y, dst_w, dst_h;
+ unsigned int bpp;
+ uint32_t format;
+ dma_addr_t paddr;
+ u32 stride;
+ int fmt;
+
+ if (!fb)
+ return;
+
+ format = fb->pixel_format;
+ stride = fb->pitches[0];
+
+ src_x = plane->state->src_x >> 16;
+ src_y = plane->state->src_y >> 16;
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+
+ dst_x = plane->state->crtc_x;
+ dst_y = plane->state->crtc_y;
+ dst_w = plane->state->crtc_w;
+ dst_h = plane->state->crtc_h;
+
+ bpp = drm_format_plane_cpp(format, 0);
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ paddr = cma_obj->paddr + fb->offsets[0];
+ paddr += src_y * stride + src_x * bpp / 8;
+ zx_writel(layer + GL_ADDR, paddr);
+
+ /* Set up source height/width register */
+ zx_writel(layer + GL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
+
+ /* Set up start position register */
+ zx_writel(layer + GL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
+
+ /* Set up end position register */
+ zx_writel(layer + GL_POS_END,
+ GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
+
+ /* Set up stride register */
+ zx_writel(layer + GL_STRIDE, stride & 0xffff);
+
+ /* Set up graphic layer data format */
+ fmt = zx_gl_get_fmt(format);
+ if (fmt >= 0)
+ zx_writel_mask(layer + GL_CTRL1, GL_DATA_FMT_MASK,
+ fmt << GL_DATA_FMT_SHIFT);
+
+ /* Initialize global alpha with a sane value */
+ zx_writel_mask(layer + GL_CTRL2, GL_GLOBAL_ALPHA_MASK,
+ 0xff << GL_GLOBAL_ALPHA_SHIFT);
+
+ /* Setup CSC for the GL */
+ if (dst_h > 720)
+ zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
+ CSC_BT709_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
+ else
+ zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
+ CSC_BT601_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
+ zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
+
+ /* Always use the scaler since it is there (set bit for non-bypass) */
+ zx_writel_mask(layer + GL_CTRL3, GL_SCALER_BYPASS_MODE,
+ GL_SCALER_BYPASS_MODE);
+
+ zx_gl_rsz_setup(zplane, src_w, src_h, dst_w, dst_h);
+
+ /* Enable HBSC block */
+ zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
+
+ zx_gl_set_update(zplane);
+}
+
+static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
+ .atomic_check = zx_gl_plane_atomic_check,
+ .atomic_update = zx_gl_plane_atomic_update,
+};
+
+static void zx_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs zx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = zx_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void zx_plane_hbsc_init(struct zx_plane *zplane)
+{
+ void __iomem *hbsc = zplane->hbsc;
+
+ /*
+ * Initialize the HBSC block with a sane configuration, per the
+ * recommendation from ZTE BSP code.
+ */
+ zx_writel(hbsc + HBSC_SATURATION, 0x200);
+ zx_writel(hbsc + HBSC_HUE, 0x0);
+ zx_writel(hbsc + HBSC_BRIGHT, 0x0);
+ zx_writel(hbsc + HBSC_CONTRAST, 0x200);
+
+ zx_writel(hbsc + HBSC_THRESHOLD_COL1, (0x3ac << 16) | 0x40);
+ zx_writel(hbsc + HBSC_THRESHOLD_COL2, (0x3c0 << 16) | 0x40);
+ zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
+}
+
+struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
+ struct zx_layer_data *data,
+ enum drm_plane_type type)
+{
+ const struct drm_plane_helper_funcs *helper;
+ struct zx_plane *zplane;
+ struct drm_plane *plane;
+ const uint32_t *formats;
+ unsigned int format_count;
+ int ret;
+
+ zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
+ if (!zplane)
+ return ERR_PTR(-ENOMEM);
+
+ plane = &zplane->plane;
+
+ zplane->layer = data->layer;
+ zplane->hbsc = data->hbsc;
+ zplane->csc = data->csc;
+ zplane->rsz = data->rsz;
+
+ zx_plane_hbsc_init(zplane);
+
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ helper = &zx_gl_plane_helper_funcs;
+ formats = gl_formats;
+ format_count = ARRAY_SIZE(gl_formats);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ /* TODO: add video layer (vl) support */
+ return ERR_PTR(-ENODEV);
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
+ &zx_plane_funcs, formats, format_count,
+ type, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, helper);
+
+ return plane;
+}
diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h
new file mode 100644
index 000000000000..2b82cd558d9d
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_PLANE_H__
+#define __ZX_PLANE_H__
+
+struct zx_layer_data {
+ void __iomem *layer;
+ void __iomem *csc;
+ void __iomem *hbsc;
+ void __iomem *rsz;
+};
+
+struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
+ struct zx_layer_data *data,
+ enum drm_plane_type type);
+void zx_plane_set_update(struct drm_plane *plane);
+
+#endif /* __ZX_PLANE_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h
new file mode 100644
index 000000000000..3dde6716a558
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane_regs.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_PLANE_REGS_H__
+#define __ZX_PLANE_REGS_H__
+
+/* GL registers */
+#define GL_CTRL0 0x00
+#define GL_UPDATE BIT(5)
+#define GL_CTRL1 0x04
+#define GL_DATA_FMT_SHIFT 0
+#define GL_DATA_FMT_MASK (0xf << GL_DATA_FMT_SHIFT)
+#define GL_FMT_ARGB8888 0
+#define GL_FMT_RGB888 1
+#define GL_FMT_RGB565 2
+#define GL_FMT_ARGB1555 3
+#define GL_FMT_ARGB4444 4
+#define GL_CTRL2 0x08
+#define GL_GLOBAL_ALPHA_SHIFT 8
+#define GL_GLOBAL_ALPHA_MASK (0xff << GL_GLOBAL_ALPHA_SHIFT)
+#define GL_CTRL3 0x0c
+#define GL_SCALER_BYPASS_MODE BIT(0)
+#define GL_STRIDE 0x18
+#define GL_ADDR 0x1c
+#define GL_SRC_SIZE 0x38
+#define GL_SRC_W_SHIFT 16
+#define GL_SRC_W_MASK (0x3fff << GL_SRC_W_SHIFT)
+#define GL_SRC_H_SHIFT 0
+#define GL_SRC_H_MASK (0x3fff << GL_SRC_H_SHIFT)
+#define GL_POS_START 0x9c
+#define GL_POS_END 0xa0
+#define GL_POS_X_SHIFT 16
+#define GL_POS_X_MASK (0x1fff << GL_POS_X_SHIFT)
+#define GL_POS_Y_SHIFT 0
+#define GL_POS_Y_MASK (0x1fff << GL_POS_Y_SHIFT)
+
+#define GL_SRC_W(x) (((x) << GL_SRC_W_SHIFT) & GL_SRC_W_MASK)
+#define GL_SRC_H(x) (((x) << GL_SRC_H_SHIFT) & GL_SRC_H_MASK)
+#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
+#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
+
+/* CSC registers */
+#define CSC_CTRL0 0x30
+#define CSC_COV_MODE_SHIFT 16
+#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT)
+#define CSC_BT601_IMAGE_RGB2YCBCR 0
+#define CSC_BT601_IMAGE_YCBCR2RGB 1
+#define CSC_BT601_VIDEO_RGB2YCBCR 2
+#define CSC_BT601_VIDEO_YCBCR2RGB 3
+#define CSC_BT709_IMAGE_RGB2YCBCR 4
+#define CSC_BT709_IMAGE_YCBCR2RGB 5
+#define CSC_BT709_VIDEO_RGB2YCBCR 6
+#define CSC_BT709_VIDEO_YCBCR2RGB 7
+#define CSC_BT2020_IMAGE_RGB2YCBCR 8
+#define CSC_BT2020_IMAGE_YCBCR2RGB 9
+#define CSC_BT2020_VIDEO_RGB2YCBCR 10
+#define CSC_BT2020_VIDEO_YCBCR2RGB 11
+#define CSC_WORK_ENABLE BIT(0)
+
+/* RSZ registers */
+#define RSZ_SRC_CFG 0x00
+#define RSZ_DEST_CFG 0x04
+#define RSZ_ENABLE_CFG 0x14
+
+#define RSZ_VER_SHIFT 16
+#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT)
+#define RSZ_HOR_SHIFT 0
+#define RSZ_HOR_MASK (0xffff << RSZ_HOR_SHIFT)
+
+#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
+#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
+
+/* HBSC registers */
+#define HBSC_SATURATION 0x00
+#define HBSC_HUE 0x04
+#define HBSC_BRIGHT 0x08
+#define HBSC_CONTRAST 0x0c
+#define HBSC_THRESHOLD_COL1 0x10
+#define HBSC_THRESHOLD_COL2 0x14
+#define HBSC_THRESHOLD_COL3 0x18
+#define HBSC_CTRL0 0x28
+#define HBSC_CTRL_EN BIT(2)
+
+#endif /* __ZX_PLANE_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
new file mode 100644
index 000000000000..73fe15c17c32
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_address.h>
+#include <video/videomode.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_plane.h"
+#include "zx_vou.h"
+#include "zx_vou_regs.h"
+
+#define GL_NUM 2
+#define VL_NUM 3
+
+enum vou_chn_type {
+ VOU_CHN_MAIN,
+ VOU_CHN_AUX,
+};
+
+struct zx_crtc_regs {
+ u32 fir_active;
+ u32 fir_htiming;
+ u32 fir_vtiming;
+ u32 timing_shift;
+ u32 timing_pi_shift;
+};
+
+static const struct zx_crtc_regs main_crtc_regs = {
+ .fir_active = FIR_MAIN_ACTIVE,
+ .fir_htiming = FIR_MAIN_H_TIMING,
+ .fir_vtiming = FIR_MAIN_V_TIMING,
+ .timing_shift = TIMING_MAIN_SHIFT,
+ .timing_pi_shift = TIMING_MAIN_PI_SHIFT,
+};
+
+static const struct zx_crtc_regs aux_crtc_regs = {
+ .fir_active = FIR_AUX_ACTIVE,
+ .fir_htiming = FIR_AUX_H_TIMING,
+ .fir_vtiming = FIR_AUX_V_TIMING,
+ .timing_shift = TIMING_AUX_SHIFT,
+ .timing_pi_shift = TIMING_AUX_PI_SHIFT,
+};
+
+struct zx_crtc_bits {
+ u32 polarity_mask;
+ u32 polarity_shift;
+ u32 int_frame_mask;
+ u32 tc_enable;
+ u32 gl_enable;
+};
+
+static const struct zx_crtc_bits main_crtc_bits = {
+ .polarity_mask = MAIN_POL_MASK,
+ .polarity_shift = MAIN_POL_SHIFT,
+ .int_frame_mask = TIMING_INT_MAIN_FRAME,
+ .tc_enable = MAIN_TC_EN,
+ .gl_enable = OSD_CTRL0_GL0_EN,
+};
+
+static const struct zx_crtc_bits aux_crtc_bits = {
+ .polarity_mask = AUX_POL_MASK,
+ .polarity_shift = AUX_POL_SHIFT,
+ .int_frame_mask = TIMING_INT_AUX_FRAME,
+ .tc_enable = AUX_TC_EN,
+ .gl_enable = OSD_CTRL0_GL1_EN,
+};
+
+struct zx_crtc {
+ struct drm_crtc crtc;
+ struct drm_plane *primary;
+ struct zx_vou_hw *vou;
+ void __iomem *chnreg;
+ const struct zx_crtc_regs *regs;
+ const struct zx_crtc_bits *bits;
+ enum vou_chn_type chn_type;
+ struct clk *pixclk;
+};
+
+#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
+
+struct zx_vou_hw {
+ struct device *dev;
+ void __iomem *osd;
+ void __iomem *timing;
+ void __iomem *vouctl;
+ void __iomem *otfppu;
+ void __iomem *dtrc;
+ struct clk *axi_clk;
+ struct clk *ppu_clk;
+ struct clk *main_clk;
+ struct clk *aux_clk;
+ struct zx_crtc *main_crtc;
+ struct zx_crtc *aux_crtc;
+};
+
+static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+
+ return zcrtc->vou;
+}
+
+void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
+ u32 data_sel_shift = inf->id << 1;
+
+ /* Select data format */
+ zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
+ inf->data_sel << data_sel_shift);
+
+ /* Select channel */
+ zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << inf->id,
+ zcrtc->chn_type << inf->id);
+
+ /* Select interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
+ is_main ? 0 : inf->clocks_sel_bits);
+
+ /* Enable interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits,
+ inf->clocks_en_bits);
+
+ /* Enable the device */
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 1 << inf->id);
+}
+
+void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc)
+{
+ struct zx_vou_hw *vou = crtc_to_vou(crtc);
+
+ /* Disable the device */
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 0);
+
+ /* Disable interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
+}
+
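+/* Trigger a channel register update so new settings take effect */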
+static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
+{
+ zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
+}
+
+static void zx_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ const struct zx_crtc_regs *regs = zcrtc->regs;
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ struct videomode vm;
+ u32 pol = 0;
+ u32 val;
+ int ret;
+
+ drm_display_mode_to_videomode(mode, &vm);
+
+ /* Set up timing parameters */
+ val = V_ACTIVE(vm.vactive - 1);
+ val |= H_ACTIVE(vm.hactive - 1);
+ zx_writel(vou->timing + regs->fir_active, val);
+
+ val = SYNC_WIDE(vm.hsync_len - 1);
+ val |= BACK_PORCH(vm.hback_porch - 1);
+ val |= FRONT_PORCH(vm.hfront_porch - 1);
+ zx_writel(vou->timing + regs->fir_htiming, val);
+
+ val = SYNC_WIDE(vm.vsync_len - 1);
+ val |= BACK_PORCH(vm.vback_porch - 1);
+ val |= FRONT_PORCH(vm.vfront_porch - 1);
+ zx_writel(vou->timing + regs->fir_vtiming, val);
+
+ /* Set up polarities */
+ if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
+ pol |= 1 << POL_VSYNC_SHIFT;
+ if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
+ pol |= 1 << POL_HSYNC_SHIFT;
+
+ zx_writel_mask(vou->timing + TIMING_CTRL, bits->polarity_mask,
+ pol << bits->polarity_shift);
+
+ /* Set up the SHIFT registers following what the ZTE BSP does */
+ zx_writel(vou->timing + regs->timing_shift, H_SHIFT_VAL);
+ zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
+
+ /* Enable TIMING_CTRL */
+ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
+ bits->tc_enable);
+
+ /* Configure channel screen size */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_W_MASK,
+ vm.hactive << CHN_SCREEN_W_SHIFT);
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
+ vm.vactive << CHN_SCREEN_H_SHIFT);
+
+ /* Update channel */
+ vou_chn_set_update(zcrtc);
+
+ /* Enable channel */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
+
+ /* Enable Graphic Layer */
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable,
+ bits->gl_enable);
+
+ drm_crtc_vblank_on(crtc);
+
+ ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vou->dev, "failed to set pixclk rate: %d\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(zcrtc->pixclk);
+ if (ret)
+ DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret);
+}
+
+static void zx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ struct zx_vou_hw *vou = zcrtc->vou;
+
+ clk_disable_unprepare(zcrtc->pixclk);
+
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable Graphic Layer */
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable, 0);
+
+ /* Disable channel */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
+
+ /* Disable TIMING_CTRL */
+ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, 0);
+}
+
+static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ if (!event)
+ return;
+
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = {
+ .enable = zx_crtc_enable,
+ .disable = zx_crtc_disable,
+ .atomic_flush = zx_crtc_atomic_flush,
+};
+
+static const struct drm_crtc_funcs zx_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
+ enum vou_chn_type chn_type)
+{
+ struct device *dev = vou->dev;
+ struct zx_layer_data data;
+ struct zx_crtc *zcrtc;
+ int ret;
+
+ zcrtc = devm_kzalloc(dev, sizeof(*zcrtc), GFP_KERNEL);
+ if (!zcrtc)
+ return -ENOMEM;
+
+ zcrtc->vou = vou;
+ zcrtc->chn_type = chn_type;
+
+ if (chn_type == VOU_CHN_MAIN) {
+ data.layer = vou->osd + MAIN_GL_OFFSET;
+ data.csc = vou->osd + MAIN_CSC_OFFSET;
+ data.hbsc = vou->osd + MAIN_HBSC_OFFSET;
+ data.rsz = vou->otfppu + MAIN_RSZ_OFFSET;
+ zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
+ zcrtc->regs = &main_crtc_regs;
+ zcrtc->bits = &main_crtc_bits;
+ } else {
+ data.layer = vou->osd + AUX_GL_OFFSET;
+ data.csc = vou->osd + AUX_CSC_OFFSET;
+ data.hbsc = vou->osd + AUX_HBSC_OFFSET;
+ data.rsz = vou->otfppu + AUX_RSZ_OFFSET;
+ zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
+ zcrtc->regs = &aux_crtc_regs;
+ zcrtc->bits = &aux_crtc_bits;
+ }
+
+ zcrtc->pixclk = devm_clk_get(dev, (chn_type == VOU_CHN_MAIN) ?
+ "main_wclk" : "aux_wclk");
+ if (IS_ERR(zcrtc->pixclk)) {
+ ret = PTR_ERR(zcrtc->pixclk);
+ DRM_DEV_ERROR(dev, "failed to get pix clk: %d\n", ret);
+ return ret;
+ }
+
+ zcrtc->primary = zx_plane_init(drm, dev, &data, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(zcrtc->primary)) {
+ ret = PTR_ERR(zcrtc->primary);
+ DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
+ &zx_crtc_funcs, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init drm crtc: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&zcrtc->crtc, &zx_crtc_helper_funcs);
+
+ if (chn_type == VOU_CHN_MAIN)
+ vou->main_crtc = zcrtc;
+ else
+ vou->aux_crtc = zcrtc;
+
+ return 0;
+}
+
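+/* Find the CRTC that backs the given vblank pipe number */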
+static inline struct drm_crtc *zx_find_crtc(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ if (crtc->index == pipe)
+ return crtc;
+
+ return NULL;
+}
+
+int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct drm_crtc *crtc;
+ struct zx_crtc *zcrtc;
+ struct zx_vou_hw *vou;
+ u32 int_frame_mask;
+
+ crtc = zx_find_crtc(drm, pipe);
+ if (!crtc)
+ return 0;
+
+ vou = crtc_to_vou(crtc);
+ zcrtc = to_zx_crtc(crtc);
+ int_frame_mask = zcrtc->bits->int_frame_mask;
+
+ zx_writel_mask(vou->timing + TIMING_INT_CTRL, int_frame_mask,
+ int_frame_mask);
+
+ return 0;
+}
+
+void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct drm_crtc *crtc;
+ struct zx_crtc *zcrtc;
+ struct zx_vou_hw *vou;
+
+ crtc = zx_find_crtc(drm, pipe);
+ if (!crtc)
+ return;
+
+ vou = crtc_to_vou(crtc);
+ zcrtc = to_zx_crtc(crtc);
+
+ zx_writel_mask(vou->timing + TIMING_INT_CTRL,
+ zcrtc->bits->int_frame_mask, 0);
+}
+
+static irqreturn_t vou_irq_handler(int irq, void *dev_id)
+{
+ struct zx_vou_hw *vou = dev_id;
+ u32 state;
+
+ /* Handle TIMING_CTRL frame interrupts */
+ state = zx_readl(vou->timing + TIMING_INT_STATE);
+ zx_writel(vou->timing + TIMING_INT_STATE, state);
+
+ if (state & TIMING_INT_MAIN_FRAME)
+ drm_crtc_handle_vblank(&vou->main_crtc->crtc);
+
+ if (state & TIMING_INT_AUX_FRAME)
+ drm_crtc_handle_vblank(&vou->aux_crtc->crtc);
+
+ /* Handle OSD interrupts */
+ state = zx_readl(vou->osd + OSD_INT_STA);
+ zx_writel(vou->osd + OSD_INT_CLRSTA, state);
+
+ if (state & OSD_INT_MAIN_UPT) {
+ vou_chn_set_update(vou->main_crtc);
+ zx_plane_set_update(vou->main_crtc->primary);
+ }
+
+ if (state & OSD_INT_AUX_UPT) {
+ vou_chn_set_update(vou->aux_crtc);
+ zx_plane_set_update(vou->aux_crtc->primary);
+ }
+
+ if (state & OSD_INT_ERROR)
+ DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
+
+ return IRQ_HANDLED;
+}
+
+static void vou_dtrc_init(struct zx_vou_hw *vou)
+{
+ /* Clear bit for bypass by ID */
+ zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL,
+ TILE2RASTESCAN_BYPASS_MODE, 0);
+
+ /* Select ARIDR mode */
+ zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, DETILE_ARIDR_MODE_MASK,
+ DETILE_ARID_IN_ARIDR);
+
+ /* Bypass decompression for both frames */
+ zx_writel_mask(vou->dtrc + DTRC_F0_CTRL, DTRC_DECOMPRESS_BYPASS,
+ DTRC_DECOMPRESS_BYPASS);
+ zx_writel_mask(vou->dtrc + DTRC_F1_CTRL, DTRC_DECOMPRESS_BYPASS,
+ DTRC_DECOMPRESS_BYPASS);
+
+ /* Set up ARID register */
+ zx_writel(vou->dtrc + DTRC_ARID, DTRC_ARID3(0xf) | DTRC_ARID2(0xe) |
+ DTRC_ARID1(0xf) | DTRC_ARID0(0xe));
+}
+
+static void vou_hw_init(struct zx_vou_hw *vou)
+{
+ /* Set GL0 to main channel and GL1 to aux channel */
+ zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL0_SEL, 0);
+ zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL1_SEL,
+ OSD_CTRL0_GL1_SEL);
+
+ /* Release reset for all VOU modules */
+ zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
+
+ /* Select main clock for GL0 and aux clock for GL1 module */
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL0_SEL, 0);
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL1_SEL,
+ VOU_CLK_GL1_SEL);
+
+ /* Enable clock auto-gating for all VOU modules */
+ zx_writel(vou->vouctl + VOU_CLK_REQEN, ~0);
+
+ /* Enable all VOU module clocks */
+ zx_writel(vou->vouctl + VOU_CLK_EN, ~0);
+
+ /* Clear both OSD and TIMING_CTRL interrupt state */
+ zx_writel(vou->osd + OSD_INT_CLRSTA, ~0);
+ zx_writel(vou->timing + TIMING_INT_STATE, ~0);
+
+ /* Enable OSD and TIMING_CTRL interrupts */
+ zx_writel(vou->osd + OSD_INT_MSK, OSD_INT_ENABLE);
+ zx_writel(vou->timing + TIMING_INT_CTRL, TIMING_INT_ENABLE);
+
+ /* Select GPC as input to gl/vl scaler as a sane default setting */
+ zx_writel(vou->otfppu + OTFPPU_RSZ_DATA_SOURCE, 0x2a);
+
+ /*
+ * Channel and layer logic needs to be reset at each frame start for
+ * the VOU to work properly.
+ */
+ zx_writel_mask(vou->osd + OSD_RST_CLR, RST_PER_FRAME, RST_PER_FRAME);
+
+ vou_dtrc_init(vou);
+}
+
+static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct zx_vou_hw *vou;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ vou = devm_kzalloc(dev, sizeof(*vou), GFP_KERNEL);
+ if (!vou)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osd");
+ vou->osd = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->osd)) {
+ ret = PTR_ERR(vou->osd);
+ DRM_DEV_ERROR(dev, "failed to remap osd region: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "timing_ctrl");
+ vou->timing = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->timing)) {
+ ret = PTR_ERR(vou->timing);
+ DRM_DEV_ERROR(dev, "failed to remap timing_ctrl region: %d\n",
+ ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dtrc");
+ vou->dtrc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->dtrc)) {
+ ret = PTR_ERR(vou->dtrc);
+ DRM_DEV_ERROR(dev, "failed to remap dtrc region: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vou_ctrl");
+ vou->vouctl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->vouctl)) {
+ ret = PTR_ERR(vou->vouctl);
+ DRM_DEV_ERROR(dev, "failed to remap vou_ctrl region: %d\n",
+ ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otfppu");
+ vou->otfppu = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->otfppu)) {
+ ret = PTR_ERR(vou->otfppu);
+ DRM_DEV_ERROR(dev, "failed to remap otfppu region: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ vou->axi_clk = devm_clk_get(dev, "aclk");
+ if (IS_ERR(vou->axi_clk)) {
+ ret = PTR_ERR(vou->axi_clk);
+ DRM_DEV_ERROR(dev, "failed to get axi_clk: %d\n", ret);
+ return ret;
+ }
+
+ vou->ppu_clk = devm_clk_get(dev, "ppu_wclk");
+ if (IS_ERR(vou->ppu_clk)) {
+ ret = PTR_ERR(vou->ppu_clk);
+ DRM_DEV_ERROR(dev, "failed to get ppu_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vou->axi_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable axi_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vou->ppu_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable ppu_clk: %d\n", ret);
+ goto disable_axi_clk;
+ }
+
+ vou->dev = dev;
+ dev_set_drvdata(dev, vou);
+
+ vou_hw_init(vou);
+
+ ret = devm_request_irq(dev, irq, vou_irq_handler, 0, "zx_vou", vou);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to request vou irq: %d\n", ret);
+ goto disable_ppu_clk;
+ }
+
+ ret = zx_crtc_init(drm, vou, VOU_CHN_MAIN);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init main channel crtc: %d\n",
+ ret);
+ goto disable_ppu_clk;
+ }
+
+ ret = zx_crtc_init(drm, vou, VOU_CHN_AUX);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init aux channel crtc: %d\n",
+ ret);
+ goto disable_ppu_clk;
+ }
+
+ return 0;
+
+disable_ppu_clk:
+ clk_disable_unprepare(vou->ppu_clk);
+disable_axi_clk:
+ clk_disable_unprepare(vou->axi_clk);
+ return ret;
+}
+
+static void zx_crtc_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct zx_vou_hw *vou = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vou->axi_clk);
+ clk_disable_unprepare(vou->ppu_clk);
+}
+
+static const struct component_ops zx_crtc_component_ops = {
+ .bind = zx_crtc_bind,
+ .unbind = zx_crtc_unbind,
+};
+
+static int zx_crtc_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_crtc_component_ops);
+}
+
+static int zx_crtc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_crtc_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_crtc_of_match[] = {
+ { .compatible = "zte,zx296718-dpc", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_crtc_of_match);
+
+struct platform_driver zx_crtc_driver = {
+ .probe = zx_crtc_probe,
+ .remove = zx_crtc_remove,
+ .driver = {
+ .name = "zx-crtc",
+ .of_match_table = zx_crtc_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
new file mode 100644
index 000000000000..349e06cd86f4
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_VOU_H__
+#define __ZX_VOU_H__
+
+#define VOU_CRTC_MASK 0x3
+
+/* VOU output interfaces */
+enum vou_inf_id {
+ VOU_HDMI = 0,
+ VOU_RGB_LCD = 1,
+ VOU_TV_ENC = 2,
+ VOU_MIPI_DSI = 3,
+ VOU_LVDS = 4,
+ VOU_VGA = 5,
+};
+
+enum vou_inf_data_sel {
+ VOU_YUV444 = 0,
+ VOU_RGB_101010 = 1,
+ VOU_RGB_888 = 2,
+ VOU_RGB_666 = 3,
+};
+
+struct vou_inf {
+ enum vou_inf_id id;
+ enum vou_inf_data_sel data_sel;
+ u32 clocks_en_bits;
+ u32 clocks_sel_bits;
+};
+
+void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc);
+void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc);
+
+int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe);
+void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe);
+
+#endif /* __ZX_VOU_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h
new file mode 100644
index 000000000000..f44e7a4ae441
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou_regs.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_VOU_REGS_H__
+#define __ZX_VOU_REGS_H__
+
+/* Sub-module offset */
+#define MAIN_GL_OFFSET 0x130
+#define MAIN_CSC_OFFSET 0x580
+#define MAIN_HBSC_OFFSET 0x820
+#define MAIN_RSZ_OFFSET 0x600 /* OTFPPU sub-module */
+
+#define AUX_GL_OFFSET 0x200
+#define AUX_CSC_OFFSET 0x5d0
+#define AUX_HBSC_OFFSET 0x860
+#define AUX_RSZ_OFFSET 0x800
+
+/* OSD (GPC_GLOBAL) registers */
+#define OSD_INT_STA 0x04
+#define OSD_INT_CLRSTA 0x08
+#define OSD_INT_MSK 0x0c
+#define OSD_INT_AUX_UPT BIT(14)
+#define OSD_INT_MAIN_UPT BIT(13)
+#define OSD_INT_GL1_LBW BIT(10)
+#define OSD_INT_GL0_LBW BIT(9)
+#define OSD_INT_VL2_LBW BIT(8)
+#define OSD_INT_VL1_LBW BIT(7)
+#define OSD_INT_VL0_LBW BIT(6)
+#define OSD_INT_BUS_ERR BIT(3)
+#define OSD_INT_CFG_ERR BIT(2)
+#define OSD_INT_ERROR (\
+ OSD_INT_GL1_LBW | OSD_INT_GL0_LBW | \
+ OSD_INT_VL2_LBW | OSD_INT_VL1_LBW | OSD_INT_VL0_LBW | \
+ OSD_INT_BUS_ERR | OSD_INT_CFG_ERR \
+)
+#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
+#define OSD_CTRL0 0x10
+#define OSD_CTRL0_GL0_EN BIT(7)
+#define OSD_CTRL0_GL0_SEL BIT(6)
+#define OSD_CTRL0_GL1_EN BIT(5)
+#define OSD_CTRL0_GL1_SEL BIT(4)
+#define OSD_RST_CLR 0x1c
+#define RST_PER_FRAME BIT(19)
+
+/* Main/Aux channel registers */
+#define OSD_MAIN_CHN 0x470
+#define OSD_AUX_CHN 0x4d0
+#define CHN_CTRL0 0x00
+#define CHN_ENABLE BIT(0)
+#define CHN_CTRL1 0x04
+#define CHN_SCREEN_W_SHIFT 18
+#define CHN_SCREEN_W_MASK (0x1fff << CHN_SCREEN_W_SHIFT)
+#define CHN_SCREEN_H_SHIFT 5
+#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT)
+#define CHN_UPDATE 0x08
+
+/* TIMING_CTRL registers */
+#define TIMING_TC_ENABLE 0x04
+#define AUX_TC_EN BIT(1)
+#define MAIN_TC_EN BIT(0)
+#define FIR_MAIN_ACTIVE 0x08
+#define FIR_AUX_ACTIVE 0x0c
+#define V_ACTIVE_SHIFT 16
+#define V_ACTIVE_MASK (0xffff << V_ACTIVE_SHIFT)
+#define H_ACTIVE_SHIFT 0
+#define H_ACTIVE_MASK (0xffff << H_ACTIVE_SHIFT)
+#define FIR_MAIN_H_TIMING 0x10
+#define FIR_MAIN_V_TIMING 0x14
+#define FIR_AUX_H_TIMING 0x18
+#define FIR_AUX_V_TIMING 0x1c
+#define SYNC_WIDE_SHIFT 22
+#define SYNC_WIDE_MASK (0x3ff << SYNC_WIDE_SHIFT)
+#define BACK_PORCH_SHIFT 11
+#define BACK_PORCH_MASK (0x7ff << BACK_PORCH_SHIFT)
+#define FRONT_PORCH_SHIFT 0
+#define FRONT_PORCH_MASK (0x7ff << FRONT_PORCH_SHIFT)
+#define TIMING_CTRL 0x20
+#define AUX_POL_SHIFT 3
+#define AUX_POL_MASK (0x7 << AUX_POL_SHIFT)
+#define MAIN_POL_SHIFT 0
+#define MAIN_POL_MASK (0x7 << MAIN_POL_SHIFT)
+#define POL_DE_SHIFT 2
+#define POL_VSYNC_SHIFT 1
+#define POL_HSYNC_SHIFT 0
+#define TIMING_INT_CTRL 0x24
+#define TIMING_INT_STATE 0x28
+#define TIMING_INT_AUX_FRAME BIT(3)
+#define TIMING_INT_MAIN_FRAME BIT(1)
+#define TIMING_INT_AUX_FRAME_SEL_VSW (0x2 << 10)
+#define TIMING_INT_MAIN_FRAME_SEL_VSW (0x2 << 6)
+#define TIMING_INT_ENABLE (\
+ TIMING_INT_MAIN_FRAME_SEL_VSW | TIMING_INT_AUX_FRAME_SEL_VSW | \
+ TIMING_INT_MAIN_FRAME | TIMING_INT_AUX_FRAME \
+)
+#define TIMING_MAIN_SHIFT 0x2c
+#define TIMING_AUX_SHIFT 0x30
+#define H_SHIFT_VAL 0x0048
+#define TIMING_MAIN_PI_SHIFT 0x68
+#define TIMING_AUX_PI_SHIFT 0x6c
+#define H_PI_SHIFT_VAL 0x000f
+
+#define V_ACTIVE(x) (((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK)
+#define H_ACTIVE(x) (((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK)
+
+#define SYNC_WIDE(x) (((x) << SYNC_WIDE_SHIFT) & SYNC_WIDE_MASK)
+#define BACK_PORCH(x) (((x) << BACK_PORCH_SHIFT) & BACK_PORCH_MASK)
+#define FRONT_PORCH(x) (((x) << FRONT_PORCH_SHIFT) & FRONT_PORCH_MASK)
+
+/* DTRC registers */
+#define DTRC_F0_CTRL 0x2c
+#define DTRC_F1_CTRL 0x5c
+#define DTRC_DECOMPRESS_BYPASS BIT(17)
+#define DTRC_DETILE_CTRL 0x68
+#define TILE2RASTESCAN_BYPASS_MODE BIT(30)
+#define DETILE_ARIDR_MODE_MASK (0x3 << 0)
+#define DETILE_ARID_ALL 0
+#define DETILE_ARID_IN_ARIDR 1
+#define DETILE_ARID_BYP_BUT_ARIDR 2
+#define DETILE_ARID_IN_ARIDR2 3
+#define DTRC_ARID 0x6c
+#define DTRC_ARID3_SHIFT 24
+#define DTRC_ARID3_MASK (0xff << DTRC_ARID3_SHIFT)
+#define DTRC_ARID2_SHIFT 16
+#define DTRC_ARID2_MASK (0xff << DTRC_ARID2_SHIFT)
+#define DTRC_ARID1_SHIFT 8
+#define DTRC_ARID1_MASK (0xff << DTRC_ARID1_SHIFT)
+#define DTRC_ARID0_SHIFT 0
+#define DTRC_ARID0_MASK (0xff << DTRC_ARID0_SHIFT)
+#define DTRC_DEC2DDR_ARID 0x70
+
+#define DTRC_ARID3(x) (((x) << DTRC_ARID3_SHIFT) & DTRC_ARID3_MASK)
+#define DTRC_ARID2(x) (((x) << DTRC_ARID2_SHIFT) & DTRC_ARID2_MASK)
+#define DTRC_ARID1(x) (((x) << DTRC_ARID1_SHIFT) & DTRC_ARID1_MASK)
+#define DTRC_ARID0(x) (((x) << DTRC_ARID0_SHIFT) & DTRC_ARID0_MASK)
+
+/* VOU_CTRL registers */
+#define VOU_INF_EN 0x00
+#define VOU_INF_CH_SEL 0x04
+#define VOU_INF_DATA_SEL 0x08
+#define VOU_SOFT_RST 0x14
+#define VOU_CLK_SEL 0x18
+#define VOU_CLK_GL1_SEL BIT(5)
+#define VOU_CLK_GL0_SEL BIT(4)
+#define VOU_CLK_REQEN 0x20
+#define VOU_CLK_EN 0x24
+
+/* OTFPPU_CTRL registers */
+#define OTFPPU_RSZ_DATA_SOURCE 0x04
+
+#endif /* __ZX_VOU_REGS_H__ */
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index aefdff95356d..08766c6e7856 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,6 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
- depends on RESET_CONTROLLER
select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index b9539f7c5e9a..97218af4fe75 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -88,6 +88,8 @@ enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
@@ -1284,8 +1286,11 @@ static int ipu_irq_init(struct ipu_soc *ipu)
return ret;
}
- for (i = 0; i < IPU_NUM_IRQS; i += 32)
+ /* Mask and clear all interrupts */
+ for (i = 0; i < IPU_NUM_IRQS; i += 32) {
ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+ ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
+ }
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
gc = irq_get_domain_generic_chip(ipu->domain, i);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index fcb7dc86167b..4b2b67113d92 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -417,42 +417,6 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
-void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
- u32 pixel_format, int stride, int height)
-{
- int fourcc, u_offset, v_offset;
- int uv_stride = 0;
-
- fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
- switch (fourcc) {
- case DRM_FORMAT_YUV420:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height / 2);
- break;
- case DRM_FORMAT_YVU420:
- uv_stride = stride / 2;
- v_offset = stride * height;
- u_offset = v_offset + (uv_stride * height / 2);
- break;
- case DRM_FORMAT_YUV422:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height);
- break;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- uv_stride = stride;
- u_offset = stride * height;
- v_offset = 0;
- break;
- default:
- return;
- }
- ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
-
static const struct ipu_rgb def_xrgb_32 = {
.red = { .offset = 16, .length = 8, },
.green = { .offset = 8, .length = 8, },
@@ -590,6 +554,13 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
case DRM_FORMAT_NV12:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index d6e5ded24418..63c7292f427a 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -529,6 +529,22 @@ void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
}
EXPORT_SYMBOL_GPL(ipu_csi_set_window);
+void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+ reg &= ~(CSI_HORI_DOWNSIZE_EN | CSI_VERT_DOWNSIZE_EN);
+ reg |= (horiz ? CSI_HORI_DOWNSIZE_EN : 0) |
+ (vert ? CSI_VERT_DOWNSIZE_EN : 0);
+ ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
u32 pix_clk)
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index a8d87ddd8a17..d2f1bd9d3deb 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -535,7 +535,7 @@ int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
return -EINVAL;
}
- dev_warn(di->ipu->dev, "videomode adapted for IPU restrictions\n");
+ dev_dbg(di->ipu->dev, "videomode adapted for IPU restrictions\n");
return 0;
}
EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 2ba7d437a2af..805b6fa7b5f4 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -1617,7 +1617,7 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
complete, complete_context);
if (IS_ERR(ctx))
- return ERR_PTR(PTR_ERR(ctx));
+ return ERR_CAST(ctx);
run = kzalloc(sizeof(*run), GFP_KERNEL);
if (!run) {
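The ipu_image_convert() change above replaces ERR_PTR(PTR_ERR(ctx)) with ERR_CAST(ctx), which forwards an error-valued pointer of one type as another without decoding and re-encoding the errno. A minimal sketch of the idiom, with made-up type and function names:

    /* Illustrative only: hypothetical names, not driver code. */
    static struct run *start_run(struct device *dev)
    {
            struct ctx *c = prepare_ctx(dev);  /* valid ctx or ERR_PTR(-E...) */

            if (IS_ERR(c))
                    return ERR_CAST(c);  /* keep the encoded errno, change the pointer type */
            return ctx_to_run(c);
    }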
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1887f199ccb7..0f5b2dd24507 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -31,6 +31,10 @@
#define pr_fmt(fmt) "vgaarb: " fmt
+#define vgaarb_dbg(dev, fmt, arg...) dev_dbg(dev, "vgaarb: " fmt, ##arg)
+#define vgaarb_info(dev, fmt, arg...) dev_info(dev, "vgaarb: " fmt, ##arg)
+#define vgaarb_err(dev, fmt, arg...) dev_err(dev, "vgaarb: " fmt, ##arg)
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -188,6 +192,7 @@ static void vga_check_first_use(void)
static struct vga_device *__vga_tryget(struct vga_device *vgadev,
unsigned int rsrc)
{
+ struct device *dev = &vgadev->pdev->dev;
unsigned int wants, legacy_wants, match;
struct vga_device *conflict;
unsigned int pci_bits;
@@ -203,8 +208,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
(vgadev->decodes & VGA_RSRC_LEGACY_MEM))
rsrc |= VGA_RSRC_LEGACY_MEM;
- pr_debug("%s: %d\n", __func__, rsrc);
- pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
+ vgaarb_dbg(dev, "%s: %d\n", __func__, rsrc);
+ vgaarb_dbg(dev, "%s: owns: %d\n", __func__, vgadev->owns);
/* Check what resources we need to acquire */
wants = rsrc & ~vgadev->owns;
@@ -336,9 +341,10 @@ lock_them:
static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
{
+ struct device *dev = &vgadev->pdev->dev;
unsigned int old_locks = vgadev->locks;
- pr_debug("%s\n", __func__);
+ vgaarb_dbg(dev, "%s\n", __func__);
/* Update our counters, and account for equivalent legacy resources
* if we decode them
@@ -611,7 +617,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Allocate structure */
vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
if (vgadev == NULL) {
- pr_err("failed to allocate pci device\n");
+ vgaarb_err(&pdev->dev, "failed to allocate VGA arbiter data\n");
/*
* What to do on allocation failure ? For now, let's just do
* nothing, I'm not sure there is anything saner to be done.
@@ -663,7 +669,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
*/
if (vga_default == NULL &&
((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
- pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
+ vgaarb_info(&pdev->dev, "setting as boot VGA device\n");
vga_set_default_device(pdev);
}
@@ -672,8 +678,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Add to the list */
list_add(&vgadev->list, &vga_list);
vga_count++;
- pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
- pci_name(pdev),
+ vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n",
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns),
vga_iostate_to_str(vgadev->locks));
@@ -725,6 +730,7 @@ bail:
static inline void vga_update_device_decodes(struct vga_device *vgadev,
int new_decodes)
{
+ struct device *dev = &vgadev->pdev->dev;
int old_decodes, decodes_removed, decodes_unlocked;
old_decodes = vgadev->decodes;
@@ -732,8 +738,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
decodes_unlocked = vgadev->locks & decodes_removed;
vgadev->decodes = new_decodes;
- pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
- pci_name(vgadev->pdev),
+ vgaarb_info(dev, "changed VGA decodes: olddecodes=%s,decodes=%s:owns=%s\n",
vga_iostate_to_str(old_decodes),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns));
@@ -754,7 +759,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
new_decodes & VGA_RSRC_LEGACY_MASK)
vga_decode_count++;
- pr_debug("decoding count now is: %d\n", vga_decode_count);
+ vgaarb_dbg(dev, "decoding count now is: %d\n", vga_decode_count);
}
static void __vga_set_legacy_decoding(struct pci_dev *pdev,
@@ -1022,21 +1027,16 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
unsigned int io_state;
- char *kbuf, *curr_pos;
+ char kbuf[64], *curr_pos;
size_t remaining = count;
int ret_val;
int i;
-
- kbuf = kmalloc(count + 1, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- if (copy_from_user(kbuf, buf, count)) {
- kfree(kbuf);
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+ if (copy_from_user(kbuf, buf, count))
return -EFAULT;
- }
curr_pos = kbuf;
kbuf[count] = '\0'; /* Just to make sure... */
@@ -1189,24 +1189,25 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
ret_val = -EPROTO;
goto done;
}
- pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
- domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-
pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
- pr_debug("pdev %p\n", pdev);
if (!pdev) {
- pr_err("invalid PCI address %x:%x:%x\n",
- domain, bus, devfn);
+ pr_debug("invalid PCI address %04x:%02x:%02x.%x\n",
+ domain, bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
ret_val = -ENODEV;
goto done;
}
+
+ pr_debug("%s ==> %04x:%02x:%02x.%x pdev %p\n", curr_pos,
+ domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ pdev);
}
vgadev = vgadev_find(pdev);
pr_debug("vgadev %p\n", vgadev);
if (vgadev == NULL) {
if (pdev) {
- pr_err("this pci device is not a vga device\n");
+ vgaarb_dbg(&pdev->dev, "not a VGA device\n");
pci_dev_put(pdev);
}
@@ -1226,7 +1227,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
}
}
if (i == MAX_USER_CARDS) {
- pr_err("maximum user cards (%d) number reached!\n",
+ vgaarb_dbg(&pdev->dev, "maximum user cards (%d) number reached, ignoring this one!\n",
MAX_USER_CARDS);
pci_dev_put(pdev);
/* XXX: which value to return? */
@@ -1259,11 +1260,9 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
goto done;
}
/* If we got here, the message written is not part of the protocol! */
- kfree(kbuf);
return -EPROTO;
done:
- kfree(kbuf);
return ret_val;
}
@@ -1317,8 +1316,8 @@ static int vga_arb_release(struct inode *inode, struct file *file)
uc = &priv->cards[i];
if (uc->pdev == NULL)
continue;
- pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
- uc->io_cnt, uc->mem_cnt);
+ vgaarb_dbg(&uc->pdev->dev, "uc->io_cnt == %d, uc->mem_cnt == %d\n",
+ uc->io_cnt, uc->mem_cnt);
while (uc->io_cnt--)
vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
while (uc->mem_cnt--)
@@ -1371,7 +1370,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
struct pci_dev *pdev = to_pci_dev(dev);
bool notify = false;
- pr_debug("%s\n", __func__);
+ vgaarb_dbg(dev, "%s\n", __func__);
/* For now we're only interested in devices added and removed. I didn't
* test this thing here, so someone needs to double check for the
@@ -1423,9 +1422,8 @@ static int __init vga_arb_device_init(void)
PCI_ANY_ID, pdev)) != NULL)
vga_arbiter_add_pci_device(pdev);
- pr_info("loaded\n");
-
list_for_each_entry(vgadev, &vga_list, list) {
+ struct device *dev = &vgadev->pdev->dev;
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
/*
* Override vga_arbiter_add_pci_device()'s I/O based detection
@@ -1458,21 +1456,19 @@ static int __init vga_arb_device_init(void)
continue;
if (!vga_default_device())
- pr_info("setting as boot device: PCI:%s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "setting as boot device\n");
else if (vgadev->pdev != vga_default_device())
- pr_info("overriding boot device: PCI:%s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "overriding boot device\n");
vga_set_default_device(vgadev->pdev);
}
#endif
if (vgadev->bridge_has_one_vga)
- pr_info("bridge control possible %s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "bridge control possible\n");
else
- pr_info("no bridge control possible %s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "no bridge control possible\n");
}
+
+ pr_info("loaded\n");
return rc;
}
subsys_initcall(vga_arb_device_init);
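The conversion above from bare pr_*() calls to vgaarb_dbg()/vgaarb_info()/vgaarb_err() keeps the subsystem prefix while letting the dev_*() helpers print the device name, so pci_name(pdev) no longer has to be carried in every message. Roughly the same pattern, with an invented prefix, looks like:

    #define myarb_dbg(dev, fmt, arg...)  dev_dbg(dev, "myarb: " fmt, ##arg)
    #define myarb_info(dev, fmt, arg...) dev_info(dev, "myarb: " fmt, ##arg)

    static void report_device(struct pci_dev *pdev)
    {
            /* dev_info() already prints the PCI address of pdev */
            myarb_info(&pdev->dev, "decodes=%s\n", "io+mem");
    }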
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 086d8a507157..60d30203a5fa 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -32,6 +32,11 @@
#include <linux/usb/ch9.h>
#include "hid-ids.h"
+#define CP2112_REPORT_MAX_LENGTH 64
+#define CP2112_GPIO_CONFIG_LENGTH 5
+#define CP2112_GPIO_GET_LENGTH 2
+#define CP2112_GPIO_SET_LENGTH 3
+
enum {
CP2112_GPIO_CONFIG = 0x02,
CP2112_GPIO_GET = 0x03,
@@ -161,6 +166,8 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
+ u8 *in_out_buffer;
+ spinlock_t lock;
};
static int gpio_push_pull = 0xFF;
@@ -171,62 +178,86 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[5];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
- sizeof(buf), HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- return ret;
+ goto exit;
}
buf[1] &= ~(1 << offset);
buf[2] = gpio_push_pull;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- return ret;
+ goto exit;
}
- return 0;
+ ret = 0;
+
+exit:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret <= 0 ? ret : -EIO;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[3];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
buf[2] = 1 << offset;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
+ CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[2];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ spin_lock_irqsave(&dev->lock, flags);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
+ CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_GET_LENGTH) {
hid_err(hdev, "error requesting GPIO values: %d\n", ret);
- return ret;
+ ret = ret < 0 ? ret : -EIO;
+ goto exit;
}
- return (buf[1] >> offset) & 1;
+ ret = (buf[1] >> offset) & 1;
+
+exit:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return ret;
}
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
@@ -234,27 +265,33 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[5];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
- sizeof(buf), HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- return ret;
+ goto fail;
}
buf[1] |= 1 << offset;
buf[2] = gpio_push_pull;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- return ret;
+ goto fail;
}
+ spin_unlock_irqrestore(&dev->lock, flags);
+
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
@@ -262,6 +299,10 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
cp2112_gpio_set(chip, offset, value);
return 0;
+
+fail:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret < 0 ? ret : -EIO;
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1007,6 +1048,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct cp2112_smbus_config_report config;
int ret;
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH,
+ GFP_KERNEL);
+ if (!dev->in_out_buffer)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
@@ -1063,12 +1115,6 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_power_normal;
}
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto err_power_normal;
- }
-
hid_set_drvdata(hdev, (void *)dev);
dev->hdev = hdev;
dev->adap.owner = THIS_MODULE;
@@ -1087,7 +1133,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret) {
hid_err(hdev, "error registering i2c adapter\n");
- goto err_free_dev;
+ goto err_power_normal;
}
hid_dbg(hdev, "adapter registered\n");
@@ -1123,8 +1169,6 @@ err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
i2c_del_adapter(&dev->adap);
-err_free_dev:
- kfree(dev);
err_power_normal:
hid_hw_power(hdev, PM_HINT_NORMAL);
err_hid_close:
@@ -1149,7 +1193,6 @@ static void cp2112_remove(struct hid_device *hdev)
*/
hid_hw_close(hdev);
hid_hw_stop(hdev);
- kfree(dev);
}
static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
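The cp2112 changes above replace on-stack report buffers with a single devm-allocated dev->in_out_buffer serialized by a spinlock, since buffers handed to hid_hw_raw_request() may be passed down to the USB layer and must not live on the stack. A minimal sketch of the resulting access pattern (the my_dev/my_get_config names are invented):

    struct my_dev {
            struct hid_device *hdev;
            u8 *in_out_buffer;      /* heap buffer, safe to hand to the transport */
            spinlock_t lock;        /* serializes users of in_out_buffer */
    };

    static int my_get_config(struct my_dev *dev)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&dev->lock, flags);
            ret = hid_hw_raw_request(dev->hdev, CP2112_GPIO_CONFIG,
                                     dev->in_out_buffer, CP2112_GPIO_CONFIG_LENGTH,
                                     HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
            spin_unlock_irqrestore(&dev->lock, flags);

            return ret == CP2112_GPIO_CONFIG_LENGTH ? 0 : (ret < 0 ? ret : -EIO);
    }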
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6cfb5cacc253..575aa65436d1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -179,6 +179,7 @@
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
#define USB_DEVICE_ID_ATEN_CS682 0x2213
+#define USB_DEVICE_ID_ATEN_CS692 0x8021
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 76f644deb0a7..c5c5fbe9d605 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -756,11 +756,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
/* Setup wireless link with Logitech Wii wheel */
if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
- unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret >= 0) {
/* insert a little delay of 10 jiffies ~ 40ms */
wait_queue_head_t wait;
@@ -772,9 +777,10 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
buf[1] = 0xB2;
get_random_bytes(&buf[2], 2);
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
}
+ kfree(buf);
}
if (drv_data->quirks & LG_FF)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index d6fa496d0ca2..20b40ad26325 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -493,7 +493,8 @@ static int magicmouse_input_configured(struct hid_device *hdev,
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- __u8 feature[] = { 0xd7, 0x01 };
+ const u8 feature[] = { 0xd7, 0x01 };
+ u8 *buf;
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
@@ -544,6 +545,12 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
+ buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
/*
* Some devices respond with 'invalid report id' when feature
* report switching it into multitouch mode is sent to it.
@@ -552,8 +559,9 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
if (ret != -EIO && ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9cd2ca34a6be..be89bcbf6a71 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -188,10 +188,16 @@ static int rmi_set_page(struct hid_device *hdev, u8 page)
static int rmi_set_mode(struct hid_device *hdev, u8 mode)
{
int ret;
- u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+ const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+ u8 *buf;
- ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf,
+ buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf,
sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
if (ret < 0) {
dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode,
ret);
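The hid-lg, hid-magicmouse and hid-rmi hunks all follow the same recipe: duplicate a small const template into a kmalloc'ed buffer with kmemdup() before calling hid_hw_raw_request(), because the transport may DMA from the buffer and stack or rodata memory is not safe for that. A minimal sketch, with an invented function name:

    static int send_mode(struct hid_device *hdev, u8 mode)
    {
            const u8 tmpl[2] = { RMI_SET_RMI_MODE_REPORT_ID, mode };
            u8 *buf;
            int ret;

            buf = kmemdup(tmpl, sizeof(tmpl), GFP_KERNEL);  /* heap copy, DMA-safe */
            if (!buf)
                    return -ENOMEM;

            ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(tmpl),
                                     HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
            kfree(buf);
            return ret < 0 ? ret : 0;
    }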
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 5614fee82347..3a84aaf1418b 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -292,11 +292,11 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
bool input = false;
int value = 0;
- if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage,
+ if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
feature = true;
field_index = index + sensor_inst->input_field_count;
- } else if (sscanf(attr->attr.name, "input-%d-%x-%s", &index, &usage,
+ } else if (sscanf(attr->attr.name, "input-%x-%x-%s", &index, &usage,
name) == 3) {
input = true;
field_index = index;
@@ -398,7 +398,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
char name[HID_CUSTOM_NAME_LENGTH];
int value;
- if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage,
+ if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
field_index = index + sensor_inst->input_field_count;
} else
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 658a607dc6d9..60875625cbdf 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -212,6 +212,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
__s32 value;
int ret = 0;
+ memset(buffer, 0, buffer_size);
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
if (!report || (field_index >= report->maxfield)) {
@@ -251,6 +252,9 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
int report_size;
int ret = 0;
+ u8 *val_ptr;
+ int buffer_index = 0;
+ int i;
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
@@ -271,7 +275,17 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
goto done_proc;
}
ret = min(report_size, buffer_size);
- memcpy(buffer, report->field[field_index]->value, ret);
+
+ val_ptr = (u8 *)report->field[field_index]->value;
+ for (i = 0; i < report->field[field_index]->report_count; ++i) {
+ if (buffer_index >= ret)
+ break;
+
+ memcpy(&((u8 *)buffer)[buffer_index], val_ptr,
+ report->field[field_index]->report_size / 8);
+ val_ptr += sizeof(__s32);
+ buffer_index += (report->field[field_index]->report_size / 8);
+ }
done_proc:
mutex_unlock(&data->mutex);
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index e2517c11e0ee..0c9ac4d5d850 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -638,6 +638,58 @@ eoi:
}
/**
+ * ish_disable_dma() - disable dma communication between host and ISHFW
+ * @dev: ishtp device pointer
+ *
+ * Clear the dma enable bit and wait for dma inactive.
+ *
+ * Return: 0 for success else error code.
+ */
+static int ish_disable_dma(struct ishtp_device *dev)
+{
+ unsigned int dma_delay;
+
+ /* Clear the dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
+
+ /* wait for dma inactive */
+ for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
+ _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
+ dma_delay += 5)
+ mdelay(5);
+
+ if (dma_delay >= MAX_DMA_DELAY) {
+ dev_err(dev->devc,
+ "Wait for DMA inactive timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_wakeup() - wakeup ishfw from waiting-for-host state
+ * @dev: ishtp device pointer
+ *
+ * Set the dma enable bit and send a void message to FW,
+ * it will wake up FW from waiting-for-host state.
+ */
+static void ish_wakeup(struct ishtp_device *dev)
+{
+ /* Set dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
+
+ /*
+ * Send 0 IPC message so that ISH FW wakes up if it was already
+ * asleep.
+ */
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+
+ /* Flush writes to doorbell and REMAP2 */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
+
+/**
* _ish_hw_reset() - HW reset
* @dev: ishtp device pointer
*
@@ -649,7 +701,6 @@ static int _ish_hw_reset(struct ishtp_device *dev)
{
struct pci_dev *pdev = dev->pdev;
int rv;
- unsigned int dma_delay;
uint16_t csr;
if (!pdev)
@@ -664,15 +715,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
return -EINVAL;
}
- /* Now trigger reset to FW */
- ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
-
- for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
- _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
- dma_delay += 5)
- mdelay(5);
-
- if (dma_delay >= MAX_DMA_DELAY) {
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
dev_err(&pdev->dev,
"Can't reset - stuck with DMA in-progress\n");
return -EBUSY;
@@ -690,16 +734,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
csr |= PCI_D0;
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
- ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
-
- /*
- * Send 0 IPC message so that ISH FW wakes up if it was already
- * asleep
- */
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
-
- /* Flush writes to doorbell and REMAP2 */
- ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+ /* Now we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
return 0;
}
@@ -758,16 +794,9 @@ static int _ish_ipc_reset(struct ishtp_device *dev)
int ish_hw_start(struct ishtp_device *dev)
{
ish_set_host_rdy(dev);
- /* After that we can enable ISH DMA operation */
- ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
- /*
- * Send 0 IPC message so that ISH FW wakes up if it was already
- * asleep
- */
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
- /* Flush write to doorbell */
- ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+ /* After that we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
set_host_ready(dev);
@@ -876,6 +905,21 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
*/
void ish_device_disable(struct ishtp_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (!pdev)
+ return;
+
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return;
+ }
+
+ /* Put ISH to D3hot state for power saving */
+ pci_set_power_state(pdev, PCI_D3hot);
+
dev->dev_state = ISHTP_DEV_DISABLED;
ish_clr_host_rdy(dev);
}
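ish_disable_dma() above waits for the DMA-active flag with a bounded busy-wait, since the reset paths it is called from cannot reliably sleep. The core of that loop, restated as a stand-alone sketch:

    /* Illustrative restatement of the bounded poll; MAX_DMA_DELAY is in ms. */
    static int wait_dma_idle(struct ishtp_device *dev)
    {
            unsigned int waited;

            for (waited = 0; waited < MAX_DMA_DELAY; waited += 5) {
                    if (!(_ish_read_fw_sts_reg(dev) & IPC_ISH_IN_DMA))
                            return 0;
                    mdelay(5);      /* busy-wait in 5 ms steps */
            }
            return -EBUSY;          /* firmware never went idle */
    }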
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 42f0beeb09fd..20d647d2dd2c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -146,7 +146,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
/* request and enable interrupt */
- ret = request_irq(pdev->irq, ish_irq_handler, IRQF_NO_SUSPEND,
+ ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
KBUILD_MODNAME, dev);
if (ret) {
dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
@@ -202,6 +202,7 @@ static void ish_remove(struct pci_dev *pdev)
kfree(ishtp_dev);
}
+#ifdef CONFIG_PM
static struct device *ish_resume_device;
/**
@@ -293,7 +294,6 @@ static int ish_resume(struct device *device)
return 0;
}
-#ifdef CONFIG_PM
static const struct dev_pm_ops ish_pm_ops = {
.suspend = ish_suspend,
.resume = ish_resume,
@@ -301,7 +301,7 @@ static const struct dev_pm_ops ish_pm_ops = {
#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
#else
#define ISHTP_ISH_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM */
static struct pci_driver ish_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 354d49ea36dd..e6cfd323babc 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -63,6 +63,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a259e18d22d5..0276d2ef06ee 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -961,7 +961,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
{
int ret = 0;
- dev_set_name(&child_device_obj->device, "vmbus-%pUl",
+ dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
child_device_obj->device.bus = &hv_bus;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index adae6848ffb2..a74c075a30ec 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -536,8 +536,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
GFP_KERNEL);
- if (!hwdev->groups)
- return ERR_PTR(-ENOMEM);
+ if (!hwdev->groups) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
attrs = __hwmon_create_attrs(dev, drvdata, chip);
if (IS_ERR(attrs)) {
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index d223650a97e4..11edabf425ae 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -59,7 +59,6 @@ config I2C_CHARDEV
config I2C_MUX
tristate "I2C bus multiplexing support"
- depends on HAS_IOMEM
help
Say Y here if you want the I2C core to support the ability to
handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 11e866d05368..b403fa5ecf49 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -91,9 +91,7 @@
DW_IC_INTR_TX_ABRT | \
DW_IC_INTR_STOP_DET)
-#define DW_IC_STATUS_ACTIVITY 0x1
-#define DW_IC_STATUS_TFE BIT(2)
-#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
+#define DW_IC_STATUS_ACTIVITY 0x1
#define DW_IC_SDA_HOLD_RX_SHIFT 16
#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
@@ -478,25 +476,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
u32 ic_tar = 0;
- bool enabled;
- enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1;
-
- if (enabled) {
- u32 ic_status;
-
- /*
- * Only disable adapter if ic_tar and ic_con can't be
- * dynamically updated
- */
- ic_status = dw_readl(dev, DW_IC_STATUS);
- if (!dev->dynamic_tar_update_enabled ||
- (ic_status & DW_IC_STATUS_MST_ACTIVITY) ||
- !(ic_status & DW_IC_STATUS_TFE)) {
- __i2c_dw_enable_and_wait(dev, false);
- enabled = false;
- }
- }
+ /* Disable the adapter */
+ __i2c_dw_enable_and_wait(dev, false);
/* if the slave address is ten bit address, enable 10BITADDR */
if (dev->dynamic_tar_update_enabled) {
@@ -526,8 +508,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* enforce disabled interrupts (due to HW issues) */
i2c_dw_disable_int(dev);
- if (!enabled)
- __i2c_dw_enable(dev, true);
+ /* Enable the adapter */
+ __i2c_dw_enable(dev, true);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
@@ -611,7 +593,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
/* avoid rx buffer overrun */
- if (rx_limit - dev->rx_outstanding <= 0)
+ if (dev->rx_outstanding >= dev->rx_fifo_depth)
break;
dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
@@ -708,8 +690,7 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
}
/*
- * Prepare controller for a transaction and start transfer by calling
- * i2c_dw_xfer_init()
+ * Prepare controller for a transaction and call i2c_dw_xfer_msg
*/
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -752,13 +733,23 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
goto done;
}
+ /*
+ * We must disable the adapter before returning and signaling the end
+ * of the current transfer. Otherwise the hardware might continue
+ * generating interrupts which in turn causes a race condition with
+ * the following transfer. It needs more investigation whether the
+ * additional interrupts are a hardware bug or this driver doesn't
+ * handle them correctly yet.
+ */
+ __i2c_dw_enable(dev, false);
+
if (dev->msg_err) {
ret = dev->msg_err;
goto done;
}
/* no error */
- if (likely(!dev->cmd_err)) {
+ if (likely(!dev->cmd_err && !dev->status)) {
ret = num;
goto done;
}
@@ -768,6 +759,11 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = i2c_dw_handle_tx_abort(dev);
goto done;
}
+
+ if (dev->status)
+ dev_err(dev->dev,
+ "transfer terminated early - interrupt latency too high?\n");
+
ret = -EIO;
done:
@@ -888,19 +884,9 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
*/
tx_aborted:
- if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
- || dev->msg_err) {
- /*
- * We must disable interruts before returning and signaling
- * the end of the current transfer. Otherwise the hardware
- * might continue generating interrupts for non-existent
- * transfers.
- */
- i2c_dw_disable_int(dev);
- dw_readl(dev, DW_IC_CLR_INTR);
-
+ if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
complete(&dev->cmd_complete);
- } else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+ else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
/* workaround to trigger pending interrupt */
stat = dw_readl(dev, DW_IC_INTR_MASK);
i2c_dw_disable_int(dev);
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 49f2084f7bb5..50813a24c541 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -347,7 +347,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
- clk_unprepare(i2c->clk);
+ clk_disable_unprepare(i2c->clk);
return ret;
}
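The digicolor fix pairs the error path with the probe-time clk_prepare_enable() call: an adapter registration failure must undo both the prepare and the enable step, hence clk_disable_unprepare() rather than clk_unprepare() alone. A sketch of the symmetric pattern (the function name is invented):

    static int example_probe(struct dc_i2c *i2c)
    {
            int ret;

            ret = clk_prepare_enable(i2c->clk);
            if (ret)
                    return ret;

            ret = i2c_add_adapter(&i2c->adap);
            if (ret < 0)
                    clk_disable_unprepare(i2c->clk);  /* undo enable and prepare */
            return ret;
    }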
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 419b54bfc7c7..5e63b17f935d 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -381,9 +381,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
- data[i] = octeon_i2c_data_read(i2c, &result);
- if (result)
- return result;
+ data[i] = octeon_i2c_data_read(i2c);
if (recv_len && i == 0) {
if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 1db7c835a454..87151ea74acd 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -5,7 +5,6 @@
#include <linux/i2c.h>
#include <linux/i2c-smbus.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -145,9 +144,9 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
u64 tmp;
__raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
-
- readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, tmp & SW_TWSI_V,
- I2C_OCTEON_EVENT_WAIT, i2c->adap.timeout);
+ do {
+ tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ } while ((tmp & SW_TWSI_V) != 0);
}
#define octeon_i2c_ctl_write(i2c, val) \
@@ -164,28 +163,24 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
- int *error)
+static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
{
u64 tmp;
- int ret;
__raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
+ do {
+ tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ } while ((tmp & SW_TWSI_V) != 0);
- ret = readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp,
- tmp & SW_TWSI_V, I2C_OCTEON_EVENT_WAIT,
- i2c->adap.timeout);
- if (error)
- *error = ret;
return tmp & 0xFF;
}
#define octeon_i2c_ctl_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
-#define octeon_i2c_data_read(i2c, error) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
+#define octeon_i2c_data_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
#define octeon_i2c_stat_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
/**
* octeon_i2c_read_int - read the TWSI_INT register
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 1704fc84d647..b432b64e307a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2179,6 +2179,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -2189,7 +2190,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
pr_debug("driver [%s] registered\n", driver->driver.name);
- INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
i2c_for_each_dev(driver, __process_new_driver);
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index e280c8ecc0b5..96de9ce5669b 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
config I2C_MUX_REG
tristate "Register-based I2C multiplexer"
+ depends on HAS_IOMEM
help
If you say yes to this option, support will be included for a
register based I2C multiplexer. This driver provides access to
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index b3893f6282ba..3e6fe1760d82 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -69,10 +69,28 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
goto err_with_revert;
}
- p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
+ /*
+ * Check if there are pinctrl states at all. Note: we can't use
+ * devm_pinctrl_get_select() because we need to distinguish between
+ * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
+ */
+ p = devm_pinctrl_get(adap->dev.parent);
if (IS_ERR(p)) {
ret = PTR_ERR(p);
- goto err_with_put;
+ /* continue if there are simply no pinctrl states (e.g. i2c-gpio), otherwise exit */
+ if (ret != -ENODEV)
+ goto err_with_put;
+ } else {
+ /* there are states. check and use them */
+ struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name);
+
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto err_with_put;
+ }
+ ret = pinctrl_select_state(p, s);
+ if (ret < 0)
+ goto err_with_put;
}
priv->chan[new_chan].parent_adap = adap;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 1091346f2480..8bc3d36d2837 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -268,9 +268,9 @@ static int pca954x_probe(struct i2c_client *client,
/* discard unconfigured channels */
break;
idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
- data->deselect |= (idle_disconnect_pd
- || idle_disconnect_dt) << num;
}
+ data->deselect |= (idle_disconnect_pd ||
+ idle_disconnect_dt) << num;
ret = i2c_mux_add_adapter(muxc, force, num, class);
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index da3fb069ec5c..ce69048c88e9 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = adata->current_fullscale->gain;
+ *val = adata->current_fullscale->gain / 1000000;
+ *val2 = adata->current_fullscale->gain % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = adata->odr;
@@ -763,9 +763,13 @@ static int st_accel_write_raw(struct iio_dev *indio_dev,
int err;
switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ case IIO_CHAN_INFO_SCALE: {
+ int gain;
+
+ gain = val * 1000000 + val2;
+ err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
break;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index dc33c1dd5191..b5beea53d6f6 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -30,26 +30,26 @@ static struct {
u32 usage_id;
int unit; /* 0 for default others from HID sensor spec */
int scale_val0; /* scale, whole number */
- int scale_val1; /* scale, fraction in micros */
+ int scale_val1; /* scale, fraction in nanos */
} unit_conversion[] = {
- {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650},
+ {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000},
{HID_USAGE_SENSOR_ACCEL_3D,
HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
{HID_USAGE_SENSOR_ACCEL_3D,
- HID_USAGE_SENSOR_UNITS_G, 9, 806650},
+ HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
- {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_GYRO_3D,
HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
{HID_USAGE_SENSOR_GYRO_3D,
- HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453},
+ HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293},
- {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000},
+ {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000},
{HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0},
- {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_INCLINOMETER_3D,
- HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453},
+ HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
{HID_USAGE_SENSOR_INCLINOMETER_3D,
HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0},
@@ -57,7 +57,7 @@ static struct {
{HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
{HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
- {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
+ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000},
};
static int pow_10(unsigned power)
@@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
/*
* This function applies the unit exponent to the scale.
* For example:
- * 9.806650 ->exp:2-> val0[980]val1[665000]
- * 9.000806 ->exp:2-> val0[900]val1[80600]
- * 0.174535 ->exp:2-> val0[17]val1[453500]
- * 1.001745 ->exp:0-> val0[1]val1[1745]
- * 1.001745 ->exp:2-> val0[100]val1[174500]
- * 1.001745 ->exp:4-> val0[10017]val1[450000]
- * 9.806650 ->exp:-2-> val0[0]val1[98066]
+ * 9.806650000 ->exp:2-> val0[980]val1[665000000]
+ * 9.000806000 ->exp:2-> val0[900]val1[80600000]
+ * 0.174535293 ->exp:2-> val0[17]val1[453529300]
+ * 1.001745329 ->exp:0-> val0[1]val1[1745329]
+ * 1.001745329 ->exp:2-> val0[100]val1[174532900]
+ * 1.001745329 ->exp:4-> val0[10017]val1[453290000]
+ * 9.806650000 ->exp:-2-> val0[0]val1[98066500]
*/
-static void adjust_exponent_micro(int *val0, int *val1, int scale0,
+static void adjust_exponent_nano(int *val0, int *val1, int scale0,
int scale1, int exp)
{
int i;
@@ -285,32 +285,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0,
if (exp > 0) {
*val0 = scale0 * pow_10(exp);
res = 0;
- if (exp > 6) {
+ if (exp > 9) {
*val1 = 0;
return;
}
for (i = 0; i < exp; ++i) {
- x = scale1 / pow_10(5 - i);
+ x = scale1 / pow_10(8 - i);
res += (pow_10(exp - 1 - i) * x);
- scale1 = scale1 % pow_10(5 - i);
+ scale1 = scale1 % pow_10(8 - i);
}
*val0 += res;
*val1 = scale1 * pow_10(exp);
} else if (exp < 0) {
exp = abs(exp);
- if (exp > 6) {
+ if (exp > 9) {
*val0 = *val1 = 0;
return;
}
*val0 = scale0 / pow_10(exp);
rem = scale0 % pow_10(exp);
res = 0;
- for (i = 0; i < (6 - exp); ++i) {
- x = scale1 / pow_10(5 - i);
- res += (pow_10(5 - exp - i) * x);
- scale1 = scale1 % pow_10(5 - i);
+ for (i = 0; i < (9 - exp); ++i) {
+ x = scale1 / pow_10(8 - i);
+ res += (pow_10(8 - exp - i) * x);
+ scale1 = scale1 % pow_10(8 - i);
}
- *val1 = rem * pow_10(6 - exp) + res;
+ *val1 = rem * pow_10(9 - exp) + res;
} else {
*val0 = scale0;
*val1 = scale1;
@@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id,
unit_conversion[i].unit == attr_info->units) {
exp = hid_sensor_convert_exponent(
attr_info->unit_expo);
- adjust_exponent_micro(val0, val1,
+ adjust_exponent_nano(val0, val1,
unit_conversion[i].scale_val0,
unit_conversion[i].scale_val1, exp);
break;
}
}
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
EXPORT_SYMBOL(hid_sensor_format_scale);
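With the table above now carrying the fractional scale in nanos, adjust_exponent_nano() shifts a (whole, nano-fraction) pair by a decimal exponent using only 32-bit operations. For intuition, the same shift can be written much more compactly with 64-bit helpers; this is an illustration of the arithmetic, not the driver's implementation:

    #include <linux/math64.h>

    /* Shift a value expressed as val0 + val1/1e9 by 10^exp (no overflow handling). */
    static void shift_nano(int *val0, int *val1, int exp)
    {
            s64 total = (s64)*val0 * 1000000000 + *val1;

            for (; exp > 0; exp--)
                    total *= 10;
            for (; exp < 0; exp++)
                    total = div_s64(total, 10);

            *val0 = div_s64_rem(total, 1000000000, val1);
    }

For example, 9.806650000 with exp = 2 yields val0 = 980 and val1 = 665000000, matching the table in the comment above.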
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 285a64a589d7..975a1f19f747 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -612,7 +612,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i, len = 0;
+ int i, len = 0, q, r;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
@@ -621,8 +621,10 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- sdata->sensor_settings->fs.fs_avl[i].gain);
+ q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000;
+ r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
}
mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
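The sysfs change above prints the gain, stored in micro-units, with a true integer part instead of hard-coding a leading "0."; the split is just quotient and remainder by 1000000. A minimal sketch with a hypothetical value:

    /* Illustrative only: 9806650 micro-units prints as "9.806650". */
    static void format_gain(char *buf, size_t len, unsigned int gain_micro)
    {
            scnprintf(buf, len, "%u.%06u",
                      gain_micro / 1000000, gain_micro % 1000000);
    }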
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index b98b9d94d184..a97e802ca523 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = {
.id_table = hid_dev_rot_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_dev_rot_probe,
.remove = hid_dev_rot_remove,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 066161a4bccd..f962f31a5eb2 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -136,6 +136,8 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
*val = be32_to_cpu(buf32);
break;
+ default:
+ ret = -EINVAL;
}
if (ret)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index b136d3acc5bd..0f58f46dbad7 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel);
struct resolve_cb_context {
struct rdma_dev_addr *addr;
struct completion comp;
+ int status;
};
static void resolve_cb(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context)
{
- memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
- rdma_dev_addr));
+ if (!status)
+ memcpy(((struct resolve_cb_context *)context)->addr,
+ addr, sizeof(struct rdma_dev_addr));
+ ((struct resolve_cb_context *)context)->status = status;
complete(&((struct resolve_cb_context *)context)->comp);
}
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
wait_for_completion(&ctx.comp);
+ ret = ctx.status;
+ if (ret)
+ return ret;
+
memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
if (!dev)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c99525512b34..71c7c4c328ef 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -80,6 +80,8 @@ static struct ib_cm {
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
+ /* Sync on cm change port state */
+ spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
@@ -161,6 +163,8 @@ struct cm_port {
struct ib_mad_agent *mad_agent;
struct kobject port_obj;
u8 port_num;
+ struct list_head cm_priv_prim_list;
+ struct list_head cm_priv_altr_list;
struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};
@@ -241,6 +245,12 @@ struct cm_id_private {
u8 service_timeout;
u8 target_ack_delay;
+ struct list_head prim_list;
+ struct list_head altr_list;
+ /* Indicates that the send port mad is registered and av is set */
+ int prim_send_port_not_ready;
+ int altr_send_port_not_ready;
+
struct list_head work_list;
atomic_t work_count;
};
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
struct ib_ah *ah;
+ struct cm_av *av;
+ unsigned long flags, flags2;
+ int ret = 0;
+ /* don't let the port be released till the agent is down */
+ spin_lock_irqsave(&cm.state_lock, flags2);
+ spin_lock_irqsave(&cm.lock, flags);
+ if (!cm_id_priv->prim_send_port_not_ready)
+ av = &cm_id_priv->av;
+ else if (!cm_id_priv->altr_send_port_not_ready &&
+ (cm_id_priv->alt_av.port))
+ av = &cm_id_priv->alt_av;
+ else {
+ pr_info("%s: not valid CM id\n", __func__);
+ ret = -ENODEV;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&cm.lock, flags);
+ /* Make sure the port hasn't released the mad yet */
mad_agent = cm_id_priv->av.port->mad_agent;
- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
- if (IS_ERR(ah))
- return PTR_ERR(ah);
+ if (!mad_agent) {
+ pr_info("%s: not a valid MAD agent\n", __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
+ if (IS_ERR(ah)) {
+ ret = PTR_ERR(ah);
+ goto out;
+ }
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
- cm_id_priv->av.pkey_index,
+ av->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
- return PTR_ERR(m);
+ ret = PTR_ERR(m);
+ goto out;
}
/* Timeout set by caller if response is expected. */
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
atomic_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
- return 0;
+
+out:
+ spin_unlock_irqrestore(&cm.state_lock, flags2);
+ return ret;
}
static int cm_alloc_response_msg(struct cm_port *port,
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
+ struct cm_id_private *cm_id_priv)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
- return 0;
+ spin_lock_irqsave(&cm.lock, flags);
+ if (&cm_id_priv->av == av)
+ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
+ else if (&cm_id_priv->alt_av == av)
+ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
+ else
+ ret = -EINVAL;
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+
+ return ret;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
+ INIT_LIST_HEAD(&cm_id_priv->prim_list);
+ INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
atomic_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
@@ -892,6 +945,15 @@ retest:
break;
}
+ spin_lock_irq(&cm.lock);
+ if (!list_empty(&cm_id_priv->altr_list) &&
+ (!cm_id_priv->altr_send_port_not_ready))
+ list_del(&cm_id_priv->altr_list);
+ if (!list_empty(&cm_id_priv->prim_list) &&
+ (!cm_id_priv->prim_send_port_not_ready))
+ list_del(&cm_id_priv->prim_list);
+ spin_unlock_irq(&cm.lock);
+
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+ cm_id_priv);
if (ret)
goto error1;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path,
- &cm_id_priv->alt_av);
+ &cm_id_priv->alt_av, cm_id_priv);
if (ret)
goto error1;
}
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
dev_put(gid_attr.ndev);
}
work->path[0].gid_type = gid_attr.gid_type;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ cm_id_priv);
}
if (ret) {
int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
goto rejected;
}
if (req_msg->alt_local_lid) {
- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
+ cm_id_priv);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+ cm_id_priv);
if (ret)
goto out;
cm_id_priv->alt_av.timeout =
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
+ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+ cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
if (ret)
goto out;
@@ -3468,7 +3535,9 @@ out:
static int cm_migrate(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
+ struct cm_av tmp_av;
unsigned long flags;
+ int tmp_send_port_not_ready;
int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
(cm_id->lap_state == IB_CM_LAP_UNINIT ||
cm_id->lap_state == IB_CM_LAP_IDLE)) {
cm_id->lap_state = IB_CM_LAP_IDLE;
+ /* Swap address vector */
+ tmp_av = cm_id_priv->av;
cm_id_priv->av = cm_id_priv->alt_av;
+ cm_id_priv->alt_av = tmp_av;
+ /* Swap port send ready state */
+ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
+ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
+ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
} else
ret = -EINVAL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
port->cm_dev = cm_dev;
port->port_num = i;
+ INIT_LIST_HEAD(&port->cm_priv_prim_list);
+ INIT_LIST_HEAD(&port->cm_priv_altr_list);
+
ret = cm_create_port_fs(port);
if (ret)
goto error1;
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
struct cm_device *cm_dev = client_data;
struct cm_port *port;
+ struct cm_id_private *cm_id_priv;
+ struct ib_mad_agent *cur_mad_agent;
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_CM_SUP
};
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+ /* Mark all the cm_id's as not valid */
+ spin_lock_irq(&cm.lock);
+ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
+ cm_id_priv->altr_send_port_not_ready = 1;
+ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
+ cm_id_priv->prim_send_port_not_ready = 1;
+ spin_unlock_irq(&cm.lock);
/*
* We flush the queue here after going_down is set; this
* verifies that no new work will be queued in the recv handler,
* after which we can call unregister_mad_agent
*/
flush_workqueue(cm.wq);
- ib_unregister_mad_agent(port->mad_agent);
+ spin_lock_irq(&cm.state_lock);
+ cur_mad_agent = port->mad_agent;
+ port->mad_agent = NULL;
+ spin_unlock_irq(&cm.state_lock);
+ ib_unregister_mad_agent(cur_mad_agent);
cm_remove_port_fs(port);
}
+
device_unregister(cm_dev->device);
kfree(cm_dev);
}
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
+ spin_lock_init(&cm.state_lock);
cm.listen_service_table = RB_ROOT;
cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
cm.remote_id_table = RB_ROOT;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 36bf50ebb187..2a6fc47a1dfb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1094,47 +1094,47 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
}
}
-static void cma_save_ip4_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip4_info(struct sockaddr_in *src_addr,
+ struct sockaddr_in *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in *ip4;
-
if (src_addr) {
- ip4 = (struct sockaddr_in *)src_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
- ip4->sin_port = local_port;
+ *src_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
+ .sin_port = local_port,
+ };
}
if (dst_addr) {
- ip4 = (struct sockaddr_in *)dst_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
- ip4->sin_port = hdr->port;
+ *dst_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->src_addr.ip4.addr,
+ .sin_port = hdr->port,
+ };
}
}
-static void cma_save_ip6_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in6 *ip6;
-
if (src_addr) {
- ip6 = (struct sockaddr_in6 *)src_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->dst_addr.ip6;
- ip6->sin6_port = local_port;
+ *src_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->dst_addr.ip6,
+ .sin6_port = local_port,
+ };
}
if (dst_addr) {
- ip6 = (struct sockaddr_in6 *)dst_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->src_addr.ip6;
- ip6->sin6_port = hdr->port;
+ *dst_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->src_addr.ip6,
+ .sin6_port = hdr->port,
+ };
}
}
@@ -1159,10 +1159,12 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
switch (cma_get_ip_ver(hdr)) {
case 4:
- cma_save_ip4_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip4_info((struct sockaddr_in *)src_addr,
+ (struct sockaddr_in *)dst_addr, hdr, port);
break;
case 6:
- cma_save_ip6_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
+ (struct sockaddr_in6 *)dst_addr, hdr, port);
break;
default:
return -EAFNOSUPPORT;
@@ -2436,6 +2438,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+ unsigned long supported_gids,
+ enum ib_gid_type default_gid)
+{
+ if ((network_type == RDMA_NETWORK_IPV4 ||
+ network_type == RDMA_NETWORK_IPV6) &&
+ test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+ return default_gid;
+}
+
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2461,6 +2475,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->num_paths = 1;
if (addr->dev_addr.bound_dev_if) {
+ unsigned long supported_gids;
+
ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
if (!ndev) {
ret = -ENODEV;
@@ -2484,7 +2500,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->net = &init_net;
route->path_rec->ifindex = ndev->ifindex;
- route->path_rec->gid_type = id_priv->gid_type;
+ supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+ id_priv->id.port_num);
+ route->path_rec->gid_type =
+ cma_route_gid_type(addr->dev_addr.network,
+ supported_gids,
+ id_priv->gid_type);
}
if (!ndev) {
ret = -ENODEV;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 224ad274ea0b..84b4eff90395 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base = addr & PAGE_MASK;
- if (npages == 0) {
+ if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 0012fa58c105..44b1104eb168 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
container_of(uobj, struct ib_uqp_object, uevent.uobject);
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
- if (qp != qp->real_qp) {
- ib_close_qp(qp);
- } else {
+ if (qp == qp->real_qp)
ib_uverbs_detach_umcast(qp, uqp);
- ib_destroy_qp(qp);
- }
+ ib_destroy_qp(qp);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 867b8cf82be8..19c6477af19f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -666,18 +666,6 @@ skip_cqe:
return ret;
}
-static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
-{
- struct c4iw_mr *mhp;
- unsigned long flags;
-
- spin_lock_irqsave(&rhp->lock, flags);
- mhp = get_mhp(rhp, rkey >> 8);
- if (mhp)
- mhp->attr.state = 0;
- spin_unlock_irqrestore(&rhp->lock, flags);
-}
-
/*
* Get one cq entry from c4iw and map it to openib.
*
@@ -733,7 +721,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
+ c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
}
} else {
switch (CQE_OPCODE(&cqe)) {
@@ -762,7 +750,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
/* Invalidate the MR if the fastreg failed */
if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
- invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe));
+ c4iw_invalidate_mr(qhp->rhp,
+ CQE_WRID_FR_STAG(&cqe));
break;
default:
printk(KERN_ERR MOD "Unexpected opcode %d "
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7e7f79e55006..4788e1a46fde 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -999,6 +999,6 @@ extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_drain_rq(struct ib_qp *qp);
void c4iw_drain_sq(struct ib_qp *qp);
-
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 80e27749420a..410408f886c1 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -770,3 +770,15 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
kfree(mhp);
return 0;
}
+
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
+{
+ struct c4iw_mr *mhp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rhp->lock, flags);
+ mhp = get_mhp(rhp, rkey >> 8);
+ if (mhp)
+ mhp->attr.state = 0;
+ spin_unlock_irqrestore(&rhp->lock, flags);
+}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index f57deba6717c..b7ac97b27c88 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -706,12 +706,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
- struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8);
-
- mhp->attr.state = 0;
wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->inv.r2 = 0;
*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
@@ -797,11 +793,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -EINVAL;
}
num_wrs = t4_sq_avail(&qhp->wq);
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -ENOMEM;
}
while (wr) {
@@ -840,10 +838,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ_WITH_INV:
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+ if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
+ c4iw_invalidate_mr(qhp->rhp,
+ wr->sg_list[0].lkey);
fw_flags = FW_RI_RDMA_READ_INVALIDATE;
- else
+ } else {
fw_flags = 0;
+ }
err = build_rdma_read(wqe, wr, &len16);
if (err)
break;
@@ -876,7 +877,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
fw_opcode = FW_RI_INV_LSTAG_WR;
swsqe->opcode = FW_RI_LOCAL_INV;
- err = build_inv_stag(qhp->rhp, wqe, wr, &len16);
+ err = build_inv_stag(wqe, wr, &len16);
+ c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
break;
default:
PDBG("%s post of type=%d TBD!\n", __func__,
@@ -934,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -EINVAL;
}
num_wrs = t4_rq_avail(&qhp->wq);
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -ENOMEM;
}
while (wr) {
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index a26a9a0bfc41..67ea85a56945 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -775,75 +775,3 @@ void hfi1_put_proc_affinity(int cpu)
}
mutex_unlock(&affinity->lock);
}
-
-int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
- size_t count)
-{
- struct hfi1_affinity_node *entry;
- cpumask_var_t mask;
- int ret, i;
-
- mutex_lock(&node_affinity.lock);
- entry = node_affinity_lookup(dd->node);
-
- if (!entry) {
- ret = -EINVAL;
- goto unlock;
- }
-
- ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
- if (!ret) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = cpulist_parse(buf, mask);
- if (ret)
- goto out;
-
- if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
- dd_dev_warn(dd, "Invalid CPU mask\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* reset the SDMA interrupt affinity details */
- init_cpu_mask_set(&entry->def_intr);
- cpumask_copy(&entry->def_intr.mask, mask);
-
- /* Reassign the affinity for each SDMA interrupt. */
- for (i = 0; i < dd->num_msix_entries; i++) {
- struct hfi1_msix_entry *msix;
-
- msix = &dd->msix_entries[i];
- if (msix->type != IRQ_SDMA)
- continue;
-
- ret = get_irq_affinity(dd, msix);
-
- if (ret)
- break;
- }
-out:
- free_cpumask_var(mask);
-unlock:
- mutex_unlock(&node_affinity.lock);
- return ret ? ret : strnlen(buf, PAGE_SIZE);
-}
-
-int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
-{
- struct hfi1_affinity_node *entry;
-
- mutex_lock(&node_affinity.lock);
- entry = node_affinity_lookup(dd->node);
-
- if (!entry) {
- mutex_unlock(&node_affinity.lock);
- return -EINVAL;
- }
-
- cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
- mutex_unlock(&node_affinity.lock);
- return strnlen(buf, PAGE_SIZE);
-}
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index b89ea3c0ee1a..42e63316afd1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -102,10 +102,6 @@ int hfi1_get_proc_affinity(int);
/* Release a CPU used by a user process. */
void hfi1_put_proc_affinity(int);
-int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf);
-int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
- size_t count);
-
struct hfi1_affinity_node {
int node;
struct cpu_mask_set def_intr;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9bf5f23544d4..24d0820873cf 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6301,19 +6301,8 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
/* leave shared count at zero for both global and VL15 */
write_global_credit(dd, vau, vl15buf, 0);
- /* We may need some credits for another VL when sending packets
- * with the snoop interface. Dividing it down the middle for VL15
- * and VL0 should suffice.
- */
- if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
- write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
- << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
- write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
- << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
- } else {
- write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
- << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
- }
+ write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
+ << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
}
/*
@@ -9915,9 +9904,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
u32 mask = ~((1U << ppd->lmc) - 1);
u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
- if (dd->hfi1_snoop.mode_flag)
- dd_dev_info(dd, "Set lid/lmc while snooping");
-
c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
@@ -12112,7 +12098,7 @@ static void update_synth_timer(unsigned long opaque)
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
-#define C_MAX_NAME 13 /* 12 chars + one for /0 */
+#define C_MAX_NAME 16 /* 15 chars + one for \0 */
static int init_cntrs(struct hfi1_devdata *dd)
{
int i, rcv_ctxts, j;
@@ -14463,7 +14449,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
* Any error printing is already done by the init code.
* On return, we have the chip mapped.
*/
- ret = hfi1_pcie_ddinit(dd, pdev, ent);
+ ret = hfi1_pcie_ddinit(dd, pdev);
if (ret < 0)
goto bail_free;
@@ -14691,6 +14677,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_free_cntrs;
+ init_completion(&dd->user_comp);
+
+ /* The user refcount starts with one to indicate an active device */
+ atomic_set(&dd->user_refcount, 1);
+
goto bail;
bail_free_rcverr:
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 92345259a8f4..043fd21dc5f3 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -320,6 +320,9 @@
/* DC_DC8051_CFG_MODE.GENERAL bits */
#define DISABLE_SELF_GUID_CHECK 0x2
+/* Bad L2 frame error code */
+#define BAD_L2_ERR 0x6
+
/*
* Eager buffer minimum and maximum sizes supported by the hardware.
* All power-of-two sizes in between are supported as well.
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 6563e4d38b80..c5efff29c147 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -599,7 +599,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
dd->rhf_offset;
struct rvt_qp *qp;
struct ib_header *hdr;
- struct ib_other_headers *ohdr;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
@@ -615,18 +614,21 @@ static void __prescan_rxq(struct hfi1_packet *packet)
if (etype != RHF_RCV_TYPE_IB)
goto next;
- hdr = hfi1_get_msgheader(dd, rhf_addr);
+ packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
+ hdr = packet->hdr;
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
+ if (lnh == HFI1_LRH_BTH) {
+ packet->ohdr = &hdr->u.oth;
+ } else if (lnh == HFI1_LRH_GRH) {
+ packet->ohdr = &hdr->u.l.oth;
+ packet->rcv_flags |= HFI1_HAS_GRH;
+ } else {
goto next; /* just in case */
+ }
- bth1 = be32_to_cpu(ohdr->bth[1]);
+ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
if (!is_ecn)
@@ -646,7 +648,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
/* turn off BECN, FECN */
bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
- ohdr->bth[1] = cpu_to_be32(bth1);
+ packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
update_ps_mdata(&mdata, rcd);
}
@@ -1360,12 +1362,25 @@ int process_receive_ib(struct hfi1_packet *packet)
int process_receive_bypass(struct hfi1_packet *packet)
{
+ struct hfi1_devdata *dd = packet->rcd->dd;
+
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
- dd_dev_err(packet->rcd->dd,
+ dd_dev_err(dd,
"Bypass packets are not supported in normal operation. Dropping\n");
- incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors);
+ incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
+ if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) {
+ u64 *flits = packet->ebuf;
+
+ if (flits && !(packet->rhf & RHF_LEN_ERR)) {
+ dd->err_info_rcvport.packet_flit1 = flits[0];
+ dd->err_info_rcvport.packet_flit2 =
+ packet->tlen > sizeof(flits[0]) ? flits[1] : 0;
+ }
+ dd->err_info_rcvport.status_and_code |=
+ (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
+ }
return RHF_RCV_CONTINUE;
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 677efa0e8cd6..bd786b7bd30b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -172,6 +172,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
struct hfi1_devdata,
user_cdev);
+ if (!atomic_inc_not_zero(&dd->user_refcount))
+ return -ENXIO;
+
/* Just take a ref now. Not all opens result in a context assign */
kobject_get(&dd->kobj);
@@ -183,11 +186,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
atomic_inc(&fd->mm->mm_count);
- }
+ fp->private_data = fd;
+ } else {
+ fp->private_data = NULL;
+
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
- fp->private_data = fd;
+ return -ENOMEM;
+ }
- return fd ? 0 : -ENOMEM;
+ return 0;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -798,6 +807,10 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
done:
mmdrop(fdata->mm);
kobject_put(&dd->kobj);
+
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
kfree(fdata);
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 7eef11b316ff..cc87fd4e534b 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -367,26 +367,6 @@ struct hfi1_packet {
u8 etype;
};
-/*
- * Private data for snoop/capture support.
- */
-struct hfi1_snoop_data {
- int mode_flag;
- struct cdev cdev;
- struct device *class_dev;
- /* protect snoop data */
- spinlock_t snoop_lock;
- struct list_head queue;
- wait_queue_head_t waitq;
- void *filter_value;
- int (*filter_callback)(void *hdr, void *data, void *value);
- u64 dcc_cfg; /* saved value of DCC Cfg register */
-};
-
-/* snoop mode_flag values */
-#define HFI1_PORT_SNOOP_MODE 1U
-#define HFI1_PORT_CAPTURE_MODE 2U
-
struct rvt_sge_state;
/*
@@ -613,8 +593,6 @@ struct hfi1_pportdata {
struct mutex hls_lock;
u32 host_link_state;
- spinlock_t sdma_alllock ____cacheline_aligned_in_smp;
-
u32 lstate; /* logical link state */
/* these are the "32 bit" regs */
@@ -1104,8 +1082,6 @@ struct hfi1_devdata {
char *portcntrnames;
size_t portcntrnameslen;
- struct hfi1_snoop_data hfi1_snoop;
-
struct err_info_rcvport err_info_rcvport;
struct err_info_constraint err_info_rcv_constraint;
struct err_info_constraint err_info_xmit_constraint;
@@ -1141,8 +1117,8 @@ struct hfi1_devdata {
rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
/*
- * Handlers for outgoing data so that snoop/capture does not
- * have to have its hooks in the send path
+ * Capability to have different send engines simply by changing a
+ * pointer value.
*/
send_routine process_pio_send;
send_routine process_dma_send;
@@ -1174,6 +1150,10 @@ struct hfi1_devdata {
spinlock_t aspm_lock;
/* Number of verbs contexts which have disabled ASPM */
atomic_t aspm_disabled_cnt;
+ /* Keeps track of user space clients */
+ atomic_t user_refcount;
+ /* Used to wait for outstanding user space clients before dev removal */
+ struct completion user_comp;
struct hfi1_affinity *affinity;
struct rhashtable sdma_rht;
@@ -1221,8 +1201,6 @@ struct hfi1_devdata *hfi1_lookup(int unit);
extern u32 hfi1_cpulist_count;
extern unsigned long *hfi1_cpulist;
-extern unsigned int snoop_drop_send;
-extern unsigned int snoop_force_capture;
int hfi1_init(struct hfi1_devdata *, int);
int hfi1_count_units(int *npresentp, int *nupp);
int hfi1_count_active_units(void);
@@ -1557,13 +1535,6 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
-int snoop_recv_handler(struct hfi1_packet *packet);
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
- u64 pbc, const void *from, size_t count);
int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
@@ -1763,8 +1734,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
void hfi1_pcie_cleanup(struct pci_dev *);
-int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *,
- const struct pci_device_id *);
+int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
void hfi1_pcie_flr(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *);
@@ -1799,8 +1769,6 @@ int kdeth_process_expected(struct hfi1_packet *packet);
int kdeth_process_eager(struct hfi1_packet *packet);
int process_receive_invalid(struct hfi1_packet *packet);
-extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
-
void update_sge(struct rvt_sge_state *ss, u32 length);
/* global module parameter variables */
@@ -1827,9 +1795,6 @@ extern struct mutex hfi1_mutex;
#define DRIVER_NAME "hfi1"
#define HFI1_USER_MINOR_BASE 0
#define HFI1_TRACE_MINOR 127
-#define HFI1_DIAGPKT_MINOR 128
-#define HFI1_DIAG_MINOR_BASE 129
-#define HFI1_SNOOP_CAPTURE_BASE 200
#define HFI1_NMINORS 255
#define PCI_VENDOR_ID_INTEL 0x8086
@@ -1848,7 +1813,13 @@ extern struct mutex hfi1_mutex;
static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
u16 ctxt_type)
{
- u64 base_sc_integrity =
+ u64 base_sc_integrity;
+
+ /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+ if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+ return 0;
+
+ base_sc_integrity =
SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
@@ -1863,7 +1834,6 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
@@ -1872,18 +1842,23 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
else
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
- if (is_ax(dd))
- /* turn off send-side job key checks - A0 */
- return base_sc_integrity &
- ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ /* turn on send-side job key checks if !A0 */
+ if (!is_ax(dd))
+ base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
return base_sc_integrity;
}
static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
{
- u64 base_sdma_integrity =
+ u64 base_sdma_integrity;
+
+ /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+ if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+ return 0;
+
+ base_sdma_integrity =
SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
@@ -1895,14 +1870,18 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
- if (is_ax(dd))
- /* turn off send-side job key checks - A0 */
- return base_sdma_integrity &
- ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
+ base_sdma_integrity |=
+ SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
+
+ /* turn on send-side job key checks if !A0 */
+ if (!is_ax(dd))
+ base_sdma_integrity |=
+ SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
return base_sdma_integrity;
}
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 60db61536fed..e3b5bc93bc70 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -144,6 +144,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
struct hfi1_ctxtdata *rcd;
ppd = dd->pport + (i % dd->num_pports);
+
+ /* dd->rcd[i] gets assigned inside the callee */
rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
if (!rcd) {
dd_dev_err(dd,
@@ -169,8 +171,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
if (!rcd->sc) {
dd_dev_err(dd,
"Unable to allocate kernel send context, failing\n");
- dd->rcd[rcd->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, rcd);
goto nomem;
}
@@ -178,9 +178,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
if (ret < 0) {
dd_dev_err(dd,
"Failed to setup kernel receive context, failing\n");
- sc_free(rcd->sc);
- dd->rcd[rcd->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, rcd);
ret = -EFAULT;
goto bail;
}
@@ -196,6 +193,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
nomem:
ret = -ENOMEM;
bail:
+ if (dd->rcd) {
+ for (i = 0; i < dd->num_rcv_contexts; ++i)
+ hfi1_free_ctxtdata(dd, dd->rcd[i]);
+ }
kfree(dd->rcd);
dd->rcd = NULL;
return ret;
@@ -216,7 +217,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
dd->num_rcv_contexts - dd->first_user_ctxt)
kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
(dd->num_rcv_contexts - dd->first_user_ctxt));
- rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+ rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
if (rcd) {
u32 rcvtids, max_entries;
@@ -261,13 +262,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
}
rcd->eager_base = base * dd->rcv_entries.group_size;
- /* Validate and initialize Rcv Hdr Q variables */
- if (rcvhdrcnt % HDRQ_INCREMENT) {
- dd_dev_err(dd,
- "ctxt%u: header queue count %d must be divisible by %lu\n",
- rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
- goto bail;
- }
rcd->rcvhdrq_cnt = rcvhdrcnt;
rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
/*
@@ -506,7 +500,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
mutex_init(&ppd->hls_lock);
- spin_lock_init(&ppd->sdma_alllock);
spin_lock_init(&ppd->qsfp_info.qsfp_lock);
ppd->qsfp_info.ppd = ppd;
@@ -1399,28 +1392,43 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
hfi1_free_devdata(dd);
}
+static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
+{
+ if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
+ hfi1_early_err(dev, "Receive header queue count too small\n");
+ return -EINVAL;
+ }
+
+ if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
+ hfi1_early_err(dev,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
+ return -EINVAL;
+ }
+
+ if (thecnt % HDRQ_INCREMENT) {
+ hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
+ thecnt, HDRQ_INCREMENT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0, j, pidx, initfail;
- struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
+ struct hfi1_devdata *dd;
struct hfi1_pportdata *ppd;
/* First, lock the non-writable module parameters */
HFI1_CAP_LOCK();
/* Validate some global module parameters */
- if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(&pdev->dev, "Header queue count too small\n");
- ret = -EINVAL;
- goto bail;
- }
- if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(&pdev->dev,
- "Receive header queue count cannot be greater than %u\n",
- HFI1_MAX_HDRQ_EGRBUF_CNT);
- ret = -EINVAL;
+ ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
+ if (ret)
goto bail;
- }
+
/* use the encoding function as a sanitization check */
if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
@@ -1461,26 +1469,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto bail;
- /*
- * Do device-specific initialization, function table setup, dd
- * allocation, etc.
- */
- switch (ent->device) {
- case PCI_DEVICE_ID_INTEL0:
- case PCI_DEVICE_ID_INTEL1:
- dd = hfi1_init_dd(pdev, ent);
- break;
- default:
+ if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
+ ent->device == PCI_DEVICE_ID_INTEL1)) {
hfi1_early_err(&pdev->dev,
"Failing on unknown Intel deviceid 0x%x\n",
ent->device);
ret = -ENODEV;
+ goto clean_bail;
}
- if (IS_ERR(dd))
+ /*
+ * Do device-specific initialization, function table setup, dd
+ * allocation, etc.
+ */
+ dd = hfi1_init_dd(pdev, ent);
+
+ if (IS_ERR(dd)) {
ret = PTR_ERR(dd);
- if (ret)
goto clean_bail; /* error already printed */
+ }
ret = create_workqueues(dd);
if (ret)
@@ -1538,12 +1545,31 @@ bail:
return ret;
}
+static void wait_for_clients(struct hfi1_devdata *dd)
+{
+ /*
+ * Remove the device init value and complete the device if there are
+ * no clients, or wait for active clients to finish.
+ */
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
+ wait_for_completion(&dd->user_comp);
+}
+
static void remove_one(struct pci_dev *pdev)
{
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
/* close debugfs files before ib unregister */
hfi1_dbg_ibdev_exit(&dd->verbs_dev);
+
+ /* remove the /dev hfi1 interface */
+ hfi1_device_remove(dd);
+
+ /* wait for existing user space clients to finish */
+ wait_for_clients(dd);
+
/* unregister from IB core */
hfi1_unregister_ib_device(dd);
@@ -1558,8 +1584,6 @@ static void remove_one(struct pci_dev *pdev)
/* wait until all of our (qsfp) queue_work() calls complete */
flush_workqueue(ib_wq);
- hfi1_device_remove(dd);
-
postinit_cleanup(dd);
}
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 89c68da1c273..4ac8f330c5cb 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -157,8 +157,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev)
* fields required to re-initialize after a chip reset, or for
* various other purposes
*/
-int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
{
unsigned long len;
resource_size_t addr;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 50a3a36d9363..d89b8745d4c1 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -668,19 +668,12 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
void set_pio_integrity(struct send_context *sc)
{
struct hfi1_devdata *dd = sc->dd;
- u64 reg = 0;
u32 hw_context = sc->hw_context;
int type = sc->type;
- /*
- * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if
- * we're snooping.
- */
- if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
- dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
- reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
-
- write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
+ write_kctxt_csr(dd, hw_context,
+ SC(CHECK_ENABLE),
+ hfi1_pkt_default_send_ctxt_mask(dd, type));
}
static u32 get_buffers_allocated(struct send_context *sc)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 8bc5013f39a1..83198a8a8797 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -89,7 +89,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_WAIT_RNR;
- qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
+ priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
add_timer(&priv->s_rnr_timer);
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index fd39bcaa062d..9cbe52d21077 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -2009,11 +2009,6 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
}
-#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
-(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-
-#define SET_STATIC_RATE_CONTROL_SMASK(r) \
-(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
/*
* set_sdma_integrity
*
@@ -2022,19 +2017,9 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
static void set_sdma_integrity(struct sdma_engine *sde)
{
struct hfi1_devdata *dd = sde->dd;
- u64 reg;
-
- if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
- return;
-
- reg = hfi1_pkt_base_sdma_integrity(dd);
-
- if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
- CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
- else
- SET_STATIC_RATE_CONTROL_SMASK(reg);
- write_sde_csr(sde, SD(CHECK_ENABLE), reg);
+ write_sde_csr(sde, SD(CHECK_ENABLE),
+ hfi1_pkt_base_sdma_integrity(dd));
}
static void init_sdma_regs(
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index edba22461a9c..919a5474e651 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -49,7 +49,6 @@
#include "hfi.h"
#include "mad.h"
#include "trace.h"
-#include "affinity.h"
/*
* Start of per-port congestion control structures and support code
@@ -623,27 +622,6 @@ static ssize_t show_tempsense(struct device *device,
return ret;
}
-static ssize_t show_sdma_affinity(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- return hfi1_get_sdma_affinity(dd, buf);
-}
-
-static ssize_t store_sdma_affinity(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- return hfi1_set_sdma_affinity(dd, buf, count);
-}
-
/*
* end of per-unit (or driver, in some cases, but replicated
* per unit) functions
@@ -658,8 +636,6 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity,
- store_sdma_affinity);
static struct device_attribute *hfi1_attributes[] = {
&dev_attr_hw_rev,
@@ -670,7 +646,6 @@ static struct device_attribute *hfi1_attributes[] = {
&dev_attr_boardversion,
&dev_attr_tempsense,
&dev_attr_chip_reset,
- &dev_attr_sdma_affinity,
};
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 11e02b228922..f77e59fb43fe 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -253,66 +253,6 @@ TRACE_EVENT(hfi1_mmu_invalidate,
)
);
-#define SNOOP_PRN \
- "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
- "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
-
-TRACE_EVENT(snoop_capture,
- TP_PROTO(struct hfi1_devdata *dd,
- int hdr_len,
- struct ib_header *hdr,
- int data_len,
- void *data),
- TP_ARGS(dd, hdr_len, hdr, data_len, data),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, slid)
- __field(u16, dlid)
- __field(u32, qpn)
- __field(u8, opcode)
- __field(u8, sl)
- __field(u16, pkey)
- __field(u32, hdr_len)
- __field(u32, data_len)
- __field(u8, lnh)
- __dynamic_array(u8, raw_hdr, hdr_len)
- __dynamic_array(u8, raw_pkt, data_len)
- ),
- TP_fast_assign(
- struct ib_other_headers *ohdr;
-
- __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- DD_DEV_ASSIGN(dd);
- __entry->slid = be16_to_cpu(hdr->lrh[3]);
- __entry->dlid = be16_to_cpu(hdr->lrh[1]);
- __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->hdr_len = hdr_len;
- __entry->data_len = data_len;
- memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
- memcpy(__get_dynamic_array(raw_pkt), data, data_len);
- ),
- TP_printk(
- "[%s] " SNOOP_PRN,
- __get_str(dev),
- __entry->slid,
- __entry->dlid,
- __entry->qpn,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->sl,
- __entry->pkey,
- __entry->hdr_len,
- __entry->data_len
- )
-);
-
#endif /* __HFI1_TRACE_RX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a761f804111e..77697d690f3e 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1144,7 +1144,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
rb_node = hfi1_mmu_rb_extract(pq->handler,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
- if (rb_node && !IS_ERR(rb_node))
+ if (rb_node)
node = container_of(rb_node, struct sdma_mmu_node, rb);
else
rb_node = NULL;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 5fc623362731..b9bf0759f10a 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
if (vlan_tag < 0x1000)
vlan_tag |= (ah_attr->sl & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ ah->av.eth.gid_index = ret;
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
if (ah_attr->static_rate) {
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 1ea686b9e0f9..6a0fec357dae 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
- goto err_dbmap;
+ goto err_cq_free;
}
return &cq->ibcq;
+err_cq_free:
+ mlx4_cq_free(dev->dev, &cq->mcq);
+
err_dbmap:
if (context)
mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 79d017baf6f4..fcd04b881ec1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_create;
} else {
- /* for now choose 64 bytes till we have a proper interface */
- cqe_size = 64;
+ cqe_size = cache_line_size() == 128 ? 128 : 64;
err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
&index, &inlen);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 22174774dbb8..32b09f059c84 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
+ resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
@@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
{
struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
struct ib_event ibev;
-
+ bool fatal = false;
u8 port = 0;
switch (event) {
case MLX5_DEV_EVENT_SYS_ERROR:
- ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
+ fatal = true;
break;
case MLX5_DEV_EVENT_PORT_UP:
@@ -2370,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (ibdev->ib_active)
ib_dispatch_event(&ibev);
+
+ if (fatal)
+ ibdev->ib_active = false;
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
@@ -3115,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
}
err = init_node_data(dev);
if (err)
- goto err_dealloc;
+ goto err_free_port;
mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex);
@@ -3125,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_enable_roce(dev);
if (err)
- goto err_dealloc;
+ goto err_free_port;
}
err = create_dev_resources(&dev->devr);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index dcdcd195fe53..7d689903c87c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -626,6 +626,8 @@ struct mlx5_ib_dev {
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
+ /* Prevents soft lock on massive reg MRs */
+ struct mutex slow_path_mutex;
int fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_odp_caps odp_caps;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index d4ad672b905b..4e9012463c37 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
int err;
int i;
+ mutex_init(&dev->slow_path_mutex);
cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
if (!cache->wq) {
mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto error;
}
- if (!mr)
+ if (!mr) {
+ mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
page_shift, access_flags);
+ mutex_unlock(&dev->slow_path_mutex);
+ }
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 41f4c2afbcdd..d1e921816bfe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -52,7 +52,6 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
};
static const u32 mlx5_ib_opcode[] = {
@@ -2052,8 +2051,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
- to_mcq(init_attr->recv_cq)->mcq.cqn,
- to_mcq(init_attr->send_cq)->mcq.cqn);
+ init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
+ init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
qp->trans_qp.xrcdn = xrcdn;
@@ -4815,6 +4814,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
udata->inlen))
return ERR_PTR(-EOPNOTSUPP);
+ if (init_attr->log_ind_tbl_size >
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+ mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
+ init_attr->log_ind_tbl_size,
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+ return ERR_PTR(-EINVAL);
+ }
+
min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
if (udata->outlen && udata->outlen < min_resp_len)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
index 7c06d85568d4..6c9f3923e838 100644
--- a/drivers/infiniband/hw/qedr/Kconfig
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_QEDR
tristate "QLogic RoCE driver"
depends on 64BIT && QEDE
select QED_LL2
+ select QED_RDMA
---help---
This driver provides low-level InfiniBand over Ethernet
support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
index 01f71caa3ac4..f2cefb0d9180 100644
--- a/drivers/infiniband/sw/rdmavt/dma.c
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS;
- if (offset + size > PAGE_SIZE)
- return BAD_DMA_ADDRESS;
-
addr = (u64)page_address(page);
if (addr)
addr += offset;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index b8258e4f0aea..ffff5a54cb34 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
{
int err;
struct socket *sock;
- struct udp_port_cfg udp_cfg;
- struct udp_tunnel_sock_cfg tnl_cfg;
-
- memset(&udp_cfg, 0, sizeof(udp_cfg));
+ struct udp_port_cfg udp_cfg = {0};
+ struct udp_tunnel_sock_cfg tnl_cfg = {0};
if (ipv6) {
udp_cfg.family = AF_INET6;
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
return ERR_PTR(err);
}
- tnl_cfg.sk_user_data = NULL;
tnl_cfg.encap_type = 1;
tnl_cfg.encap_rcv = rxe_udp_encap_recv;
- tnl_cfg.encap_destroy = NULL;
/* Setup UDP tunnel */
setup_udp_tunnel_sock(net, sock, &tnl_cfg);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index b8036cfbce04..c3e60e4bde6e 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
if (qp->sq.queue) {
__rxe_do_task(&qp->comp.task);
__rxe_do_task(&qp->req.task);
+ rxe_queue_reset(qp->sq.queue);
}
/* cleanup attributes */
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
{
qp->req.state = QP_STATE_ERROR;
qp->resp.state = QP_STATE_ERROR;
+ qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
rxe_run_task(&qp->resp.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 08274254eb88..d14bf496d62d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -84,6 +84,15 @@ err1:
return -EINVAL;
}
+inline void rxe_queue_reset(struct rxe_queue *q)
+{
+ /* the queue consists of a header and the memory
+ * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h.
+ * Reset only the queue itself, not the management header.
+ */
+ memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
+}
+
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
int *num_elem,
unsigned int elem_size)
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 239fd609c31e..8c8641c87817 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
size_t buf_size,
struct rxe_mmap_info **ip_p);
+void rxe_queue_reset(struct rxe_queue *q);
+
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
int *num_elem,
unsigned int elem_size);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 832846b73ea0..22bd9630dcd9 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -696,7 +696,8 @@ next_wqe:
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- goto complete;
+ __rxe_do_task(&qp->comp.task);
+ return 0;
}
payload = mtu;
}
@@ -745,13 +746,17 @@ err:
wqe->status = IB_WC_LOC_PROT_ERR;
wqe->state = wqe_state_error;
-complete:
- if (qp_type(qp) != IB_QPT_RC) {
- while (rxe_completer(qp) == 0)
- ;
- }
-
- return 0;
+ /*
+ * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
+ * ---------8<---------8<-------------
+ * ...Note that if a completion error occurs, a Work Completion
+ * will always be generated, even if the signaling
+ * indicator requests an Unsignaled Completion.
+ * ---------8<---------8<-------------
+ */
+ wqe->wr.send_flags |= IB_SEND_SIGNALED;
+ __rxe_do_task(&qp->comp.task);
+ return -EAGAIN;
exit:
return -EAGAIN;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b8d2d9e2263..da12717a3eb7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
enum {
IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -134,15 +136,21 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_cb {
- struct qdisc_skb_cb qdisc_cb;
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
};
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
- return (struct ipoib_cb *)skb->cb;
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the ipoib header is present now; make room for a dummy
+ * pseudo header and set the skb fields accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
}
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4ad297d3de89..339a1eecdfe3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct sk_buff *skb;
int i;
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+ * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
* IP header to a multiple of 16.
*/
- skb_reserve(skb, 12);
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
- small_skb = dev_alloc_skb(dlen + 12);
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
- skb_reserve(small_skb, 12);
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index be11d5d5b8c1..830fecb6934c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
+ * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
+ * 64-byte aligned
*/
- skb_reserve(skb, 4);
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5636fc3da6b8..b58d9dca5c93 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_free(neigh);
goto err_drop;
}
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&neigh->queue, skb);
- else {
+ } else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
@@ -964,7 +967,7 @@ err_drop:
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_cb *cb)
+ struct ipoib_pseudo_header *phdr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4);
+ path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, cb->hwaddr + 4);
+ path = path_rec_create(dev, phdr->hwaddr + 4);
new_path = 1;
}
if (path) {
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) {
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
@@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Add in the P_Key for multicast*/
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
- ipoib_mcast_send(dev, cb->hwaddr, skb);
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
@@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, cb->hwaddr, dev);
+ neigh_add_path(skb, phdr->hwaddr, dev);
return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
/* for unicast ARP and RARP should always perform path find */
- unicast_arp_send(skb, dev, cb);
+ unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
@@ -1086,11 +1095,13 @@ send_using_neigh:
goto unref;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
goto unref;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(*phdr));
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
unsigned short type,
const void *daddr, const void *saddr, unsigned len)
{
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
/*
* we don't rely on dst_entry structure, always stuff the
- * destination address into skb->cb so we can figure out where
+ * destination address into skb hard header so we can figure out where
* to send the packet later.
*/
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
- return sizeof *header;
+ return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev)
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
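
The ipoib_main.c hunks above replace the old skb->cb bookkeeping with an explicit pseudo-header: ipoib_hard_header() now pushes the destination hardware address in front of the payload, ipoib_start_xmit() pulls it off again, and every requeue path pushes it back so the address is still there on a later transmit attempt. A minimal user-space sketch of that push/pull idea follows; the toy_skb type and helpers are invented for illustration and are not the kernel's skb API.

/* Illustrative sketch only: a toy buffer with push/pull helpers that
 * mimics carrying the destination address as a pseudo-header in front
 * of the payload, stripped in the xmit path and pushed back whenever
 * the packet has to be requeued. */
#include <stdio.h>
#include <string.h>

#define TOY_ALEN 20                       /* INFINIBAND_ALEN-sized */

struct toy_skb {
        unsigned char buf[256];
        unsigned char *data;              /* current start of valid data */
        size_t len;
};

static unsigned char *toy_push(struct toy_skb *skb, size_t n)
{
        skb->data -= n;                   /* consume headroom */
        skb->len += n;
        return skb->data;
}

static void toy_pull(struct toy_skb *skb, size_t n)
{
        skb->data += n;                   /* drop the pseudo-header */
        skb->len -= n;
}

int main(void)
{
        struct toy_skb skb;
        unsigned char daddr[TOY_ALEN] = { 0xff, 0x12, 0x40 };

        skb.data = skb.buf + 64;          /* leave headroom */
        memcpy(skb.data, "payload", 7);
        skb.len = 7;

        memcpy(toy_push(&skb, TOY_ALEN), daddr, TOY_ALEN); /* hard_header() */
        toy_pull(&skb, TOY_ALEN);                          /* start_xmit()  */
        memcpy(toy_push(&skb, TOY_ALEN), daddr, TOY_ALEN); /* requeue path  */

        printf("queued length incl. pseudo-header: %zu\n", skb.len);
        return 0;
}
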
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d3394b6add24..1909dd252c94 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
- else {
+ } else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 54eceb30ede5..a7d39689bbfb 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -43,7 +43,7 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties)
if (set_properties) {
psmouse->vendor = "FocalTech";
- psmouse->name = "FocalTech Touchpad";
+ psmouse->name = "Touchpad";
}
return 0;
@@ -146,8 +146,8 @@ static void focaltech_report_state(struct psmouse *psmouse)
}
input_mt_report_pointer_emulation(dev, true);
- input_report_key(psmouse->dev, BTN_LEFT, state->pressed);
- input_sync(psmouse->dev);
+ input_report_key(dev, BTN_LEFT, state->pressed);
+ input_sync(dev);
}
static void focaltech_process_touch_packet(struct psmouse *psmouse,
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index fb4b185dea96..bee267424972 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1115,10 +1115,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
&max_proto, set_properties, true))
return PSMOUSE_TOUCHKIT_PS2;
-
- if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
- &max_proto, set_properties, true))
- return PSMOUSE_BYD;
}
/*
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index f4bfb4b2d50a..073246c7d163 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
},
+ {
+ /* Schenker XMG C504 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ },
+ },
{ }
};
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 15c01c3cd540..e6f9b2d745ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2636,17 +2636,26 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
/* And we're up. Go go go! */
of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
#ifdef CONFIG_PCI
- pci_request_acs();
- ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
- if (ret)
- return ret;
+ if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
+ pci_request_acs();
+ ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
#endif
#ifdef CONFIG_ARM_AMBA
- ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
- if (ret)
- return ret;
+ if (amba_bustype.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
#endif
- return bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
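
The arm_smmu_device_dt_probe() change above makes bus registration idempotent: bus_set_iommu() is only called when the bus's iommu_ops are not already arm_smmu_ops, so probing a second SMMU instance no longer trips over the first one's registration. A small stand-alone sketch of that guard-before-register pattern (names invented, error codes simplified) is:

/* Illustrative only: register ops on a bus once, and make a second
 * probe of the same driver a no-op instead of a failure. */
#include <stdio.h>

struct bus_type {
        const char *name;
        const void *iommu_ops;
};

static const int my_ops;                  /* stands in for arm_smmu_ops */

static int bus_set_ops(struct bus_type *bus, const void *ops)
{
        if (bus->iommu_ops)
                return -16;               /* -EBUSY: already claimed */
        bus->iommu_ops = ops;
        return 0;
}

static int probe_one(struct bus_type *bus)
{
        if (bus->iommu_ops != &my_ops) {  /* the guard from the patch */
                int ret = bus_set_ops(bus, &my_ops);

                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        struct bus_type pci = { .name = "pci" };

        printf("first probe:  %d\n", probe_one(&pci));
        printf("second probe: %d\n", probe_one(&pci));   /* no -EBUSY */
        return 0;
}
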
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c841eb7a1a74..8f7281444551 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -324,8 +324,10 @@ struct arm_smmu_master_cfg {
#define INVALID_SMENDX -1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
+#define fwspec_smendx(fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
- for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
+ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
struct arm_smmu_device {
struct device *dev;
@@ -1228,6 +1230,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -ENXIO;
}
+ /*
+ * FIXME: The arch/arm DMA API code tries to attach devices to its own
+ * domains between of_xlate() and add_device() - we have no way to cope
+ * with that, so until ARM gets converted to rely on groups and default
+ * domains, just say no (but more politely than by dereferencing NULL).
+ * This should be at least a WARN_ON once that's sorted.
+ */
+ if (!fwspec->iommu_priv)
+ return -ENODEV;
+
smmu = fwspec_smmu(fwspec);
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
@@ -1390,7 +1402,7 @@ static int arm_smmu_add_device(struct device *dev)
fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
- } else if (fwspec) {
+ } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
} else {
return -ENODEV;
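
The arm-smmu.c hunk above hardens for_each_cfg_sme(): the new fwspec_smendx() accessor returns INVALID_SMENDX once the index reaches fw->num_ids, so the comma expression in the loop header never reads past the smendx[] array on the final iteration. A stand-alone sketch of that bounds-checked iterator (invented names, not the driver's types) is:

/* Illustrative only: the per-index lookup returns a sentinel once i
 * reaches the element count, so the iteration macro can evaluate it
 * before testing the loop condition without reading past the array. */
#include <stdio.h>

#define INVALID_IDX -1

struct cfg {
        int num_ids;
        int smendx[4];
};

#define cfg_smendx(c, i) \
        ((i) >= (c)->num_ids ? INVALID_IDX : (c)->smendx[(i)])

#define for_each_cfg_idx(c, i, idx) \
        for ((i) = 0; (idx) = cfg_smendx((c), (i)), (i) < (c)->num_ids; ++(i))

int main(void)
{
        struct cfg c = { .num_ids = 2, .smendx = { 7, 9 } };
        int i, idx;

        for_each_cfg_idx(&c, i, idx)
                printf("id %d -> smendx %d\n", i, idx);
        return 0;
}
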
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 58470f5ced04..8c53748a769d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
struct pci_dev *pdev = to_pci_dev(data);
struct dmar_pci_notify_info *info;
- /* Only care about add/remove events for physical functions */
+ /* Only care about add/remove events for physical functions.
+ * For VFs we actually do the lookup based on the corresponding
+ * PF in device_to_iommu() anyway. */
if (pdev->is_virtfn)
return NOTIFY_DONE;
if (action != BUS_NOTIFY_ADD_DEVICE &&
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a4407eabf0e6..d8376c2d18b3 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
return NULL;
if (dev_is_pci(dev)) {
+ struct pci_dev *pf_pdev;
+
pdev = to_pci_dev(dev);
+ /* VFs aren't listed in scope tables; we need to look up
+ * the PF instead to find the IOMMU. */
+ pf_pdev = pci_physfn(pdev);
+ dev = &pf_pdev->dev;
segment = pci_domain_nr(pdev->bus);
} else if (has_acpi_companion(dev))
dev = &ACPI_COMPANION(dev)->dev;
@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, tmp) {
if (tmp == dev) {
+ /* For a VF use its original BDF# not that of the PF
+ * which we used for the IOMMU lookup. Strictly speaking
+ * we could do this for all PCI devices; we only need to
+ * get the BDF# from the scope table for ACPI matches. */
+ if (pdev->is_virtfn)
+ goto got_pdev;
+
*bus = drhd->devices[i].bus;
*devfn = drhd->devices[i].devfn;
goto out;
@@ -1711,6 +1724,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domain_ids)
return;
+again:
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
struct dmar_domain *domain;
@@ -1723,10 +1737,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
domain = info->domain;
- dmar_remove_one_dev_info(domain, info->dev);
+ __dmar_remove_one_dev_info(info);
- if (!domain_type_is_vm_or_si(domain))
+ if (!domain_type_is_vm_or_si(domain)) {
+ /*
+ * The domain_exit() function can't be called under
+ * device_domain_lock, as it takes this lock itself.
+ * So release the lock here and re-run the loop
+ * afterwards.
+ */
+ spin_unlock_irqrestore(&device_domain_lock, flags);
domain_exit(domain);
+ goto again;
+ }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 8ebb3530afa7..cb72e0011310 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
struct page *pages;
int order;
- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
- if (order < 0)
- order = 0;
-
+ /* Start at 2 because it's defined as 2^(1+PSS) */
+ iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
+
+ /* Eventually I'm promised we will get a multi-level PASID table
+ * and it won't have to be physically contiguous. Until then,
+ * limit the size because 8MiB contiguous allocations can be hard
+ * to come by. The limit of 0x20000, which is 1MiB for each of
+ * the PASID and PASID-state tables, is somewhat arbitrary. */
+ if (iommu->pasid_max > 0x20000)
+ iommu->pasid_max = 0x20000;
+
+ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
if (ecap_dis(iommu->ecap)) {
+ /* Just making it explicit... */
+ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (pages)
iommu->pasid_state_table = page_address(pages);
@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
- int order;
-
- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
- if (order < 0)
- order = 0;
+ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
if (iommu->pasid_table) {
free_pages((unsigned long)iommu->pasid_table, order);
@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
svm->iommu = iommu;
- if (pasid_max > 2 << ecap_pss(iommu->ecap))
- pasid_max = 2 << ecap_pss(iommu->ecap);
+ if (pasid_max > iommu->pasid_max)
+ pasid_max = iommu->pasid_max;
/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
ret = idr_alloc(&iommu->pasid_idr, svm,
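
The intel-svm.c hunks above size the PASID table from the hardware's PSS field, pasid_max = 2^(1+PSS), and clamp it to 0x20000 entries so the physically contiguous allocation stays around 1 MiB per table instead of growing to 8 MiB. The arithmetic can be checked with the short sketch below; the 8-byte entry size and 4 KiB page size are assumptions used for illustration, the kernel itself uses sizeof(struct pasid_entry) and get_order().

/* Back-of-the-envelope check of the sizing logic, illustrative only. */
#include <stdio.h>

#define PAGE_SHIFT 12

static int get_order_of(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned int ecap_pss = 19;                  /* example PSS field */
        unsigned long pasid_max = 2UL << ecap_pss;   /* 2^(1+PSS) */

        if (pasid_max > 0x20000)
                pasid_max = 0x20000;                 /* cap: ~1 MiB per table */

        printf("pasid_max = %lu, table order = %d\n",
               pasid_max, get_order_of(pasid_max * 8 /* assumed entry size */));
        return 0;
}
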
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 08c87fadca8c..1f32688c312d 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -65,6 +65,7 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <acpi/pcc.h>
#include "mailbox.h"
@@ -267,6 +268,8 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
chan->txdone_method |= TXDONE_BY_ACK;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
if (pcc_doorbell_irq[subspace_id] > 0) {
int rc;
@@ -275,12 +278,11 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
if (unlikely(rc)) {
dev_err(dev, "failed to register PCC interrupt %d\n",
pcc_doorbell_irq[subspace_id]);
+ pcc_mbox_free_channel(chan);
chan = ERR_PTR(rc);
}
}
- spin_unlock_irqrestore(&chan->lock, flags);
-
return chan;
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
@@ -304,20 +306,19 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
return;
}
+ if (pcc_doorbell_irq[id] > 0)
+ devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
+
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
chan->txdone_method = TXDONE_BY_POLL;
- if (pcc_doorbell_irq[id] > 0)
- devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
-
spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
-
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eac84d8ff724..2089d46b0eb8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3887,10 +3887,10 @@ array_state_show(struct mddev *mddev, char *page)
st = read_auto;
break;
case 0:
- if (mddev->in_sync)
- st = clean;
- else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
+ else if (mddev->in_sync)
+ st = clean;
else if (mddev->safemode)
st = active_idle;
else
@@ -8144,14 +8144,14 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1961d827dbd1..29e2df5cd77b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -403,11 +403,14 @@ static void raid1_end_write_request(struct bio *bio)
struct bio *to_put = NULL;
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
/*
* 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
@@ -444,7 +447,7 @@ static void raid1_end_write_request(struct bio *bio)
/* Maybe we can clear some bad blocks. */
if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
@@ -2294,17 +2297,23 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
* This is all done synchronously while the array is
* frozen
*/
+
+ bio = r1_bio->bios[r1_bio->read_disk];
+ bdevname(bio->bi_bdev, b);
+ bio_put(bio);
+ r1_bio->bios[r1_bio->read_disk] = NULL;
+
if (mddev->ro == 0) {
freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
unfreeze_array(conf);
- } else
- md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+ } else {
+ r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+ }
+
rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
- bio = r1_bio->bios[r1_bio->read_disk];
- bdevname(bio->bi_bdev, b);
read_more:
disk = read_balance(conf, r1_bio, &max_sectors);
if (disk == -1) {
@@ -2315,11 +2324,6 @@ read_more:
} else {
const unsigned long do_sync
= r1_bio->master_bio->bi_opf & REQ_SYNC;
- if (bio) {
- r1_bio->bios[r1_bio->read_disk] =
- mddev->ro ? IO_BLOCKED : NULL;
- bio_put(bio);
- }
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index be1a9fca3b2d..39fddda2fef2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private;
int slot, repl;
struct md_rdev *rdev = NULL;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
@@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
if (repl)
/* Never record new bad blocks to replacement,
* just fail it.
@@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
bio_put(bio);
if (repl)
r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
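
The raid1.c and raid10.c hunks above compute discard_error once per completion, an error status on a REQ_OP_DISCARD bio, and use it to bypass both the WriteErrorSeen handling and the bad-block bookkeeping, since a failed discard does not make the data on the mirror wrong. The predicate reduces to the small sketch below (constants invented for illustration).

/* Illustrative only: an I/O error counts as a real write failure
 * only when the failed request was not a discard. */
#include <stdbool.h>
#include <stdio.h>

enum req_op { OP_WRITE, OP_DISCARD };

static bool is_write_failure(int error, enum req_op op)
{
        bool discard_error = error && op == OP_DISCARD;

        return error && !discard_error;
}

int main(void)
{
        printf("failed write:   %d\n", is_write_failure(-5, OP_WRITE));
        printf("failed discard: %d\n", is_write_failure(-5, OP_DISCARD));
        return 0;
}
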
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 1b1ab4a1d132..a227a9f3ee65 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1087,7 +1087,7 @@ static int r5l_recovery_log(struct r5l_log *log)
* 1's seq + 10 and let superblock points to meta2. The same recovery will
* not think meta 3 is a valid meta, because its seq doesn't match
*/
- if (ctx.seq > log->last_cp_seq + 1) {
+ if (ctx.seq > log->last_cp_seq) {
int ret;
ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
@@ -1096,6 +1096,8 @@ static int r5l_recovery_log(struct r5l_log *log)
log->seq = ctx.seq + 11;
log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
r5l_write_super(log, ctx.pos);
+ log->last_checkpoint = ctx.pos;
+ log->next_checkpoint = ctx.pos;
} else {
log->log_start = ctx.pos;
log->seq = ctx.seq;
@@ -1154,6 +1156,7 @@ create:
if (create_super) {
log->last_cp_seq = prandom_u32();
cp = 0;
+ r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
/*
* Make sure super points to correct address. Log might have
* data very soon. If super hasn't correct log tail address,
@@ -1168,6 +1171,7 @@ create:
if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
log->max_free_space = RECLAIM_MAX_FREE_SPACE;
log->last_checkpoint = cp;
+ log->next_checkpoint = cp;
__free_page(page);
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 012225587c25..b71b747ee0ba 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -513,6 +513,11 @@ config DVB_AS102_FE
depends on DVB_CORE
default DVB_AS102
+config DVB_GP8PSK_FE
+ tristate
+ depends on DVB_CORE
+ default DVB_USB_GP8PSK
+
comment "DVB-C (cable) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index e90165ad361b..93921a4eaa27 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -121,6 +121,7 @@ obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
obj-$(CONFIG_DVB_AF9033) += af9033.o
obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
+obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
diff --git a/drivers/media/usb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index db6eb79cde07..93f59bfea092 100644
--- a/drivers/media/usb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -1,5 +1,5 @@
-/* DVB USB compliant Linux driver for the
- * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
+/*
+ * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
*
* Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com)
* Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com)
@@ -8,17 +8,31 @@
*
* This module is based off the vp7045 and vp702x modules
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
- * see Documentation/dvb/README.dvb-usb for more information
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
*/
-#include "gp8psk.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "gp8psk-fe.h"
+#include "dvb_frontend.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct gp8psk_fe_state {
struct dvb_frontend fe;
- struct dvb_usb_device *d;
+ void *priv;
+ const struct gp8psk_fe_ops *ops;
+ bool is_rev1;
u8 lock;
u16 snr;
unsigned long next_status_check;
@@ -29,22 +43,24 @@ static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe)
{
struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 status;
- gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1);
+
+ st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1);
return status & bmDCtuned;
}
static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
- return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0);
}
static int gp8psk_fe_update_status(struct gp8psk_fe_state *st)
{
u8 buf[6];
if (time_after(jiffies,st->next_status_check)) {
- gp8psk_usb_in_op(st->d, GET_SIGNAL_LOCK, 0,0,&st->lock,1);
- gp8psk_usb_in_op(st->d, GET_SIGNAL_STRENGTH, 0,0,buf,6);
+ st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1);
+ st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6);
st->snr = (buf[1]) << 8 | buf[0];
st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000;
}
@@ -116,13 +132,12 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 cmd[10];
u32 freq = c->frequency * 1000;
- int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct);
- deb_fe("%s()\n", __func__);
+ dprintk("%s()\n", __func__);
cmd[4] = freq & 0xff;
cmd[5] = (freq >> 8) & 0xff;
@@ -136,21 +151,21 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_DVBS:
if (c->modulation != QPSK) {
- deb_fe("%s: unsupported modulation selected (%d)\n",
+ dprintk("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
c->fec_inner = FEC_AUTO;
break;
case SYS_DVBS2: /* kept for backwards compatibility */
- deb_fe("%s: DVB-S2 delivery system selected\n", __func__);
+ dprintk("%s: DVB-S2 delivery system selected\n", __func__);
break;
case SYS_TURBO:
- deb_fe("%s: Turbo-FEC delivery system selected\n", __func__);
+ dprintk("%s: Turbo-FEC delivery system selected\n", __func__);
break;
default:
- deb_fe("%s: unsupported delivery system selected (%d)\n",
+ dprintk("%s: unsupported delivery system selected (%d)\n",
__func__, c->delivery_system);
return -EOPNOTSUPP;
}
@@ -161,9 +176,9 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
cmd[3] = (c->symbol_rate >> 24) & 0xff;
switch (c->modulation) {
case QPSK:
- if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
+ if (st->is_rev1)
if (gp8psk_tuned_to_DCII(fe))
- gp8psk_bcm4500_reload(state->d);
+ st->ops->reload(st->priv);
switch (c->fec_inner) {
case FEC_1_2:
cmd[9] = 0; break;
@@ -207,18 +222,18 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
cmd[9] = 0;
break;
default: /* Unknown modulation */
- deb_fe("%s: unsupported modulation selected (%d)\n",
+ dprintk("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
- if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
+ if (st->is_rev1)
gp8psk_set_tuner_mode(fe, 0);
- gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10);
+ st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10);
- state->lock = 0;
- state->next_status_check = jiffies;
- state->status_check_interval = 200;
+ st->lock = 0;
+ st->next_status_check = jiffies;
+ st->status_check_interval = 200;
return 0;
}
@@ -228,9 +243,9 @@ static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe,
{
struct gp8psk_fe_state *st = fe->demodulator_priv;
- deb_fe("%s\n",__func__);
+ dprintk("%s\n", __func__);
- if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, m->msg[0], 0,
+ if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0,
m->msg, m->msg_len)) {
return -EINVAL;
}
@@ -243,12 +258,12 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 cmd;
- deb_fe("%s\n",__func__);
+ dprintk("%s\n", __func__);
/* These commands are certainly wrong */
cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01;
- if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, cmd, 0,
+ if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0,
&cmd, 0)) {
return -EINVAL;
}
@@ -258,10 +273,10 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
enum fe_sec_tone_mode tone)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
- if (gp8psk_usb_out_op(state->d,SET_22KHZ_TONE,
- (tone == SEC_TONE_ON), 0, NULL, 0)) {
+ if (st->ops->out(st->priv, SET_22KHZ_TONE,
+ (tone == SEC_TONE_ON), 0, NULL, 0)) {
return -EINVAL;
}
return 0;
@@ -270,9 +285,9 @@ static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
- if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE,
+ if (st->ops->out(st->priv, SET_LNB_VOLTAGE,
voltage == SEC_VOLTAGE_18, 0, NULL, 0)) {
return -EINVAL;
}
@@ -281,52 +296,60 @@ static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
- return gp8psk_usb_out_op(state->d, USE_EXTRA_VOLT, onoff, 0,NULL,0);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0);
}
static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 cmd = sw_cmd & 0x7f;
- if (gp8psk_usb_out_op(state->d,SET_DN_SWITCH, cmd, 0,
- NULL, 0)) {
+ if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0))
return -EINVAL;
- }
- if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
- 0, NULL, 0)) {
+
+ if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
+ 0, NULL, 0))
return -EINVAL;
- }
return 0;
}
static void gp8psk_fe_release(struct dvb_frontend* fe)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
- kfree(state);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ kfree(st);
}
static struct dvb_frontend_ops gp8psk_fe_ops;
-struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d)
+struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
+ void *priv, bool is_rev1)
{
- struct gp8psk_fe_state *s = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
- if (s == NULL)
- goto error;
-
- s->d = d;
- memcpy(&s->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
- s->fe.demodulator_priv = s;
-
- goto success;
-error:
- return NULL;
-success:
- return &s->fe;
-}
+ struct gp8psk_fe_state *st;
+ if (!ops || !ops->in || !ops->out || !ops->reload) {
+ pr_err("Error! gp8psk-fe ops not defined.\n");
+ return NULL;
+ }
+
+ st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
+ st->fe.demodulator_priv = st;
+ st->ops = ops;
+ st->priv = priv;
+ st->is_rev1 = is_rev1;
+
+ pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : "");
+
+ return &st->fe;
+}
+EXPORT_SYMBOL_GPL(gp8psk_fe_attach);
static struct dvb_frontend_ops gp8psk_fe_ops = {
.delsys = { SYS_DVBS },
@@ -370,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
.dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd,
.enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage
};
+
+MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
+MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S");
+MODULE_VERSION("1.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.h b/drivers/media/dvb-frontends/gp8psk-fe.h
new file mode 100644
index 000000000000..6c7944b1ecd6
--- /dev/null
+++ b/drivers/media/dvb-frontends/gp8psk-fe.h
@@ -0,0 +1,82 @@
+/*
+ * gp8psk_fe driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef GP8PSK_FE_H
+#define GP8PSK_FE_H
+
+#include <linux/types.h>
+
+/* gp8psk commands */
+
+#define GET_8PSK_CONFIG 0x80 /* in */
+#define SET_8PSK_CONFIG 0x81
+#define I2C_WRITE 0x83
+#define I2C_READ 0x84
+#define ARM_TRANSFER 0x85
+#define TUNE_8PSK 0x86
+#define GET_SIGNAL_STRENGTH 0x87 /* in */
+#define LOAD_BCM4500 0x88
+#define BOOT_8PSK 0x89 /* in */
+#define START_INTERSIL 0x8A /* in */
+#define SET_LNB_VOLTAGE 0x8B
+#define SET_22KHZ_TONE 0x8C
+#define SEND_DISEQC_COMMAND 0x8D
+#define SET_DVB_MODE 0x8E
+#define SET_DN_SWITCH 0x8F
+#define GET_SIGNAL_LOCK 0x90 /* in */
+#define GET_FW_VERS 0x92
+#define GET_SERIAL_NUMBER 0x93 /* in */
+#define USE_EXTRA_VOLT 0x94
+#define GET_FPGA_VERS 0x95
+#define CW3K_INIT 0x9d
+
+/* PSK_configuration bits */
+#define bm8pskStarted 0x01
+#define bm8pskFW_Loaded 0x02
+#define bmIntersilOn 0x04
+#define bmDVBmode 0x08
+#define bm22kHz 0x10
+#define bmSEL18V 0x20
+#define bmDCtuned 0x40
+#define bmArmed 0x80
+
+/* Satellite modulation modes */
+#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
+#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
+#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
+#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
+
+#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
+#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
+#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
+#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
+#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
+#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
+
+/* firmware revision id's */
+#define GP8PSK_FW_REV1 0x020604
+#define GP8PSK_FW_REV2 0x020704
+#define GP8PSK_FW_VERS(_fw_vers) \
+ ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
+
+struct gp8psk_fe_ops {
+ int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
+ int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
+ int (*reload)(void *priv);
+};
+
+struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
+ void *priv, bool is_rev1);
+
+#endif
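
With gp8psk-fe.c moved under dvb-frontends, the frontend no longer calls the USB helpers directly; it keeps only a const gp8psk_fe_ops pointer (in/out/reload callbacks) plus an opaque priv cookie that the USB driver hands to gp8psk_fe_attach(). A miniature of that callback-table decoupling, with invented names standing in for the real ops, might look like:

/* Illustrative only: the "frontend" stores a const ops pointer and an
 * opaque priv cookie; the "USB" side supplies the implementations at
 * attach time, so neither module links against the other's internals. */
#include <stdio.h>

struct fe_ops {
        int (*in)(void *priv, unsigned char req);
        int (*out)(void *priv, unsigned char req);
};

struct frontend {
        const struct fe_ops *ops;
        void *priv;
};

/* --- "USB driver" side ------------------------------------------- */
static int usb_in(void *priv, unsigned char req)
{
        printf("usb in  req 0x%02x on %s\n", req, (const char *)priv);
        return 0;
}

static int usb_out(void *priv, unsigned char req)
{
        printf("usb out req 0x%02x on %s\n", req, (const char *)priv);
        return 0;
}

static const struct fe_ops usb_fe_ops = { .in = usb_in, .out = usb_out };

/* --- "frontend" side, knows nothing about USB --------------------- */
static int fe_attach(struct frontend *fe, const struct fe_ops *ops, void *priv)
{
        if (!ops || !ops->in || !ops->out)
                return -22;                /* -EINVAL: incomplete ops */
        fe->ops = ops;
        fe->priv = priv;
        return 0;
}

int main(void)
{
        static char name[] = "device0";
        struct frontend fe;

        if (!fe_attach(&fe, &usb_fe_ops, name))
                fe.ops->out(fe.priv, 0x81);    /* e.g. SET_8PSK_CONFIG */
        return 0;
}
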
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index f95a6bc839d5..cede3975d04b 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -118,7 +118,7 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
*protocol = RC_TYPE_RC6_MCE;
dev &= 0x7f;
dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n",
- toggle, vendor, dev, code);
+ *ptoggle, vendor, dev, code);
} else {
*ptoggle = 0;
*protocol = RC_TYPE_RC6_6A_32;
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 317ef63ee789..8d96a22647b3 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
int i;
tuner_dbg("%s called\n", __func__);
+ /* free allocated f/w string */
+ if (priv->fname != firmware_name)
+ kfree(priv->fname);
+ priv->fname = NULL;
+
+ priv->state = XC2028_NO_FIRMWARE;
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+
if (!priv->firm)
return;
@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)
priv->firm = NULL;
priv->firm_size = 0;
- priv->state = XC2028_NO_FIRMWARE;
-
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
}
static int load_all_firmwares(struct dvb_frontend *fe,
@@ -884,9 +889,8 @@ read_not_reliable:
return 0;
fail:
- priv->state = XC2028_NO_FIRMWARE;
+ free_firmware(priv);
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
msleep(50);
retry_count++;
@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
mutex_lock(&xc2028_list_mutex);
/* only perform final cleanup if this is the last instance */
- if (hybrid_tuner_report_instance_count(priv) == 1) {
+ if (hybrid_tuner_report_instance_count(priv) == 1)
free_firmware(priv);
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
- }
if (priv)
hybrid_tuner_release_state(priv);
@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
/*
* Copy the config data.
- * For the firmware name, keep a local copy of the string,
- * in order to avoid troubles during device release.
*/
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
- if (p->fname) {
- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
- if (priv->ctrl.fname == NULL) {
- rc = -ENOMEM;
- goto unlock;
- }
- }
/*
* If firmware name changed, frees firmware. As free_firmware will
@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
if (priv->state == XC2028_NO_FIRMWARE) {
if (!firmware_name[0])
- priv->fname = priv->ctrl.fname;
+ priv->fname = kstrdup(p->fname, GFP_KERNEL);
else
priv->fname = firmware_name;
+ if (!priv->fname) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
rc = request_firmware_nowait(THIS_MODULE, 1,
priv->fname,
priv->i2c_props.adap->dev.parent,
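
The tuner-xc2028 hunks above consolidate firmware-name handling: priv->fname is either a kstrdup() copy of the requested name or a pointer to the module-wide firmware_name parameter, and free_firmware() only kfree()s it in the former case. A hedged user-space sketch of that "free only if owned" rule (strdup/free standing in for kstrdup/kfree, names invented) is:

/* Illustrative only: the string is freed only when it is a private
 * duplicate, never when it points at the module-wide default. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char firmware_name[30];            /* module parameter analogue */

struct tuner_priv {
        char *fname;
};

static int set_fname(struct tuner_priv *priv, const char *requested)
{
        if (!firmware_name[0])
                priv->fname = strdup(requested);   /* private copy */
        else
                priv->fname = firmware_name;       /* borrowed, not owned */

        return priv->fname ? 0 : -12;              /* -ENOMEM */
}

static void free_fname(struct tuner_priv *priv)
{
        if (priv->fname != firmware_name)
                free(priv->fname);                 /* only free owned copies */
        priv->fname = NULL;
}

int main(void)
{
        struct tuner_priv priv = { 0 };

        if (!set_fname(&priv, "xc3028-v27.fw"))
                printf("using firmware %s\n", priv.fname);
        free_fname(&priv);
        return 0;
}
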
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index d4bdba60b0f7..52bc42da8a4c 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -73,23 +73,34 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI,
u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR;
u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) |
(read ? 0x80 : 0);
+ int ret;
+
+ mutex_lock(&fc_usb->data_mutex);
+ if (!read)
+ memcpy(fc_usb->data, val, sizeof(*val));
- int len = usb_control_msg(fc_usb->udev,
+ ret = usb_control_msg(fc_usb->udev,
read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT,
request,
request_type, /* 0xc0 read or 0x40 write */
wAddress,
0,
- val,
+ fc_usb->data,
sizeof(u32),
B2C2_WAIT_FOR_OPERATION_RDW * HZ);
- if (len != sizeof(u32)) {
+ if (ret != sizeof(u32)) {
err("error while %s dword from %d (%d).", read ? "reading" :
"writing", wAddress, wRegOffsPCI);
- return -EIO;
+ if (ret >= 0)
+ ret = -EIO;
}
- return 0;
+
+ if (read && ret >= 0)
+ memcpy(val, fc_usb->data, sizeof(*val));
+ mutex_unlock(&fc_usb->data_mutex);
+
+ return ret;
}
/*
* DKT 010817 - add support for V8 memory read/write and flash update
@@ -100,9 +111,14 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
{
u8 request_type = USB_TYPE_VENDOR;
u16 wIndex;
- int nWaitTime, pipe, len;
+ int nWaitTime, pipe, ret;
wIndex = page << 8;
+ if (buflen > sizeof(fc_usb->data)) {
+ err("Buffer size bigger than max URB control message\n");
+ return -EIO;
+ }
+
switch (req) {
case B2C2_USB_READ_V8_MEM:
nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ;
@@ -127,17 +143,32 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req,
wAddress, wIndex, buflen);
- len = usb_control_msg(fc_usb->udev, pipe,
+ mutex_lock(&fc_usb->data_mutex);
+
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
+ memcpy(fc_usb->data, pbBuffer, buflen);
+
+ ret = usb_control_msg(fc_usb->udev, pipe,
req,
request_type,
wAddress,
wIndex,
- pbBuffer,
+ fc_usb->data,
buflen,
nWaitTime * HZ);
+ if (ret != buflen)
+ ret = -EIO;
+
+ if (ret >= 0) {
+ ret = 0;
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ memcpy(pbBuffer, fc_usb->data, buflen);
+ }
- debug_dump(pbBuffer, len, deb_v8);
- return len == buflen ? 0 : -EIO;
+ mutex_unlock(&fc_usb->data_mutex);
+
+ debug_dump(pbBuffer, ret, deb_v8);
+ return ret;
}
#define bytes_left_to_read_on_page(paddr,buflen) \
@@ -196,29 +227,6 @@ static int flexcop_usb_get_mac_addr(struct flexcop_device *fc, int extended)
fc->dvb_adapter.proposed_mac, 6);
}
-#if 0
-static int flexcop_usb_utility_req(struct flexcop_usb *fc_usb, int set,
- flexcop_usb_utility_function_t func, u8 extra, u16 wIndex,
- u16 buflen, u8 *pvBuffer)
-{
- u16 wValue;
- u8 request_type = (set ? USB_DIR_OUT : USB_DIR_IN) | USB_TYPE_VENDOR;
- int nWaitTime = 2,
- pipe = set ? B2C2_USB_CTRL_PIPE_OUT : B2C2_USB_CTRL_PIPE_IN, len;
- wValue = (func << 8) | extra;
-
- len = usb_control_msg(fc_usb->udev,pipe,
- B2C2_USB_UTILITY,
- request_type,
- wValue,
- wIndex,
- pvBuffer,
- buflen,
- nWaitTime * HZ);
- return len == buflen ? 0 : -EIO;
-}
-#endif
-
/* usb i2c stuff */
static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
flexcop_usb_request_t req, flexcop_usb_i2c_function_t func,
@@ -226,9 +234,14 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
{
struct flexcop_usb *fc_usb = i2c->fc->bus_specific;
u16 wValue, wIndex;
- int nWaitTime,pipe,len;
+ int nWaitTime, pipe, ret;
u8 request_type = USB_TYPE_VENDOR;
+ if (buflen > sizeof(fc_usb->data)) {
+ err("Buffer size bigger than max URB control message\n");
+ return -EIO;
+ }
+
switch (func) {
case USB_FUNC_I2C_WRITE:
case USB_FUNC_I2C_MULTIWRITE:
@@ -257,15 +270,32 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
wValue & 0xff, wValue >> 8,
wIndex & 0xff, wIndex >> 8);
- len = usb_control_msg(fc_usb->udev,pipe,
+ mutex_lock(&fc_usb->data_mutex);
+
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
+ memcpy(fc_usb->data, buf, buflen);
+
+ ret = usb_control_msg(fc_usb->udev, pipe,
req,
request_type,
wValue,
wIndex,
- buf,
+ fc_usb->data,
buflen,
nWaitTime * HZ);
- return len == buflen ? 0 : -EREMOTEIO;
+
+ if (ret != buflen)
+ ret = -EIO;
+
+ if (ret >= 0) {
+ ret = 0;
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ memcpy(buf, fc_usb->data, buflen);
+ }
+
+ mutex_unlock(&fc_usb->data_mutex);
+
+ return 0;
}
/* actual bus specific access functions,
@@ -516,6 +546,7 @@ static int flexcop_usb_probe(struct usb_interface *intf,
/* general flexcop init */
fc_usb = fc->bus_specific;
fc_usb->fc_dev = fc;
+ mutex_init(&fc_usb->data_mutex);
fc->read_ibi_reg = flexcop_usb_read_ibi_reg;
fc->write_ibi_reg = flexcop_usb_write_ibi_reg;
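
The flexcop-usb hunks above stop passing caller buffers, which may live on the stack, straight to usb_control_msg() and instead bounce every control transfer through the new fc_usb->data array under data_mutex, copying in before a write, copying back out after a successful read, and turning short transfers into -EIO. A user-space miniature of that bounce-buffer discipline (pthread mutex standing in for the kernel mutex, fake transfer function, names invented) is:

/* Illustrative only: every transfer goes through one preallocated,
 * mutex-protected buffer; short transfers are reported as -EIO. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define EIO 5

struct dev {
        unsigned char data[80];            /* stands in for fc_usb->data */
        pthread_mutex_t data_mutex;
};

/* pretend hardware transfer; returns number of bytes moved */
static int control_msg(struct dev *d, int read, size_t len)
{
        if (read)
                memset(d->data, 0xab, len);
        return (int)len;
}

static int xfer(struct dev *d, int read, unsigned char *buf, size_t len)
{
        int ret;

        if (len > sizeof(d->data))
                return -EIO;               /* larger than the bounce buffer */

        pthread_mutex_lock(&d->data_mutex);
        if (!read)
                memcpy(d->data, buf, len);

        ret = control_msg(d, read, len);
        if (ret != (int)len)
                ret = -EIO;
        else
                ret = 0;

        if (!ret && read)
                memcpy(buf, d->data, len);
        pthread_mutex_unlock(&d->data_mutex);

        return ret;
}

int main(void)
{
        struct dev d = { .data_mutex = PTHREAD_MUTEX_INITIALIZER };
        unsigned char buf[4] = { 0 };

        printf("read: %d, first byte 0x%02x\n",
               xfer(&d, 1, buf, sizeof(buf)), buf[0]);
        return 0;
}
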
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index 92529a9c4475..25ad43166e78 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -29,6 +29,10 @@ struct flexcop_usb {
u8 tmp_buffer[1023+190];
int tmp_buffer_length;
+
+ /* for URB control messages */
+ u8 data[80];
+ struct mutex data_mutex;
};
#if 0
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 13620cdf0599..e9100a235831 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -545,18 +545,30 @@ static void free_sbufs(struct camera_data *cam)
static int write_packet(struct usb_device *udev,
u8 request, u8 * registers, u16 start, size_t size)
{
+ unsigned char *buf;
+ int ret;
+
if (!registers || size <= 0)
return -EINVAL;
- return usb_control_msg(udev,
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, registers, size);
+
+ ret = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
start, /* value */
0, /* index */
- registers, /* buffer */
+ buf, /* buffer */
size,
HZ);
+
+ kfree(buf);
+ return ret;
}
/****************************************************************************
@@ -567,18 +579,32 @@ static int write_packet(struct usb_device *udev,
static int read_packet(struct usb_device *udev,
u8 request, u8 * registers, u16 start, size_t size)
{
+ unsigned char *buf;
+ int ret;
+
if (!registers || size <= 0)
return -EINVAL;
- return usb_control_msg(udev,
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
request,
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
start, /* value */
0, /* index */
- registers, /* buffer */
+ buf, /* buffer */
size,
HZ);
+
+ if (ret >= 0)
+ memcpy(registers, buf, size);
+
+ kfree(buf);
+
+ return ret;
}
/******************************************************************************
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 2a7b5a963acf..3b3f32b426d1 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_DVB_USB_VP7045) += dvb-usb-vp7045.o
dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o
obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o
-dvb-usb-gp8psk-objs := gp8psk.o gp8psk-fe.o
+dvb-usb-gp8psk-objs := gp8psk.o
obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o
dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index efa782ed6e2d..7853261906b1 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -52,17 +52,15 @@ u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
struct af9005_device_state {
u8 sequence;
int led_state;
+ unsigned char data[256];
};
static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
int readwrite, int type, u8 * values, int len)
{
struct af9005_device_state *st = d->priv;
- u8 obuf[16] = { 0 };
- u8 ibuf[17] = { 0 };
- u8 command;
- int i;
- int ret;
+ u8 command, seq;
+ int i, ret;
if (len < 1) {
err("generic read/write, less than 1 byte. Makes no sense.");
@@ -73,16 +71,17 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
return -EINVAL;
}
- obuf[0] = 14; /* rest of buffer length low */
- obuf[1] = 0; /* rest of buffer length high */
+ mutex_lock(&d->data_mutex);
+ st->data[0] = 14; /* rest of buffer length low */
+ st->data[1] = 0; /* rest of buffer length high */
- obuf[2] = AF9005_REGISTER_RW; /* register operation */
- obuf[3] = 12; /* rest of buffer length */
+ st->data[2] = AF9005_REGISTER_RW; /* register operation */
+ st->data[3] = 12; /* rest of buffer length */
- obuf[4] = st->sequence++; /* sequence number */
+ st->data[4] = seq = st->sequence++; /* sequence number */
- obuf[5] = (u8) (reg >> 8); /* register address */
- obuf[6] = (u8) (reg & 0xff);
+ st->data[5] = (u8) (reg >> 8); /* register address */
+ st->data[6] = (u8) (reg & 0xff);
if (type == AF9005_OFDM_REG) {
command = AF9005_CMD_OFDM_REG;
@@ -96,51 +95,52 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
command |= readwrite;
if (readwrite == AF9005_CMD_WRITE)
for (i = 0; i < len; i++)
- obuf[8 + i] = values[i];
+ st->data[8 + i] = values[i];
else if (type == AF9005_TUNER_REG)
/* read command for tuner, the first byte contains the i2c address */
- obuf[8] = values[0];
- obuf[7] = command;
+ st->data[8] = values[0];
+ st->data[7] = command;
- ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 17, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 17, 0);
if (ret)
- return ret;
+ goto ret;
/* sanity check */
- if (ibuf[2] != AF9005_REGISTER_RW_ACK) {
+ if (st->data[2] != AF9005_REGISTER_RW_ACK) {
err("generic read/write, wrong reply code.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- if (ibuf[3] != 0x0d) {
+ if (st->data[3] != 0x0d) {
err("generic read/write, wrong length in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- if (ibuf[4] != obuf[4]) {
+ if (st->data[4] != seq) {
err("generic read/write, wrong sequence in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
/*
- Windows driver doesn't check these fields, in fact sometimes
- the register in the reply is different that what has been sent
-
- if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) {
- err("generic read/write, wrong register in reply.");
- return -EIO;
- }
- if (ibuf[7] != command) {
- err("generic read/write wrong command in reply.");
- return -EIO;
- }
+ * In theory, both input and output buffers should have
+ * identical values for st->data[5] to st->data[8].
+ * However, the Windows driver doesn't check these fields; in fact,
+ * sometimes the register in the reply is different from what
+ * has been sent
*/
- if (ibuf[16] != 0x01) {
+ if (st->data[16] != 0x01) {
err("generic read/write wrong status code in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
+
if (readwrite == AF9005_CMD_READ)
for (i = 0; i < len; i++)
- values[i] = ibuf[8 + i];
+ values[i] = st->data[8 + i];
- return 0;
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
@@ -464,8 +464,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
struct af9005_device_state *st = d->priv;
int ret, i, packet_len;
- u8 buf[64];
- u8 ibuf[64];
+ u8 seq;
if (wlen < 0) {
err("send command, wlen less than 0 bytes. Makes no sense.");
@@ -480,94 +479,97 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
return -EINVAL;
}
packet_len = wlen + 5;
- buf[0] = (u8) (packet_len & 0xff);
- buf[1] = (u8) ((packet_len & 0xff00) >> 8);
-
- buf[2] = 0x26; /* packet type */
- buf[3] = wlen + 3;
- buf[4] = st->sequence++;
- buf[5] = command;
- buf[6] = wlen;
+
+ mutex_lock(&d->data_mutex);
+
+ st->data[0] = (u8) (packet_len & 0xff);
+ st->data[1] = (u8) ((packet_len & 0xff00) >> 8);
+
+ st->data[2] = 0x26; /* packet type */
+ st->data[3] = wlen + 3;
+ st->data[4] = seq = st->sequence++;
+ st->data[5] = command;
+ st->data[6] = wlen;
for (i = 0; i < wlen; i++)
- buf[7 + i] = wbuf[i];
- ret = dvb_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0);
- if (ret)
- return ret;
- if (ibuf[2] != 0x27) {
+ st->data[7 + i] = wbuf[i];
+ ret = dvb_usb_generic_rw(d, st->data, wlen + 7, st->data, rlen + 7, 0);
+ if (st->data[2] != 0x27) {
err("send command, wrong reply code.");
- return -EIO;
- }
- if (ibuf[4] != buf[4]) {
+ ret = -EIO;
+ } else if (st->data[4] != seq) {
err("send command, wrong sequence in reply.");
- return -EIO;
- }
- if (ibuf[5] != 0x01) {
+ ret = -EIO;
+ } else if (st->data[5] != 0x01) {
err("send command, wrong status code in reply.");
- return -EIO;
- }
- if (ibuf[6] != rlen) {
+ ret = -EIO;
+ } else if (st->data[6] != rlen) {
err("send command, invalid data length in reply.");
- return -EIO;
+ ret = -EIO;
}
- for (i = 0; i < rlen; i++)
- rbuf[i] = ibuf[i + 7];
- return 0;
+ if (!ret) {
+ for (i = 0; i < rlen; i++)
+ rbuf[i] = st->data[i + 7];
+ }
+
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
int len)
{
struct af9005_device_state *st = d->priv;
- u8 obuf[16], ibuf[14];
+ u8 seq;
int ret, i;
- memset(obuf, 0, sizeof(obuf));
- memset(ibuf, 0, sizeof(ibuf));
+ mutex_lock(&d->data_mutex);
- obuf[0] = 14; /* length of rest of packet low */
- obuf[1] = 0; /* length of rest of packer high */
+ memset(st->data, 0, sizeof(st->data));
- obuf[2] = 0x2a; /* read/write eeprom */
+ st->data[0] = 14; /* length of rest of packet low */
+ st->data[1] = 0; /* length of rest of packet high */
- obuf[3] = 12; /* size */
+ st->data[2] = 0x2a; /* read/write eeprom */
- obuf[4] = st->sequence++;
+ st->data[3] = 12; /* size */
- obuf[5] = 0; /* read */
+ st->data[4] = seq = st->sequence++;
- obuf[6] = len;
- obuf[7] = address;
- ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 14, 0);
- if (ret)
- return ret;
- if (ibuf[2] != 0x2b) {
+ st->data[5] = 0; /* read */
+
+ st->data[6] = len;
+ st->data[7] = address;
+ ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 14, 0);
+ if (st->data[2] != 0x2b) {
err("Read eeprom, invalid reply code");
- return -EIO;
- }
- if (ibuf[3] != 10) {
+ ret = -EIO;
+ } else if (st->data[3] != 10) {
err("Read eeprom, invalid reply length");
- return -EIO;
- }
- if (ibuf[4] != obuf[4]) {
+ ret = -EIO;
+ } else if (st->data[4] != seq) {
err("Read eeprom, wrong sequence in reply ");
- return -EIO;
- }
- if (ibuf[5] != 1) {
+ ret = -EIO;
+ } else if (st->data[5] != 1) {
err("Read eeprom, wrong status in reply ");
- return -EIO;
+ ret = -EIO;
}
- for (i = 0; i < len; i++) {
- values[i] = ibuf[6 + i];
+
+ if (!ret) {
+ for (i = 0; i < len; i++)
+ values[i] = st->data[6 + i];
}
- return 0;
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
-static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply)
+static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
+ u8 *buf, int size)
{
- u8 buf[FW_BULKOUT_SIZE + 2];
u16 checksum;
int act_len, i, ret;
- memset(buf, 0, sizeof(buf));
+
+ memset(buf, 0, size);
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
switch (type) {
@@ -720,15 +722,21 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
{
int i, packets, ret, act_len;
- u8 buf[FW_BULKOUT_SIZE + 2];
+ u8 *buf;
u8 reply;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != 0x01) {
err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
packets = fw->size / FW_BULKOUT_SIZE;
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
@@ -743,28 +751,35 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
buf, FW_BULKOUT_SIZE + 2, &act_len, 1000);
if (ret) {
err("firmware download failed at packet %d with code %d", i, ret);
- return ret;
+ goto err;
}
}
- ret = af9005_boot_packet(udev, FW_CONFIRM, &reply);
+ ret = af9005_boot_packet(udev, FW_CONFIRM, &reply,
+ buf, FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != (u8) (packets & 0xff)) {
err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
- ret = af9005_boot_packet(udev, FW_BOOT, &reply);
+ ret = af9005_boot_packet(udev, FW_BOOT, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ goto err;
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != 0x02) {
err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
- return 0;
+err:
+ kfree(buf);
+ return ret;
}
@@ -823,53 +838,59 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
{
struct af9005_device_state *st = d->priv;
int ret, len;
-
- u8 obuf[5];
- u8 ibuf[256];
+ u8 seq;
*state = REMOTE_NO_KEY_PRESSED;
if (rc_decode == NULL) {
/* it should never come here */
return 0;
}
+
+ mutex_lock(&d->data_mutex);
+
/* deb_info("rc_query\n"); */
- obuf[0] = 3; /* rest of packet length low */
- obuf[1] = 0; /* rest of packet lentgh high */
- obuf[2] = 0x40; /* read remote */
- obuf[3] = 1; /* rest of packet length */
- obuf[4] = st->sequence++; /* sequence number */
- ret = dvb_usb_generic_rw(d, obuf, 5, ibuf, 256, 0);
+ st->data[0] = 3; /* rest of packet length low */
+ st->data[1] = 0; /* rest of packet length high */
+ st->data[2] = 0x40; /* read remote */
+ st->data[3] = 1; /* rest of packet length */
+ st->data[4] = seq = st->sequence++; /* sequence number */
+ ret = dvb_usb_generic_rw(d, st->data, 5, st->data, 256, 0);
if (ret) {
err("rc query failed");
- return ret;
+ goto ret;
}
- if (ibuf[2] != 0x41) {
+ if (st->data[2] != 0x41) {
err("rc query bad header.");
- return -EIO;
- }
- if (ibuf[4] != obuf[4]) {
+ ret = -EIO;
+ goto ret;
+ } else if (st->data[4] != seq) {
err("rc query bad sequence.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- len = ibuf[5];
+ len = st->data[5];
if (len > 246) {
err("rc query invalid length");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
if (len > 0) {
deb_rc("rc data (%d) ", len);
- debug_dump((ibuf + 6), len, deb_rc);
- ret = rc_decode(d, &ibuf[6], len, event, state);
+ debug_dump((st->data + 6), len, deb_rc);
+ ret = rc_decode(d, &st->data[6], len, event, state);
if (ret) {
err("rc_decode failed");
- return ret;
+ goto ret;
} else {
deb_rc("rc_decode state %x event %x\n", *state, *event);
if (*state == REMOTE_KEY_REPEAT)
*event = d->last_event;
}
}
- return 0;
+
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff)
@@ -953,10 +974,16 @@ static int af9005_identify_state(struct usb_device *udev,
int *cold)
{
int ret;
- u8 reply;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ u8 reply, *buf;
+
+ buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply,
+ buf, FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
deb_info("result of FW_CONFIG in identify state %d\n", reply);
if (reply == 0x01)
*cold = 1;
@@ -965,7 +992,10 @@ static int af9005_identify_state(struct usb_device *udev,
else
return -EIO;
deb_info("Identify state cold = %d\n", *cold);
- return 0;
+
+err:
+ kfree(buf);
+ return ret;
}
static struct dvb_usb_device_properties af9005_properties;
@@ -974,7 +1004,7 @@ static int af9005_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return dvb_usb_device_init(intf, &af9005_properties,
- THIS_MODULE, NULL, adapter_nr);
+ THIS_MODULE, NULL, adapter_nr);
}
enum af9005_usb_table_entry {
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 9fd1527494eb..290275bc7fde 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -41,6 +41,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct cinergyt2_state {
u8 rc_counter;
+ unsigned char data[64];
};
/* We are missing a release hook with usb_device data */
@@ -50,38 +51,57 @@ static struct dvb_usb_device_properties cinergyt2_properties;
static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
{
- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
- char result[64];
- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
- sizeof(result), 0);
+ struct dvb_usb_device *d = adap->dev;
+ struct cinergyt2_state *st = d->priv;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
+ st->data[1] = enable ? 1 : 0;
+
+ ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0);
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
{
- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
- char state[3];
- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
+ struct cinergyt2_state *st = d->priv;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_SLEEP_MODE;
+ st->data[1] = enable ? 0 : 1;
+
+ ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0);
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
{
- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
- char state[3];
+ struct dvb_usb_device *d = adap->dev;
+ struct cinergyt2_state *st = d->priv;
int ret;
adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
- sizeof(state), 0);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
if (ret < 0) {
deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
"state info\n");
}
+ mutex_unlock(&d->data_mutex);
/* Copy this pointer as we are gonna need it in the release phase */
cinergyt2_usb_device = adap->dev;
- return 0;
+ return ret;
}
static struct rc_map_table rc_map_cinergyt2_table[] = {
@@ -141,13 +161,18 @@ static int repeatable_keys[] = {
static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
struct cinergyt2_state *st = d->priv;
- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
- int i;
+ int i, ret;
*state = REMOTE_NO_KEY_PRESSED;
- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
- if (key[4] == 0xff) {
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ if (st->data[4] == 0xff) {
/* key repeat */
st->rc_counter++;
if (st->rc_counter > RC_REPEAT_DELAY) {
@@ -157,34 +182,36 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
*event = d->last_event;
deb_rc("repeat key, event %x\n",
*event);
- return 0;
+ goto ret;
}
}
deb_rc("repeated key (non repeatable)\n");
}
- return 0;
+ goto ret;
}
/* hack to pass checksum on the custom field */
- key[2] = ~key[1];
- dvb_usb_nec_rc_key_to_event(d, key, event, state);
- if (key[0] != 0) {
+ st->data[2] = ~st->data[1];
+ dvb_usb_nec_rc_key_to_event(d, st->data, event, state);
+ if (st->data[0] != 0) {
if (*event != d->last_event)
st->rc_counter = 0;
- deb_rc("key: %*ph\n", 5, key);
+ deb_rc("key: %*ph\n", 5, st->data);
}
- return 0;
+
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int cinergyt2_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return dvb_usb_device_init(intf, &cinergyt2_properties,
- THIS_MODULE, NULL, adapter_nr);
+ THIS_MODULE, NULL, adapter_nr);
}
-
static struct usb_device_id cinergyt2_usb_table[] = {
{ USB_DEVICE(USB_VID_TERRATEC, 0x0038) },
{ 0 }
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index b3ec743a7a2e..2d29b4174dba 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -139,32 +139,42 @@ static uint16_t compute_tps(struct dtv_frontend_properties *op)
struct cinergyt2_fe_state {
struct dvb_frontend fe;
struct dvb_usb_device *d;
+
+ unsigned char data[64];
+ struct mutex data_mutex;
+
+ struct dvbt_get_status_msg status;
};
static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg result;
- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
- sizeof(result), 0);
+ mutex_lock(&state->data_mutex);
+ state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1,
+ state->data, sizeof(state->status), 0);
+ if (!ret)
+ memcpy(&state->status, state->data, sizeof(state->status));
+ mutex_unlock(&state->data_mutex);
+
if (ret < 0)
return ret;
*status = 0;
- if (0xffff - le16_to_cpu(result.gain) > 30)
+ if (0xffff - le16_to_cpu(state->status.gain) > 30)
*status |= FE_HAS_SIGNAL;
- if (result.lock_bits & (1 << 6))
+ if (state->status.lock_bits & (1 << 6))
*status |= FE_HAS_LOCK;
- if (result.lock_bits & (1 << 5))
+ if (state->status.lock_bits & (1 << 5))
*status |= FE_HAS_SYNC;
- if (result.lock_bits & (1 << 4))
+ if (state->status.lock_bits & (1 << 4))
*status |= FE_HAS_CARRIER;
- if (result.lock_bits & (1 << 1))
+ if (state->status.lock_bits & (1 << 1))
*status |= FE_HAS_VITERBI;
if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
@@ -177,34 +187,16 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
-
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0)
- return ret;
- *ber = le32_to_cpu(status.viterbi_error_rate);
+ *ber = le32_to_cpu(state->status.viterbi_error_rate);
return 0;
}
static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
- ret);
- return ret;
- }
- *unc = le32_to_cpu(status.uncorrected_block_count);
+ *unc = le32_to_cpu(state->status.uncorrected_block_count);
return 0;
}
@@ -212,35 +204,16 @@ static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
u16 *strength)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_signal_strength() Failed!"
- " (Error=%d)\n", ret);
- return ret;
- }
- *strength = (0xffff - le16_to_cpu(status.gain));
+ *strength = (0xffff - le16_to_cpu(state->status.gain));
return 0;
}
static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
- return ret;
- }
- *snr = (status.snr << 8) | status.snr;
+ *snr = (state->status.snr << 8) | state->status.snr;
return 0;
}
@@ -266,34 +239,36 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_set_parameters_msg param;
- char result[2];
+ struct dvbt_set_parameters_msg *param;
int err;
- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
- param.tps = cpu_to_le16(compute_tps(fep));
- param.freq = cpu_to_le32(fep->frequency / 1000);
- param.flags = 0;
+ mutex_lock(&state->data_mutex);
+
+ param = (void *)state->data;
+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
+ param->tps = cpu_to_le16(compute_tps(fep));
+ param->freq = cpu_to_le32(fep->frequency / 1000);
+ param->flags = 0;
switch (fep->bandwidth_hz) {
default:
case 8000000:
- param.bandwidth = 8;
+ param->bandwidth = 8;
break;
case 7000000:
- param.bandwidth = 7;
+ param->bandwidth = 7;
break;
case 6000000:
- param.bandwidth = 6;
+ param->bandwidth = 6;
break;
}
- err = dvb_usb_generic_rw(state->d,
- (char *)&param, sizeof(param),
- result, sizeof(result), 0);
+ err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param),
+ state->data, 2, 0);
if (err < 0)
err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
+ mutex_unlock(&state->data_mutex);
return (err < 0) ? err : 0;
}
@@ -315,6 +290,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
s->d = d;
memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops));
s->fe.demodulator_priv = s;
+ mutex_init(&s->data_mutex);
return &s->fe;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 907ac01ae297..243403081fa5 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -45,9 +45,6 @@
#include "si2168.h"
#include "si2157.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 80
-
/* debug */
static int dvb_usb_cxusb_debug;
module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
@@ -61,23 +58,27 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[MAX_XFER_SIZE];
+ struct cxusb_state *st = d->priv;
+ int ret, wo;
- if (1 + wlen > sizeof(sndbuf)) {
- warn("i2c wr: len=%d is too big!\n",
- wlen);
+ if (1 + wlen > MAX_XFER_SIZE) {
+ warn("i2c wr: len=%d is too big!\n", wlen);
return -EOPNOTSUPP;
}
- memset(sndbuf, 0, 1+wlen);
+ wo = (rbuf == NULL || rlen == 0); /* write-only */
- sndbuf[0] = cmd;
- memcpy(&sndbuf[1], wbuf, wlen);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = cmd;
+ memcpy(&st->data[1], wbuf, wlen);
if (wo)
- return dvb_usb_generic_write(d, sndbuf, 1+wlen);
+ ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
else
- return dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
+ rbuf, rlen, 0);
+
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
/* GPIO */
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 527ff7905e15..18acda19527a 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,10 +28,15 @@
#define CMD_ANALOG 0x50
#define CMD_DIGITAL 0x51
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 80
+
struct cxusb_state {
u8 gpio_write_state[3];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
+
+ unsigned char data[MAX_XFER_SIZE];
};
#endif
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index f3196658fb70..47ce9d5de4c6 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -213,7 +213,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
usb_rcvctrlpipe(d->udev, 0),
REQUEST_NEW_I2C_READ,
USB_TYPE_VENDOR | USB_DIR_IN,
- value, index, msg[i].buf,
+ value, index, st->buf,
msg[i].len,
USB_CTRL_GET_TIMEOUT);
if (result < 0) {
@@ -221,6 +221,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
break;
}
+ if (msg[i].len > sizeof(st->buf)) {
+ deb_info("buffer too small to fit %d bytes\n",
+ msg[i].len);
+ return -EIO;
+ }
+
+ memcpy(msg[i].buf, st->buf, msg[i].len);
+
deb_data("<<< ");
debug_dump(msg[i].buf, msg[i].len, deb_data);
@@ -238,6 +246,13 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* I2C ctrl + FE bus; */
st->buf[3] = ((gen_mode << 6) & 0xC0) |
((bus_mode << 4) & 0x30);
+
+ if (msg[i].len > sizeof(st->buf) - 4) {
+ deb_info("i2c message to big: %d\n",
+ msg[i].len);
+ return -EIO;
+ }
+
/* The Actual i2c payload */
memcpy(&st->buf[4], msg[i].buf, msg[i].len);
@@ -283,6 +298,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
/* fill in the address */
st->buf[1] = msg[i].addr << 1;
/* fill the buffer */
+ if (msg[i].len > sizeof(st->buf) - 2) {
+ deb_info("i2c xfer to big: %d\n",
+ msg[i].len);
+ return -EIO;
+ }
memcpy(&st->buf[2], msg[i].buf, msg[i].len);
/* write/read request */
@@ -292,13 +312,20 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
/* special thing in the current firmware: when length is zero the read-failed */
len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2,
- msg[i+1].buf, msg[i+1].len);
+ st->buf, msg[i + 1].len);
if (len <= 0) {
deb_info("I2C read failed on address 0x%02x\n",
msg[i].addr);
break;
}
+ if (msg[i + 1].len > sizeof(st->buf)) {
+ deb_info("i2c xfer buffer to small for %d\n",
+ msg[i].len);
+ return -EIO;
+ }
+ memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len);
+
msg[i+1].len = len;
i++;
@@ -677,7 +704,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
struct dvb_usb_device *d = purb->context;
struct dib0700_rc_response *poll_reply;
enum rc_type protocol;
- u32 uninitialized_var(keycode);
+ u32 keycode;
u8 toggle;
deb_info("%s()\n", __func__);
@@ -718,7 +745,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
poll_reply->nec.data == 0x00 &&
poll_reply->nec.not_data == 0xff) {
poll_reply->data_state = 2;
- break;
+ rc_repeat(d->rc_dev);
+ goto resubmit;
}
if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 0857b56e652c..ef1b8ee75c57 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -508,8 +508,6 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap)
#define DEFAULT_RC_INTERVAL 50
-static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
-
/*
* This function is used only when firmware is < 1.20 version. Newer
* firmwares use bulk mode, with functions implemented at dib0700_core,
@@ -517,7 +515,6 @@ static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
*/
static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
{
- u8 key[4];
enum rc_type protocol;
u32 scancode;
u8 toggle;
@@ -532,39 +529,43 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
return 0;
}
- i = dib0700_ctrl_rd(d, rc_request, 2, key, 4);
+ st->buf[0] = REQUEST_POLL_RC;
+ st->buf[1] = 0;
+
+ i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4);
if (i <= 0) {
err("RC Query Failed");
- return -1;
+ return -EIO;
}
/* losing half of KEY_0 events from Philipps rc5 remotes.. */
- if (key[0] == 0 && key[1] == 0 && key[2] == 0 && key[3] == 0)
+ if (st->buf[0] == 0 && st->buf[1] == 0
+ && st->buf[2] == 0 && st->buf[3] == 0)
return 0;
- /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)key[3-2],(int)key[3-3],(int)key[3-1],(int)key[3]); */
+ /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */
dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */
switch (d->props.rc.core.protocol) {
case RC_BIT_NEC:
/* NEC protocol sends repeat code as 0 0 0 FF */
- if ((key[3-2] == 0x00) && (key[3-3] == 0x00) &&
- (key[3] == 0xff)) {
+ if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) &&
+ (st->buf[3] == 0xff)) {
rc_repeat(d->rc_dev);
return 0;
}
protocol = RC_TYPE_NEC;
- scancode = RC_SCANCODE_NEC(key[3-2], key[3-3]);
+ scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]);
toggle = 0;
break;
default:
/* RC-5 protocol changes toggle bit on new keypress */
protocol = RC_TYPE_RC5;
- scancode = RC_SCANCODE_RC5(key[3-2], key[3-3]);
- toggle = key[3-1];
+ scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]);
+ toggle = st->buf[3 - 1];
break;
}
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 18ed3bfbb5e2..de3ee2547479 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -62,72 +62,117 @@ EXPORT_SYMBOL(dibusb_pid_filter_ctrl);
int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b[3];
+ u8 *b;
int ret;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP;
- ret = dvb_usb_generic_write(d,b,3);
+
+ ret = dvb_usb_generic_write(d, b, 3);
+
+ kfree(b);
+
msleep(10);
+
return ret;
}
EXPORT_SYMBOL(dibusb_power_ctrl);
int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- u8 b[3] = { 0 };
int ret;
+ u8 *b;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0)
- return ret;
+ goto ret;
if (onoff) {
b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
b[1] = 0x00;
- if ((ret = dvb_usb_generic_write(adap->dev,b,2)) < 0)
- return ret;
+ ret = dvb_usb_generic_write(adap->dev, b, 2);
+ if (ret < 0)
+ goto ret;
}
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
- return dvb_usb_generic_write(adap->dev,b,3);
+ ret = dvb_usb_generic_write(adap->dev, b, 3);
+
+ret:
+ kfree(b);
+ return ret;
}
EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- if (onoff) {
- u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE, DIBUSB_IOCTL_POWER_WAKEUP };
- return dvb_usb_generic_write(d,b,3);
- } else
+ u8 *b;
+ int ret;
+
+ if (!onoff)
return 0;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ b[0] = DIBUSB_REQ_SET_IOCTL;
+ b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
+ b[2] = DIBUSB_IOCTL_POWER_WAKEUP;
+
+ ret = dvb_usb_generic_write(d, b, 3);
+
+ kfree(b);
+
+ return ret;
}
EXPORT_SYMBOL(dibusb2_0_power_ctrl);
static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
- u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
+ u8 *sndbuf;
+ int ret, wo, len;
+
/* write only ? */
- int wo = (rbuf == NULL || rlen == 0),
- len = 2 + wlen + (wo ? 0 : 2);
+ wo = (rbuf == NULL || rlen == 0);
+
+ len = 2 + wlen + (wo ? 0 : 2);
+
+ sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL);
+ if (!sndbuf)
+ return -ENOMEM;
- if (4 + wlen > sizeof(sndbuf)) {
+ if (4 + wlen > MAX_XFER_SIZE) {
warn("i2c wr: len=%d is too big!\n", wlen);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto ret;
}
sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
- memcpy(&sndbuf[2],wbuf,wlen);
+ memcpy(&sndbuf[2], wbuf, wlen);
if (!wo) {
- sndbuf[wlen+2] = (rlen >> 8) & 0xff;
- sndbuf[wlen+3] = rlen & 0xff;
+ sndbuf[wlen + 2] = (rlen >> 8) & 0xff;
+ sndbuf[wlen + 3] = rlen & 0xff;
}
- return dvb_usb_generic_rw(d,sndbuf,len,rbuf,rlen,0);
+ ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0);
+
+ret:
+ kfree(sndbuf);
+ return ret;
}
/*
@@ -319,11 +364,27 @@ EXPORT_SYMBOL(rc_map_dibusb_table);
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- u8 key[5],cmd = DIBUSB_REQ_POLL_REMOTE;
- dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- dvb_usb_nec_rc_key_to_event(d,key,event,state);
- if (key[0] != 0)
- deb_info("key: %*ph\n", 5, key);
- return 0;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = DIBUSB_REQ_POLL_REMOTE;
+
+ ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ dvb_usb_nec_rc_key_to_event(d, buf, event, state);
+
+ if (buf[0] != 0)
+ deb_info("key: %*ph\n", 5, buf);
+
+ret:
+ kfree(buf);
+ return ret;
}
EXPORT_SYMBOL(dibusb_rc_query);
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h
index 3f82163d8ab8..697be2a17ade 100644
--- a/drivers/media/usb/dvb-usb/dibusb.h
+++ b/drivers/media/usb/dvb-usb/dibusb.h
@@ -96,6 +96,9 @@
#define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01
#define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct dibusb_state {
struct dib_fe_xfer_ops ops;
int mt2060_present;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 63134335c994..4284f6984dc1 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -28,22 +28,26 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int digitv_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[7],rcvbuf[7];
- memset(sndbuf,0,7); memset(rcvbuf,0,7);
+ struct digitv_state *st = d->priv;
+ int ret, wo;
- sndbuf[0] = cmd;
- sndbuf[1] = vv;
- sndbuf[2] = wo ? wlen : rlen;
+ wo = (rbuf == NULL || rlen == 0); /* write-only */
+
+ memset(st->sndbuf, 0, 7);
+ memset(st->rcvbuf, 0, 7);
+
+ st->sndbuf[0] = cmd;
+ st->sndbuf[1] = vv;
+ st->sndbuf[2] = wo ? wlen : rlen;
if (wo) {
- memcpy(&sndbuf[3],wbuf,wlen);
- dvb_usb_generic_write(d,sndbuf,7);
+ memcpy(&st->sndbuf[3], wbuf, wlen);
+ ret = dvb_usb_generic_write(d, st->sndbuf, 7);
} else {
- dvb_usb_generic_rw(d,sndbuf,7,rcvbuf,7,10);
- memcpy(rbuf,&rcvbuf[3],rlen);
+ ret = dvb_usb_generic_rw(d, st->sndbuf, 7, st->rcvbuf, 7, 10);
+ memcpy(rbuf, &st->rcvbuf[3], rlen);
}
- return 0;
+ return ret;
}
/* I2C */
diff --git a/drivers/media/usb/dvb-usb/digitv.h b/drivers/media/usb/dvb-usb/digitv.h
index 908c09f4966b..581e09c25491 100644
--- a/drivers/media/usb/dvb-usb/digitv.h
+++ b/drivers/media/usb/dvb-usb/digitv.h
@@ -5,7 +5,10 @@
#include "dvb-usb.h"
struct digitv_state {
- int is_nxt6000;
+ int is_nxt6000;
+
+ unsigned char sndbuf[7];
+ unsigned char rcvbuf[7];
};
/* protocol (from usblogging and the SDK:
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index c09332bd99cb..f5c042baa254 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -18,17 +18,28 @@ struct dtt200u_fe_state {
struct dtv_frontend_properties fep;
struct dvb_frontend frontend;
+
+ unsigned char data[80];
+ struct mutex data_mutex;
};
static int dtt200u_fe_read_status(struct dvb_frontend *fe,
enum fe_status *stat)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 st = GET_TUNE_STATUS, b[3];
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_TUNE_STATUS;
- dvb_usb_generic_rw(state->d,&st,1,b,3,0);
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
+ if (ret < 0) {
+ *stat = 0;
+ mutex_unlock(&state->data_mutex);
+ return ret;
+ }
- switch (b[0]) {
+ switch (state->data[0]) {
case 0x01:
*stat = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
@@ -41,51 +52,86 @@ static int dtt200u_fe_read_status(struct dvb_frontend *fe,
*stat = 0;
break;
}
+ mutex_unlock(&state->data_mutex);
return 0;
}
static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_VIT_ERR_CNT,b[3];
- dvb_usb_generic_rw(state->d,&bw,1,b,3,0);
- *ber = (b[0] << 16) | (b[1] << 8) | b[2];
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_VIT_ERR_CNT;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
+ if (ret >= 0)
+ *ber = (state->data[0] << 16) | (state->data[1] << 8) | state->data[2];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_RS_UNCOR_BLK_CNT,b[2];
+ int ret;
- dvb_usb_generic_rw(state->d,&bw,1,b,2,0);
- *unc = (b[0] << 8) | b[1];
- return 0;
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_RS_UNCOR_BLK_CNT;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 2, 0);
+ if (ret >= 0)
+ *unc = (state->data[0] << 8) | state->data[1];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_AGC, b;
- dvb_usb_generic_rw(state->d,&bw,1,&b,1,0);
- *strength = (b << 8) | b;
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_AGC;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
+ if (ret >= 0)
+ *strength = (state->data[0] << 8) | state->data[0];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_SNR,br;
- dvb_usb_generic_rw(state->d,&bw,1,&br,1,0);
- *snr = ~((br << 8) | br);
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_SNR;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
+ if (ret >= 0)
+ *snr = ~((state->data[0] << 8) | state->data[0]);
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_init(struct dvb_frontend* fe)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 b = SET_INIT;
- return dvb_usb_generic_write(state->d,&b,1);
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = SET_INIT;
+
+ ret = dvb_usb_generic_write(state->d, state->data, 1);
+ mutex_unlock(&state->data_mutex);
+
+ return ret;
}
static int dtt200u_fe_sleep(struct dvb_frontend* fe)
@@ -105,39 +151,40 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dtt200u_fe_state *state = fe->demodulator_priv;
- int i;
- enum fe_status st;
+ int ret;
u16 freq = fep->frequency / 250000;
- u8 bwbuf[2] = { SET_BANDWIDTH, 0 },freqbuf[3] = { SET_RF_FREQ, 0, 0 };
+ mutex_lock(&state->data_mutex);
+ state->data[0] = SET_BANDWIDTH;
switch (fep->bandwidth_hz) {
case 8000000:
- bwbuf[1] = 8;
+ state->data[1] = 8;
break;
case 7000000:
- bwbuf[1] = 7;
+ state->data[1] = 7;
break;
case 6000000:
- bwbuf[1] = 6;
+ state->data[1] = 6;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto ret;
}
- dvb_usb_generic_write(state->d,bwbuf,2);
+ ret = dvb_usb_generic_write(state->d, state->data, 2);
+ if (ret < 0)
+ goto ret;
- freqbuf[1] = freq & 0xff;
- freqbuf[2] = (freq >> 8) & 0xff;
- dvb_usb_generic_write(state->d,freqbuf,3);
+ state->data[0] = SET_RF_FREQ;
+ state->data[1] = freq & 0xff;
+ state->data[2] = (freq >> 8) & 0xff;
+ ret = dvb_usb_generic_write(state->d, state->data, 3);
+ if (ret < 0)
+ goto ret;
- for (i = 0; i < 30; i++) {
- msleep(20);
- dtt200u_fe_read_status(fe, &st);
- if (st & FE_TIMEDOUT)
- continue;
- }
-
- return 0;
+ret:
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
@@ -169,6 +216,7 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
deb_info("attaching frontend dtt200u\n");
state->d = d;
+ mutex_init(&state->data_mutex);
memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index d2a01b50af0d..fcbff7fb0c4e 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -20,75 +20,115 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2 (or-able))." DVB_USB
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct dtt200u_state {
+ unsigned char data[80];
+};
+
static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b = SET_INIT;
+ struct dtt200u_state *st = d->priv;
+ int ret = 0;
+
+ mutex_lock(&d->data_mutex);
+
+ st->data[0] = SET_INIT;
if (onoff)
- dvb_usb_generic_write(d,&b,2);
+ ret = dvb_usb_generic_write(d, st->data, 2);
- return 0;
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- u8 b_streaming[2] = { SET_STREAMING, onoff };
- u8 b_rst_pid = RESET_PID_FILTER;
+ struct dvb_usb_device *d = adap->dev;
+ struct dtt200u_state *st = d->priv;
+ int ret;
- dvb_usb_generic_write(adap->dev, b_streaming, 2);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = SET_STREAMING;
+ st->data[1] = onoff;
- if (onoff == 0)
- dvb_usb_generic_write(adap->dev, &b_rst_pid, 1);
- return 0;
+ ret = dvb_usb_generic_write(adap->dev, st->data, 2);
+ if (ret < 0)
+ goto ret;
+
+ if (onoff)
+ goto ret;
+
+ st->data[0] = RESET_PID_FILTER;
+ ret = dvb_usb_generic_write(adap->dev, st->data, 1);
+
+ret:
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
{
- u8 b_pid[4];
+ struct dvb_usb_device *d = adap->dev;
+ struct dtt200u_state *st = d->priv;
+ int ret;
+
pid = onoff ? pid : 0;
- b_pid[0] = SET_PID_FILTER;
- b_pid[1] = index;
- b_pid[2] = pid & 0xff;
- b_pid[3] = (pid >> 8) & 0x1f;
+ mutex_lock(&d->data_mutex);
+ st->data[0] = SET_PID_FILTER;
+ st->data[1] = index;
+ st->data[2] = pid & 0xff;
+ st->data[3] = (pid >> 8) & 0x1f;
+
+ ret = dvb_usb_generic_write(adap->dev, st->data, 4);
+ mutex_unlock(&d->data_mutex);
- return dvb_usb_generic_write(adap->dev, b_pid, 4);
+ return ret;
}
static int dtt200u_rc_query(struct dvb_usb_device *d)
{
- u8 key[5],cmd = GET_RC_CODE;
+ struct dtt200u_state *st = d->priv;
u32 scancode;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = GET_RC_CODE;
- dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- if (key[0] == 1) {
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ if (st->data[0] == 1) {
enum rc_type proto = RC_TYPE_NEC;
- scancode = key[1];
- if ((u8) ~key[1] != key[2]) {
+ scancode = st->data[1];
+ if ((u8) ~st->data[1] != st->data[2]) {
/* Extended NEC */
scancode = scancode << 8;
- scancode |= key[2];
+ scancode |= st->data[2];
proto = RC_TYPE_NECX;
}
scancode = scancode << 8;
- scancode |= key[3];
+ scancode |= st->data[3];
/* Check command checksum is ok */
- if ((u8) ~key[3] == key[4])
+ if ((u8) ~st->data[3] == st->data[4])
rc_keydown(d->rc_dev, proto, scancode, 0);
else
rc_keyup(d->rc_dev);
- } else if (key[0] == 2) {
+ } else if (st->data[0] == 2) {
rc_repeat(d->rc_dev);
} else {
rc_keyup(d->rc_dev);
}
- if (key[0] != 0)
- deb_info("key: %*ph\n", 5, key);
+ if (st->data[0] != 0)
+ deb_info("st->data: %*ph\n", 5, st->data);
- return 0;
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap)
@@ -140,6 +180,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-dtt200u-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -190,6 +232,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-02.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -240,6 +284,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-fc03.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -290,6 +336,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-zl0353-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -340,6 +388,8 @@ static struct dvb_usb_device_properties wt220u_miglia_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-miglia-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.generic_bulk_ctrl_endpoint = 0x01,
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index 3d11df41cac0..c60fb54f445f 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -31,9 +31,14 @@ module_param_named(debug, dvb_usb_dtv5100_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct dtv5100_state {
+ unsigned char data[80];
+};
+
static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
+ struct dtv5100_state *st = d->priv;
u8 request;
u8 type;
u16 value;
@@ -60,9 +65,10 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
}
index = (addr << 8) + wbuf[0];
+ memcpy(st->data, rbuf, rlen);
msleep(1); /* avoid I2C errors */
return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
- type, value, index, rbuf, rlen,
+ type, value, index, st->data, rlen,
DTV5100_USB_TIMEOUT);
}
@@ -176,7 +182,7 @@ static struct dvb_usb_device_properties dtv5100_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .size_of_priv = 0,
+ .size_of_priv = sizeof(struct dtv5100_state),
.num_adapters = 1,
.adapter = {{
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 3896ba9a4179..84308569e7dc 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -142,6 +142,7 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
{
int ret = 0;
+ mutex_init(&d->data_mutex);
mutex_init(&d->usb_mutex);
mutex_init(&d->i2c_mutex);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 639c4678c65b..107255b08b2b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -404,8 +404,12 @@ struct dvb_usb_adapter {
* Powered is in/decremented for each call to modify the state.
* @udev: pointer to the device's struct usb_device.
*
- * @usb_mutex: semaphore of USB control messages (reading needs two messages)
- * @i2c_mutex: semaphore for i2c-transfers
+ * @data_mutex: mutex to protect the data structure used to store URB data
+ * @usb_mutex: mutex for USB control messages (reading needs two messages).
+ *	Note that this mutex is taken internally by the generic
+ *	URB control functions, so drivers using dvb_usb_generic_rw() and
+ *	derived functions should not lock it themselves.
+ * @i2c_mutex: mutex for i2c-transfers
*
* @i2c_adap: device's i2c_adapter if it uses I2CoverUSB
*
@@ -433,6 +437,7 @@ struct dvb_usb_device {
int powered;
/* locking */
+ struct mutex data_mutex;
struct mutex usb_mutex;
/* i2c */
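
The @data_mutex comment above captures the pattern the driver conversions in this series follow: each driver keeps a preallocated, DMA-safe buffer in its private state and serializes access to it with d->data_mutex, while usb_mutex stays internal to dvb_usb_generic_rw(). A minimal sketch of that pattern is shown below; struct example_state, example_cmd() and the 64-byte buffer size are illustrative only and are not taken from any of the drivers touched here.

#include "dvb-usb.h"

/* Hypothetical driver state; sized via props.size_of_priv at init time. */
struct example_state {
	unsigned char data[64];	/* preallocated, DMA-safe URB buffer */
};

static int example_cmd(struct dvb_usb_device *d, u8 cmd, u8 *resp)
{
	struct example_state *st = d->priv;
	int ret;

	mutex_lock(&d->data_mutex);	/* protects st->data; usb_mutex is left to the core */
	st->data[0] = cmd;
	ret = dvb_usb_generic_rw(d, st->data, 1, st->data, sizeof(st->data), 0);
	if (!ret)
		*resp = st->data[0];
	mutex_unlock(&d->data_mutex);

	return ret;
}
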
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 5fb0c650926e..2c720cb2fb00 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -852,7 +852,7 @@ static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
if (i && !state->initialized) {
state->initialized = 1;
/* reset board */
- dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+ return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
}
return 0;
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 5d0384dd45b5..993bb7a72985 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -15,6 +15,7 @@
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "gp8psk.h"
+#include "gp8psk-fe.h"
/* debug */
static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw";
@@ -24,37 +25,19 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DV
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
-{
- return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6));
-}
-
-static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
-{
- return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1));
-}
-
-static void gp8psk_info(struct dvb_usb_device *d)
-{
- u8 fpga_vers, fw_vers[6];
-
- if (!gp8psk_get_fw_version(d, fw_vers))
- info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
- fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
- 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
- else
- info("failed to get FW version");
-
- if (!gp8psk_get_fpga_version(d, &fpga_vers))
- info("FPGA Version = %i", fpga_vers);
- else
- info("failed to get FPGA version");
-}
+struct gp8psk_state {
+ unsigned char data[80];
+};
-int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
+static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
{
+ struct gp8psk_state *st = d->priv;
int ret = 0,try = 0;
+ if (blen > sizeof(st->data))
+ return -EIO;
+
if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
return ret;
@@ -63,7 +46,7 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
usb_rcvctrlpipe(d->udev,0),
req,
USB_TYPE_VENDOR | USB_DIR_IN,
- value,index,b,blen,
+ value, index, st->data, blen,
2000);
deb_info("reading number %d (ret: %d)\n",try,ret);
try++;
@@ -72,8 +55,10 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
if (ret < 0 || ret != blen) {
warn("usb in %d operation failed.", req);
ret = -EIO;
- } else
+ } else {
ret = 0;
+ memcpy(b, st->data, blen);
+ }
deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
debug_dump(b,blen,deb_xfer);
@@ -83,22 +68,27 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
return ret;
}
-int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
+static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
u16 index, u8 *b, int blen)
{
+ struct gp8psk_state *st = d->priv;
int ret;
deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
debug_dump(b,blen,deb_xfer);
+ if (blen > sizeof(st->data))
+ return -EIO;
+
if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
return ret;
+ memcpy(st->data, b, blen);
if (usb_control_msg(d->udev,
usb_sndctrlpipe(d->udev,0),
req,
USB_TYPE_VENDOR | USB_DIR_OUT,
- value,index,b,blen,
+ value, index, st->data, blen,
2000) != blen) {
warn("usb out operation failed.");
ret = -EIO;
@@ -109,6 +99,34 @@ int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
return ret;
}
+
+static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
+{
+ return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6);
+}
+
+static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
+{
+ return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1);
+}
+
+static void gp8psk_info(struct dvb_usb_device *d)
+{
+ u8 fpga_vers, fw_vers[6];
+
+ if (!gp8psk_get_fw_version(d, fw_vers))
+ info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
+ fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
+ 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
+ else
+ info("failed to get FW version");
+
+ if (!gp8psk_get_fpga_version(d, &fpga_vers))
+ info("FPGA Version = %i", fpga_vers);
+ else
+ info("failed to get FPGA version");
+}
+
static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
{
int ret;
@@ -143,6 +161,11 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
err("failed to load bcm4500 firmware.");
goto out_free;
}
+ if (buflen > 64) {
+ err("firmare chunk size bigger than 64 bytes.");
+ goto out_free;
+ }
+
memcpy(buf, ptr, buflen);
if (dvb_usb_generic_write(d, buf, buflen)) {
err("failed to load bcm4500 firmware.");
@@ -206,10 +229,13 @@ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
return 0;
}
-int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
+static int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
{
u8 buf;
int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
+
+ deb_xfer("reloading firmware\n");
+
/* Turn off 8psk power */
if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1))
return -EINVAL;
@@ -228,9 +254,47 @@ static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0);
}
+/* Callbacks for gp8psk-fe.c */
+
+static int gp8psk_fe_in(void *priv, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_usb_in_op(d, req, value, index, b, blen);
+}
+
+static int gp8psk_fe_out(void *priv, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_usb_out_op(d, req, value, index, b, blen);
+}
+
+static int gp8psk_fe_reload(void *priv)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_bcm4500_reload(d);
+}
+
+const struct gp8psk_fe_ops gp8psk_fe_ops = {
+ .in = gp8psk_fe_in,
+ .out = gp8psk_fe_out,
+ .reload = gp8psk_fe_reload,
+};
+
static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev);
+ struct dvb_usb_device *d = adap->dev;
+ int id = le16_to_cpu(d->udev->descriptor.idProduct);
+ int is_rev1;
+
+ is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM);
+
+ adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach,
+ &gp8psk_fe_ops, d, is_rev1);
return 0;
}
@@ -265,6 +329,8 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-gp8psk-01.fw",
+ .size_of_priv = sizeof(struct gp8psk_state),
+
.num_adapters = 1,
.adapter = {
{
diff --git a/drivers/media/usb/dvb-usb/gp8psk.h b/drivers/media/usb/dvb-usb/gp8psk.h
index ed32b9da4843..d8975b866dee 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.h
+++ b/drivers/media/usb/dvb-usb/gp8psk.h
@@ -24,58 +24,6 @@ extern int dvb_usb_gp8psk_debug;
#define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args)
#define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args)
#define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args)
-#define deb_fe(args...) dprintk(dvb_usb_gp8psk_debug,0x08,args)
-
-/* Twinhan Vendor requests */
-#define TH_COMMAND_IN 0xC0
-#define TH_COMMAND_OUT 0xC1
-
-/* gp8psk commands */
-
-#define GET_8PSK_CONFIG 0x80 /* in */
-#define SET_8PSK_CONFIG 0x81
-#define I2C_WRITE 0x83
-#define I2C_READ 0x84
-#define ARM_TRANSFER 0x85
-#define TUNE_8PSK 0x86
-#define GET_SIGNAL_STRENGTH 0x87 /* in */
-#define LOAD_BCM4500 0x88
-#define BOOT_8PSK 0x89 /* in */
-#define START_INTERSIL 0x8A /* in */
-#define SET_LNB_VOLTAGE 0x8B
-#define SET_22KHZ_TONE 0x8C
-#define SEND_DISEQC_COMMAND 0x8D
-#define SET_DVB_MODE 0x8E
-#define SET_DN_SWITCH 0x8F
-#define GET_SIGNAL_LOCK 0x90 /* in */
-#define GET_FW_VERS 0x92
-#define GET_SERIAL_NUMBER 0x93 /* in */
-#define USE_EXTRA_VOLT 0x94
-#define GET_FPGA_VERS 0x95
-#define CW3K_INIT 0x9d
-
-/* PSK_configuration bits */
-#define bm8pskStarted 0x01
-#define bm8pskFW_Loaded 0x02
-#define bmIntersilOn 0x04
-#define bmDVBmode 0x08
-#define bm22kHz 0x10
-#define bmSEL18V 0x20
-#define bmDCtuned 0x40
-#define bmArmed 0x80
-
-/* Satellite modulation modes */
-#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
-#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
-#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
-#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
-
-#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
-#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
-#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
-#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
-#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
-#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
#define GET_USB_SPEED 0x07
@@ -86,15 +34,4 @@ extern int dvb_usb_gp8psk_debug;
#define PRODUCT_STRING_READ 0x0D
#define FW_BCD_VERSION_READ 0x14
-/* firmware revision id's */
-#define GP8PSK_FW_REV1 0x020604
-#define GP8PSK_FW_REV2 0x020704
-#define GP8PSK_FW_VERS(_fw_vers) ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
-
-extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d);
-extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen);
-extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen);
-extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d);
-
#endif
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index fc7569e2728d..1babd3341910 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -74,22 +74,31 @@ static struct rc_map_table rc_map_haupp_table[] = {
*/
static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom;
+ u8 *buf, data, toggle, custom;
u16 raw;
- int i;
+ int i, ret;
struct dibusb_device_state *st = d->priv;
- dvb_usb_generic_rw(d,cmd,2,key,5,0);
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = DIBUSB_REQ_POLL_REMOTE;
+ buf[1] = 0x35;
+ ret = dvb_usb_generic_rw(d, buf, 2, buf, 5, 0);
+ if (ret < 0)
+ goto ret;
*state = REMOTE_NO_KEY_PRESSED;
- switch (key[0]) {
+ switch (buf[0]) {
case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED:
- raw = ((key[1] << 8) | key[2]) >> 3;
+ raw = ((buf[1] << 8) | buf[2]) >> 3;
toggle = !!(raw & 0x800);
data = raw & 0x3f;
custom = (raw >> 6) & 0x1f;
- deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle);
+ deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",
+ buf[1], buf[2], buf[3], custom, data, toggle);
for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) {
if (rc5_data(&rc_map_haupp_table[i]) == data &&
@@ -117,7 +126,9 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
break;
}
- return 0;
+ret:
+ kfree(buf);
+ return ret;
}
static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index c05de1b088a4..07fa08be9e99 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,48 +97,53 @@ struct pctv452e_state {
u8 c; /* transaction counter, wraps around... */
u8 initialized; /* set to 1 if 0x15 has been sent */
u16 last_rc_key;
+
+ unsigned char data[80];
};
static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 buf[64];
u8 id;
unsigned int rlen;
int ret;
- BUG_ON(NULL == data && 0 != (write_len | read_len));
- BUG_ON(write_len > 64 - 4);
- BUG_ON(read_len > 64 - 4);
+ if (!data || (write_len > 64 - 4) || (read_len > 64 - 4)) {
+ err("%s: transfer data invalid", __func__);
+ return -EIO;
+ }
+ mutex_lock(&state->ca_mutex);
id = state->c++;
- buf[0] = SYNC_BYTE_OUT;
- buf[1] = id;
- buf[2] = cmd;
- buf[3] = write_len;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = cmd;
+ state->data[3] = write_len;
- memcpy(buf + 4, data, write_len);
+ memcpy(state->data + 4, data, write_len);
rlen = (read_len > 0) ? 64 : 0;
- ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
- buf, rlen, /* delay_ms */ 0);
+ ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
+ state->data, rlen, /* delay_ms */ 0);
if (0 != ret)
goto failed;
ret = -EIO;
- if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
goto failed;
- memcpy(data, buf + 4, read_len);
+ memcpy(data, state->data + 4, read_len);
+ mutex_unlock(&state->ca_mutex);
return 0;
failed:
err("CI error %d; %02X %02X %02X -> %*ph.",
- ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
+ ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+ mutex_unlock(&state->ca_mutex);
return ret;
}
@@ -405,52 +410,53 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *rcv_buf, u8 rcv_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 buf[64];
u8 id;
int ret;
+ mutex_lock(&state->ca_mutex);
id = state->c++;
ret = -EINVAL;
if (snd_len > 64 - 7 || rcv_len > 64 - 7)
goto failed;
- buf[0] = SYNC_BYTE_OUT;
- buf[1] = id;
- buf[2] = PCTV_CMD_I2C;
- buf[3] = snd_len + 3;
- buf[4] = addr << 1;
- buf[5] = snd_len;
- buf[6] = rcv_len;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = PCTV_CMD_I2C;
+ state->data[3] = snd_len + 3;
+ state->data[4] = addr << 1;
+ state->data[5] = snd_len;
+ state->data[6] = rcv_len;
- memcpy(buf + 7, snd_buf, snd_len);
+ memcpy(state->data + 7, snd_buf, snd_len);
- ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
- buf, /* rcv_len */ 64,
+ ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
+ state->data, /* rcv_len */ 64,
/* delay_ms */ 0);
if (ret < 0)
goto failed;
/* TT USB protocol error. */
ret = -EIO;
- if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
goto failed;
/* I2C device didn't respond as expected. */
ret = -EREMOTEIO;
- if (buf[5] < snd_len || buf[6] < rcv_len)
+ if (state->data[5] < snd_len || state->data[6] < rcv_len)
goto failed;
- memcpy(rcv_buf, buf + 7, rcv_len);
+ memcpy(rcv_buf, state->data + 7, rcv_len);
+ mutex_unlock(&state->ca_mutex);
return rcv_len;
failed:
- err("I2C error %d; %02X %02X %02X %02X %02X -> "
- "%02X %02X %02X %02X %02X.",
+ err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
- buf[0], buf[1], buf[4], buf[5], buf[6]);
+ 7, state->data);
+ mutex_unlock(&state->ca_mutex);
return ret;
}
@@ -499,8 +505,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 };
- u8 rx[PCTV_ANSWER_LEN];
+ u8 *rx;
int ret;
info("%s: %d\n", __func__, i);
@@ -511,6 +516,11 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
if (state->initialized)
return 0;
+ rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ mutex_lock(&state->ca_mutex);
/* hmm where shoud this should go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)
@@ -518,65 +528,75 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
__func__, ret);
/* this is a one-time initialization, dont know where to put */
- b0[1] = state->c++;
+ state->data[0] = 0xaa;
+ state->data[1] = state->c++;
+ state->data[2] = PCTV_CMD_RESET;
+ state->data[3] = 1;
+ state->data[4] = 0;
/* reset board */
- ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
- return ret;
+ goto ret;
- b0[1] = state->c++;
- b0[4] = 1;
+ state->data[1] = state->c++;
+ state->data[4] = 1;
/* reset board (again?) */
- ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
- return ret;
+ goto ret;
state->initialized = 1;
- return 0;
+ret:
+ mutex_unlock(&state->ca_mutex);
+ kfree(rx);
+ return ret;
}
static int pctv452e_rc_query(struct dvb_usb_device *d)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 b[CMD_BUFFER_SIZE];
- u8 rx[PCTV_ANSWER_LEN];
int ret, i;
- u8 id = state->c++;
+ u8 id;
+
+ mutex_lock(&state->ca_mutex);
+ id = state->c++;
/* prepare command header */
- b[0] = SYNC_BYTE_OUT;
- b[1] = id;
- b[2] = PCTV_CMD_IR;
- b[3] = 0;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = PCTV_CMD_IR;
+ state->data[3] = 0;
/* send ir request */
- ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 4,
+ state->data, PCTV_ANSWER_LEN, 0);
if (ret != 0)
- return ret;
+ goto ret;
if (debug > 3) {
- info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
- for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
- info(" %02x", rx[i+3]);
+ info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
+ for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
+ info(" %02x", state->data[i + 3]);
info("\n");
}
- if ((rx[3] == 9) && (rx[12] & 0x01)) {
+ if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
/* got a "press" event */
- state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
+ state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
if (debug > 2)
info("%s: cmd=0x%02x sys=0x%02x\n",
- __func__, rx[6], rx[7]);
+ __func__, state->data[6], state->data[7]);
rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
} else if (state->last_rc_key) {
rc_keyup(d->rc_dev);
state->last_rc_key = 0;
}
-
- return 0;
+ret:
+ mutex_unlock(&state->ca_mutex);
+ return ret;
}
static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index d9f3262bf071..4706628a3ed5 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -89,9 +89,13 @@ struct technisat_usb2_state {
static int technisat_usb2_i2c_access(struct usb_device *udev,
u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
{
- u8 b[64];
+ u8 *b;
int ret, actual_length;
+ b = kmalloc(64, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
deb_i2c("i2c-access: %02x, tx: ", device_addr);
debug_dump(tx, txlen, deb_i2c);
deb_i2c(" ");
@@ -123,7 +127,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (ret < 0) {
err("i2c-error: out failed %02x = %d", device_addr, ret);
- return -ENODEV;
+ goto err;
}
ret = usb_bulk_msg(udev,
@@ -131,7 +135,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
b, 64, &actual_length, 1000);
if (ret < 0) {
err("i2c-error: in failed %02x = %d", device_addr, ret);
- return -ENODEV;
+ goto err;
}
if (b[0] != I2C_STATUS_OK) {
@@ -140,7 +144,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (!(b[0] == I2C_STATUS_NAK &&
device_addr == 0x60
/* && device_is_technisat_usb2 */))
- return -ENODEV;
+ goto err;
}
deb_i2c("status: %d, ", b[0]);
@@ -154,7 +158,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
deb_i2c("\n");
- return 0;
+err:
+ kfree(b);
+ return ret;
}
static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index c3a0e87066eb..f7bb78c1873c 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1901,19 +1901,30 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
s32 TransferBufferLength, int bOut)
{
int r;
+ unsigned char *buf;
+
+ buf = kmalloc(TransferBufferLength, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
if (!bOut) {
r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
Request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
- Value, Index, TransferBuffer,
+ Value, Index, buf,
TransferBufferLength, HZ * 5);
+
+ if (r >= 0)
+ memcpy(TransferBuffer, buf, TransferBufferLength);
} else {
+ memcpy(buf, TransferBuffer, TransferBufferLength);
r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- Value, Index, TransferBuffer,
+ Value, Index, buf,
TransferBufferLength, HZ * 5);
}
+ kfree(buf);
return r;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index db200c9d796d..22a9aae16291 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -147,20 +147,26 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
{
struct usb_device *udev = dev->udev;
+ unsigned char *buf;
int ret;
+ buf = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00,
index,
- (u8 *) value,
+ buf,
sizeof(u8),
500);
- if (ret < 0)
- return ret;
- else
- return 0;
+ if (ret >= 0)
+ memcpy(value, buf, sizeof(u8));
+
+ kfree(buf);
+ return ret;
}
static int stk_start_stream(struct stk_camera *dev)
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 3228fd182a99..9ff243970e93 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -123,19 +123,6 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
.properties = apl_i2c_properties,
};
-static const struct intel_lpss_platform_info kbl_info = {
- .clk_rate = 120000000,
-};
-
-static const struct intel_lpss_platform_info kbl_uart_info = {
- .clk_rate = 120000000,
- .clk_con_id = "baudclk",
-};
-
-static const struct intel_lpss_platform_info kbl_i2c_info = {
- .clk_rate = 133000000,
-};
-
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
@@ -207,15 +194,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
/* KBL-H */
- { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info },
- { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info },
- { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info },
- { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info },
- { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 41b113875d64..70c646b0097d 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev)
for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
- /* Put the device into reset state */
- writel(0, lpss->priv + LPSS_PRIV_RESETS);
-
return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_suspend);
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 43e54b7e908f..f9a8c5203873 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -86,6 +86,7 @@ enum bxtwc_irqs_level2 {
BXTWC_THRM2_IRQ,
BXTWC_BCU_IRQ,
BXTWC_ADC_IRQ,
+ BXTWC_USBC_IRQ,
BXTWC_CHGR0_IRQ,
BXTWC_CHGR1_IRQ,
BXTWC_GPIO0_IRQ,
@@ -111,7 +112,8 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = {
REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff),
REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f),
REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff),
- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f),
REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f),
REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff),
REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f),
@@ -146,7 +148,7 @@ static struct resource adc_resources[] = {
};
static struct resource usbc_resources[] = {
- DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"),
+ DEFINE_RES_IRQ(BXTWC_USBC_IRQ),
};
static struct resource charger_resources[] = {
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 3ac486a597f3..c57e407020f1 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
clones[i]);
}
+ put_device(dev);
+
return 0;
}
EXPORT_SYMBOL(mfd_clone_cell);
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index cfdae8a3d779..b0c7bcdaf5df 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -851,6 +851,8 @@ static int stmpe_reset(struct stmpe *stmpe)
if (ret < 0)
return ret;
+ msleep(10);
+
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]);
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 2f2225e845ef..b93fe4c4957a 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -73,8 +73,10 @@ static struct syscon *of_syscon_register(struct device_node *np)
/* Parse the device's DT node for an endianness specification */
if (of_property_read_bool(np, "big-endian"))
syscon_config.val_format_endian = REGMAP_ENDIAN_BIG;
- else if (of_property_read_bool(np, "little-endian"))
+ else if (of_property_read_bool(np, "little-endian"))
syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE;
+ else if (of_property_read_bool(np, "native-endian"))
+ syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE;
/*
* search for reg-io-width property in DT. If it is not provided,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 7eec619a6023..8588dbad3301 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -393,8 +393,13 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
BUG();
goto err;
}
-
- ret = devm_regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
+
+ /*
+ * Can't use devres helper here as some of the supplies are provided by
+ * wm8994->dev's children (regulators) and those regulators are
+ * unregistered by the devres core before the supplies are freed.
+ */
+ ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
@@ -405,7 +410,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
- goto err;
+ goto err_regulator_free;
}
ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);
@@ -596,6 +601,8 @@ err_irq:
err_enable:
regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
+err_regulator_free:
+ regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
err:
mfd_remove_devices(wm8994->dev);
return ret;
@@ -604,10 +611,11 @@ err:
static void wm8994_device_exit(struct wm8994 *wm8994)
{
pm_runtime_disable(wm8994->dev);
- mfd_remove_devices(wm8994->dev);
wm8994_irq_exit(wm8994);
regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
+ regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
+ mfd_remove_devices(wm8994->dev);
}
static const struct of_device_id wm8994_of_match[] = {
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index e9e6ea3ab73c..75b9d4ac8b1e 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = 0;
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ if (bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version\n");
ret = -EIO;
goto err;
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5a8dc5a76e0d..3678220964fe 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2347,7 +2347,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
- struct mmc_async_req areq;
+ struct mmc_test_async_req test_areq = { .test = test };
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
@@ -2363,8 +2363,8 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
- areq.mrq = mrq;
- areq.err_check = mmc_test_check_result_async;
+ test_areq.areq.mrq = mrq;
+ test_areq.areq.err_check = mmc_test_check_result_async;
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
@@ -2378,7 +2378,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_req(host, &areq, &ret);
+ mmc_start_req(host, &test_areq.areq, &ret);
if (ret)
goto out_free;
} else {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 39fc5b2b96c5..df19777068a6 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -26,6 +26,8 @@
#include "mmc_ops.h"
#include "sd_ops.h"
+#define DEFAULT_CMD6_TIMEOUT_MS 500
+
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -571,6 +573,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->erased_byte = 0x0;
/* eMMC v4.5 or later */
+ card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
if (card->ext_csd.rev >= 6) {
card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index c0bb0c793e84..dbbc4303bdd0 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
host->pdata = pdev->dev.platform_data;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Get registers' physical base address */
- host->phy_regs = regs->start;
host->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
+ /* Get registers' physical base address */
+ host->phy_regs = regs->start;
+
platform_set_drvdata(pdev, host);
return dw_mci_probe(host);
}
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4fcbc4012ed0..df478ae72e23 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1058,6 +1058,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
spin_unlock_irqrestore(&host->irq_lock, irqflags);
if (host->dma_ops->start(host, sg_len)) {
+ host->dma_ops->stop(host);
/* We can't do DMA, try PIO for this one */
dev_dbg(host->dev,
"%s: fall back to PIO mode for current transfer\n",
@@ -2940,7 +2941,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(-ENOMEM);
/* find reset controller when exist */
- pdata->rstc = devm_reset_control_get_optional(dev, NULL);
+ pdata->rstc = devm_reset_control_get_optional(dev, "reset");
if (IS_ERR(pdata->rstc)) {
if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index d839147e591d..44ecebd1ea8c 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
+ spin_lock_init(&host->lock);
+
ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
dev_name(&pdev->dev), host);
if (ret)
goto out_free_dma;
- spin_lock_init(&host->lock);
-
ret = mmc_add_host(mmc);
if (ret)
goto out_free_dma;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 8ef44a2a2fd9..90ed2e12d345 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -647,6 +647,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (msm_host->pwr_irq < 0) {
dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
msm_host->pwr_irq);
+ ret = msm_host->pwr_irq;
goto clk_disable;
}
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fb71c866eacc..1bb11e4a9fe5 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
}
+ /*
+ * The DAT[3:0] line signal levels and the CMD line signal level are
+ * not compatible with standard SDHC register. The line signal levels
+ * DAT[7:0] are at bits 31:24 and the command line signal level is at
+ * bit 23. All other bits are the same as in the standard SDHC
+ * register.
+ */
+ if (spec_reg == SDHCI_PRESENT_STATE) {
+ ret = value & 0x000fffff;
+ ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
+ ret |= (value << 1) & SDHCI_CMD_LVL;
+ return ret;
+ }
+
ret = value;
return ret;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 71654b90227f..42ef3ebb1d8c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2086,6 +2086,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (!host->tuning_done) {
pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
+
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
@@ -2286,10 +2290,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
mrq = host->mrqs_done[i];
- if (mrq) {
- host->mrqs_done[i] = NULL;
+ if (mrq)
break;
- }
}
if (!mrq) {
@@ -2320,6 +2322,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
* upon error conditions.
*/
if (sdhci_needs_reset(host, mrq)) {
+ /*
+ * Do not finish until command and data lines are available for
+ * reset. Note there can only be one other mrq, so it cannot
+ * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+ * would both be null.
+ */
+ if (host->cmd || host->data_cmd) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
@@ -2327,10 +2340,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
- if (!host->cmd)
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- if (!host->data_cmd)
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
host->pending_reset = false;
}
@@ -2338,6 +2349,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
if (!sdhci_has_requests(host))
sdhci_led_deactivate(host);
+ host->mrqs_done[i] = NULL;
+
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
@@ -2512,9 +2525,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (!host->data) {
struct mmc_command *data_cmd = host->data_cmd;
- if (data_cmd)
- host->data_cmd = NULL;
-
/*
* The "data complete" interrupt is also used to
* indicate that a busy state has ended. See comment
@@ -2522,11 +2532,13 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
*/
if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+ host->data_cmd = NULL;
data_cmd->error = -ETIMEDOUT;
sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
if (intmask & SDHCI_INT_DATA_END) {
+ host->data_cmd = NULL;
/*
* Some cards handle busy-end interrupt
* before the command completed, so make
@@ -2912,6 +2924,10 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
+ if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+ mmc->ops->hs400_enhanced_strobe)
+ mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = false;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 766df17fb7eb..2570455b219a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -73,6 +73,7 @@
#define SDHCI_DATA_LVL_MASK 0x00F00000
#define SDHCI_DATA_LVL_SHIFT 20
#define SDHCI_DATA_0_LVL_MASK 0x00100000
+#define SDHCI_CMD_LVL 0x01000000
#define SDHCI_HOST_CONTROL 0x28
#define SDHCI_CTRL_LED 0x01
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 0f68a99fc4ad..141bd70a49c2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -161,7 +161,7 @@ int gpmi_init(struct gpmi_nand_data *this)
ret = gpmi_enable_clk(this);
if (ret)
- goto err_out;
+ return ret;
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;
@@ -197,6 +197,7 @@ int gpmi_init(struct gpmi_nand_data *this)
gpmi_disable_clk(this);
return 0;
err_out:
+ gpmi_disable_clk(this);
return ret;
}
@@ -270,7 +271,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
ret = gpmi_enable_clk(this);
if (ret)
- goto err_out;
+ return ret;
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
@@ -308,6 +309,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
gpmi_disable_clk(this);
return 0;
err_out:
+ gpmi_disable_clk(this);
return ret;
}
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index d54f666417e1..dbf256217b3e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -86,6 +86,8 @@ struct mtk_ecc {
struct completion done;
struct mutex lock;
u32 sectors;
+
+ u8 eccdata[112];
};
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
@@ -366,9 +368,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
u8 *data, u32 bytes)
{
dma_addr_t addr;
- u8 *p;
- u32 len, i, val;
- int ret = 0;
+ u32 len;
+ int ret;
addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
ret = dma_mapping_error(ecc->dev, addr);
@@ -393,14 +394,12 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
- p = data + bytes;
- /* write the parity bytes generated by the ECC back to the OOB region */
- for (i = 0; i < len; i++) {
- if ((i % 4) == 0)
- val = readl(ecc->regs + ECC_ENCPAR(i / 4));
- p[i] = (val >> ((i % 4) * 8)) & 0xff;
- }
+ /* write the parity bytes generated by the ECC back to temp buffer */
+ __ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0), round_up(len, 4));
+
+ /* copy into possibly unaligned OOB region with actual length */
+ memcpy(data + bytes, ecc->eccdata, len);
timeout:
dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index e5718e5ecf92..3bde96a3f7bf 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1095,10 +1095,11 @@ static void nand_release_data_interface(struct nand_chip *chip)
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
+ * @chipnr: Internal die id
*
* Returns 0 for success or negative error code otherwise
*/
-int nand_reset(struct nand_chip *chip)
+int nand_reset(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
@@ -1107,9 +1108,17 @@ int nand_reset(struct nand_chip *chip)
if (ret)
return ret;
+ /*
+ * The CS line has to be released before we can apply the new NAND
+ * interface settings, hence this weird ->select_chip() dance.
+ */
+ chip->select_chip(mtd, chipnr);
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
ret = nand_setup_data_interface(chip);
+ chip->select_chip(mtd, -1);
if (ret)
return ret;
@@ -1185,8 +1194,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
- chip->select_chip(mtd, chipnr);
-
/*
* Reset the chip.
* If we want to check the WP through READ STATUS and check the bit 7
@@ -1194,7 +1201,9 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
* some operation can also clear the bit 7 of status register
* eg. erase/program a locked block
*/
- nand_reset(chip);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -1244,8 +1253,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
- chip->select_chip(mtd, chipnr);
-
/*
* Reset the chip.
* If we want to check the WP through READ STATUS and check the bit 7
@@ -1253,7 +1260,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
* some operation can also clear the bit 7 of status register
* eg. erase/program a locked block
*/
- nand_reset(chip);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -2940,10 +2949,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
}
chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
-
- /* Shift to get page */
- page = (int)(to >> chip->page_shift);
/*
* Reset the chip. Some chips (like the Toshiba TC5832DC found in one
@@ -2951,7 +2956,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- nand_reset(chip);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -3984,14 +3994,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
int i, maf_idx;
u8 id_data[8];
- /* Select the device */
- chip->select_chip(mtd, 0);
-
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
- nand_reset(chip);
+ nand_reset(chip, 0);
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
/* Send the command for reading device ID */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
@@ -4329,17 +4339,31 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return PTR_ERR(type);
}
+ /* Initialize the ->data_interface field. */
ret = nand_init_data_interface(chip);
if (ret)
return ret;
+ /*
+ * Setup the data interface correctly on the chip and controller side.
+ * This explicit call to nand_setup_data_interface() is only required
+ * for the first die, because nand_reset() has been called before
+ * ->data_interface and ->default_onfi_timing_mode were set.
+ * For the other dies, nand_reset() will automatically switch to the
+ * best mode for us.
+ */
+ ret = nand_setup_data_interface(chip);
+ if (ret)
+ return ret;
+
chip->select_chip(mtd, -1);
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
- chip->select_chip(mtd, i);
/* See comment in nand_get_flash_type for reset */
- nand_reset(chip);
+ nand_reset(chip, i);
+
+ chip->select_chip(mtd, i);
/* Send the command for reading device ID */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 3eb7430dffbf..f8ff25c8ee2e 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -142,6 +142,9 @@ struct plx_pci_card {
#define CTI_PCI_VENDOR_ID 0x12c4
#define CTI_PCI_DEVICE_ID_CRG001 0x0900
+#define MOXA_PCI_VENDOR_ID 0x1393
+#define MOXA_PCI_DEVICE_ID 0x0100
+
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx9056_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
@@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = {
/* based on PLX9030 */
};
+static struct plx_pci_card_info plx_pci_card_info_moxa = {
+ "MOXA", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
static const struct pci_device_id plx_pci_tbl[] = {
{
/* Adlink PCI-7841/cPCI-7841 */
@@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = {
0, 0,
(kernel_ulong_t)&plx_pci_card_info_elcus
},
+ {
+ /* moxa */
+ MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_moxa
+ },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
index e8fc4952c6b0..2147678f0225 100644
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -43,11 +43,22 @@ struct __packed pucan_command {
u16 args[3];
};
+#define PUCAN_TSLOW_BRP_BITS 10
+#define PUCAN_TSLOW_TSGEG1_BITS 8
+#define PUCAN_TSLOW_TSGEG2_BITS 7
+#define PUCAN_TSLOW_SJW_BITS 7
+
+#define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1)
+#define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1)
+#define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1)
+#define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1)
+
/* uCAN TIMING_SLOW command fields */
-#define PUCAN_TSLOW_SJW_T(s, t) (((s) & 0xf) | ((!!(t)) << 7))
-#define PUCAN_TSLOW_TSEG2(t) ((t) & 0xf)
-#define PUCAN_TSLOW_TSEG1(t) ((t) & 0x3f)
-#define PUCAN_TSLOW_BRP(b) ((b) & 0x3ff)
+#define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \
+ ((!!(t)) << 7))
+#define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK)
+#define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK)
+#define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK)
struct __packed pucan_timing_slow {
__le16 opcode_channel;
@@ -60,11 +71,21 @@ struct __packed pucan_timing_slow {
__le16 brp; /* BaudRate Prescaler */
};
+#define PUCAN_TFAST_BRP_BITS 10
+#define PUCAN_TFAST_TSGEG1_BITS 5
+#define PUCAN_TFAST_TSGEG2_BITS 4
+#define PUCAN_TFAST_SJW_BITS 4
+
+#define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1)
+#define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1)
+#define PUCAN_TFAST_TSEG2_MASK ((1 << PUCAN_TFAST_TSGEG2_BITS) - 1)
+#define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1)
+
/* uCAN TIMING_FAST command fields */
-#define PUCAN_TFAST_SJW(s) ((s) & 0x3)
-#define PUCAN_TFAST_TSEG2(t) ((t) & 0x7)
-#define PUCAN_TFAST_TSEG1(t) ((t) & 0xf)
-#define PUCAN_TFAST_BRP(b) ((b) & 0x3ff)
+#define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK)
+#define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK)
+#define PUCAN_TFAST_TSEG1(t) ((t) & PUCAN_TFAST_TSEG1_MASK)
+#define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK)
struct __packed pucan_timing_fast {
__le16 opcode_channel;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index c06382cdfdfe..f3141ca56bc3 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -39,6 +39,7 @@ static struct usb_device_id peak_usb_table[] = {
{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
+ {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
{} /* Terminating entry */
};
@@ -50,6 +51,7 @@ static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
&pcan_usb_pro,
&pcan_usb_fd,
&pcan_usb_pro_fd,
+ &pcan_usb_x6,
};
/*
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 506fe506c9d3..3cbfb069893d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -27,6 +27,7 @@
#define PCAN_USBPRO_PRODUCT_ID 0x000d
#define PCAN_USBPROFD_PRODUCT_ID 0x0011
#define PCAN_USBFD_PRODUCT_ID 0x0012
+#define PCAN_USBX6_PRODUCT_ID 0x0014
#define PCAN_USB_DRIVER_NAME "peak_usb"
@@ -90,6 +91,7 @@ extern const struct peak_usb_adapter pcan_usb;
extern const struct peak_usb_adapter pcan_usb_pro;
extern const struct peak_usb_adapter pcan_usb_fd;
extern const struct peak_usb_adapter pcan_usb_pro_fd;
+extern const struct peak_usb_adapter pcan_usb_x6;
struct peak_time_ref {
struct timeval tv_host_0, tv_host;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index ce44a033f63b..304732550f0a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -993,24 +993,24 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
static const struct can_bittiming_const pcan_usb_fd_const = {
.name = "pcan_usb_fd",
.tseg1_min = 1,
- .tseg1_max = 64,
+ .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
.tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
+ .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+ .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
.brp_min = 1,
- .brp_max = 1024,
+ .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
.brp_inc = 1,
};
static const struct can_bittiming_const pcan_usb_fd_data_const = {
.name = "pcan_usb_fd",
.tseg1_min = 1,
- .tseg1_max = 16,
+ .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
.tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
+ .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+ .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
.brp_min = 1,
- .brp_max = 1024,
+ .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
.brp_inc = 1,
};
@@ -1065,24 +1065,24 @@ const struct peak_usb_adapter pcan_usb_fd = {
static const struct can_bittiming_const pcan_usb_pro_fd_const = {
.name = "pcan_usb_pro_fd",
.tseg1_min = 1,
- .tseg1_max = 64,
+ .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
.tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
+ .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+ .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
.brp_min = 1,
- .brp_max = 1024,
+ .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
.brp_inc = 1,
};
static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
.name = "pcan_usb_pro_fd",
.tseg1_min = 1,
- .tseg1_max = 16,
+ .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
.tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
+ .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+ .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
.brp_min = 1,
- .brp_max = 1024,
+ .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
.brp_inc = 1,
};
@@ -1132,3 +1132,75 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
};
+
+/* describes the PCAN-USB X6 adapter */
+static const struct can_bittiming_const pcan_usb_x6_const = {
+ .name = "pcan_usb_x6",
+ .tseg1_min = 1,
+ .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+ .tseg2_min = 1,
+ .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+ .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+ .brp_min = 1,
+ .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_x6_data_const = {
+ .name = "pcan_usb_x6",
+ .tseg1_min = 1,
+ .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+ .tseg2_min = 1,
+ .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+ .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+ .brp_min = 1,
+ .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+ .brp_inc = 1,
+};
+
+const struct peak_usb_adapter pcan_usb_x6 = {
+ .name = "PCAN-USB X6",
+ .device_id = PCAN_USBX6_PRODUCT_ID,
+ .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
+ .ctrlmode_supported = CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ .clock = {
+ .freq = PCAN_UFD_CRYSTAL_HZ,
+ },
+ .bittiming_const = &pcan_usb_x6_const,
+ .data_bittiming_const = &pcan_usb_x6_data_const,
+
+ /* size of device private data */
+ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+ /* timestamps usage */
+ .ts_used_bits = 32,
+ .ts_period = 1000000, /* calibration period in ts. */
+ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+ .us_per_ts_shift = 0,
+
+ /* give here messages in/out endpoints */
+ .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+ .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1},
+
+ /* size of rx/tx usb buffers */
+ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+ .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+ /* device callbacks */
+ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
+ .dev_init = pcan_usb_fd_init,
+
+ .dev_exit = pcan_usb_fd_exit,
+ .dev_free = pcan_usb_fd_free,
+ .dev_set_bus = pcan_usb_fd_set_bus,
+ .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+ .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_decode_buf = pcan_usb_fd_decode_buf,
+ .dev_start = pcan_usb_fd_start,
+ .dev_stop = pcan_usb_fd_stop,
+ .dev_restart_async = pcan_usb_fd_restart_async,
+ .dev_encode_msg = pcan_usb_fd_encode_msg,
+
+ .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 7717b19dc806..947adda3397d 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -962,9 +962,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
vl->members |= BIT(port) | BIT(cpu_port);
if (untagged)
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->untag |= BIT(port);
else
- vl->untag &= ~(BIT(port) | BIT(cpu_port));
+ vl->untag &= ~BIT(port);
+ vl->untag &= ~BIT(cpu_port);
b53_set_vlan_entry(dev, vid, vl);
b53_fast_age_vlan(dev, vid);
@@ -973,8 +974,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
if (pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
vlan->vid_end);
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
- vlan->vid_end);
b53_fast_age_vlan(dev, vid);
}
}
@@ -984,7 +983,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- unsigned int cpu_port = dev->cpu_port;
struct b53_vlan *vl;
u16 vid;
u16 pvid;
@@ -997,8 +995,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
b53_get_vlan_entry(dev, vid, vl);
vl->members &= ~BIT(port);
- if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
- vl->members = 0;
if (pvid == vid) {
if (is5325(dev) || is5365(dev))
@@ -1007,18 +1003,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
pvid = 0;
}
- if (untagged) {
+ if (untagged)
vl->untag &= ~(BIT(port));
- if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
- vl->untag = 0;
- }
b53_set_vlan_entry(dev, vid, vl);
b53_fast_age_vlan(dev, vid);
}
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
b53_fast_age_vlan(dev, pvid);
return 0;
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 76fb8552c9d9..ef63d24fef81 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -256,6 +256,7 @@ static const struct of_device_id b53_mmap_of_table[] = {
{ .compatible = "brcm,bcm63xx-switch" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e218887f18b7..9ec33b51a0ed 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -588,6 +588,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct ethtool_eee *p = &priv->port_sts[port].eee;
u32 id_mode_dis = 0, port_mode;
const char *str = NULL;
u32 reg;
@@ -662,6 +663,9 @@ force_link:
reg |= DUPLX_MODE;
core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+
+ if (!phydev->is_pseudo_fixed_link)
+ p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
}
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
@@ -1133,6 +1137,20 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
return 0;
}
+static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
+{
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ /* For a kernel about to be kexec'd we want to keep the GPHY on for a
+ * successful MDIO bus scan to occur. If we did turn off the GPHY
+ * before (e.g: port_disable), this will also power it back on.
+ *
+ * Do not rely on kexec_in_progress, just power the PHY on.
+ */
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
@@ -1158,10 +1176,12 @@ static const struct of_device_id bcm_sf2_of_match[] = {
{ .compatible = "brcm,bcm7445-switch-v4.0" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
.remove = bcm_sf2_sw_remove,
+ .shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
.of_match_table = bcm_sf2_of_match,
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index bda31f308cc2..a0eee7218695 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -400,12 +400,6 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
skb_put(skb, pktlength);
- /* make cache consistent with receive packet buffer */
- dma_sync_single_for_cpu(priv->device,
- priv->rx_ring[entry].dma_addr,
- priv->rx_ring[entry].len,
- DMA_FROM_DEVICE);
-
dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
priv->rx_ring[entry].len, DMA_FROM_DEVICE);
@@ -469,7 +463,6 @@ static int tse_tx_complete(struct altera_tse_private *priv)
if (unlikely(netif_queue_stopped(priv->dev) &&
tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
- netif_tx_lock(priv->dev);
if (netif_queue_stopped(priv->dev) &&
tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
if (netif_msg_tx_done(priv))
@@ -477,7 +470,6 @@ static int tse_tx_complete(struct altera_tse_private *priv)
__func__);
netif_wake_queue(priv->dev);
}
- netif_tx_unlock(priv->dev);
}
spin_unlock(&priv->tx_lock);
@@ -592,10 +584,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
buffer->dma_addr = dma_addr;
buffer->len = nopaged_len;
- /* Push data out of the cache hierarchy into main memory */
- dma_sync_single_for_device(priv->device, buffer->dma_addr,
- buffer->len, DMA_TO_DEVICE);
-
priv->dmaops->tx_buffer(priv, buffer);
skb_tx_timestamp(skb);
@@ -819,6 +807,8 @@ static int init_phy(struct net_device *dev)
if (!phydev) {
netdev_err(dev, "Could not find the PHY\n");
+ if (fixed_link)
+ of_phy_deregister_fixed_link(priv->device->of_node);
return -ENODEV;
}
@@ -1545,10 +1535,15 @@ err_free_netdev:
static int altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
- if (ndev->phydev)
+ if (ndev->phydev) {
phy_disconnect(ndev->phydev);
+ if (of_phy_is_fixed_link(priv->device->of_node))
+ of_phy_deregister_fixed_link(priv->device->of_node);
+ }
+
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
unregister_netdev(ndev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 9de078819aa6..4f7635178200 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -829,7 +829,7 @@ static int xgbe_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int xgbe_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
@@ -874,7 +874,7 @@ static int xgbe_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c481f104a8fe..5390ae89136c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
return num_msgs;
}
-static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
-{
- u32 data = 0x7777;
-
- xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
- xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
- xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
- xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
- xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
-}
-
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
@@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = {
.clear = xgene_enet_clear_ring,
.wr_cmd = xgene_enet_wr_cmd,
.len = xgene_enet_ring_len,
- .coalesce = xgene_enet_setup_coalescing,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 8456337a237d..06e598c8bc16 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -55,8 +55,10 @@ enum xgene_enet_rm {
#define PREFETCH_BUF_EN BIT(21)
#define CSR_RING_ID_BUF 0x000c
#define CSR_PBM_COAL 0x0014
+#define CSR_PBM_CTICK0 0x0018
#define CSR_PBM_CTICK1 0x001c
#define CSR_PBM_CTICK2 0x0020
+#define CSR_PBM_CTICK3 0x0024
#define CSR_THRESHOLD0_SET1 0x0030
#define CSR_THRESHOLD1_SET1 0x0034
#define CSR_RING_NE_INT_MODE 0x017c
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 429f18fc5503..8158d4698734 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
}
- pdata->ring_ops->coalesce(pdata->tx_ring[0]);
+ if (pdata->ring_ops->coalesce)
+ pdata->ring_ops->coalesce(pdata->tx_ring[0]);
pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
index 2b76732add5d..af51dd5844ce 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
}
- ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+ ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);
addr >>= 8;
ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
@@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
- u32 data = 0x7777;
+ u32 data = 0x77777777;
xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
- xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
- xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
- xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
+ xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
+ xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
}
struct xgene_ring_ops xgene_ring2_ops = {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b0da9693f28a..be865b4dada2 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -460,7 +460,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
if (ndev->flags & IFF_ALLMULTI) {
arc_reg_set(priv, R_LAFL, ~0);
arc_reg_set(priv, R_LAFH, ~0);
- } else {
+ } else if (ndev->flags & IFF_MULTICAST) {
struct netdev_hw_addr *ha;
unsigned int filter[2] = { 0, 0 };
int bit;
@@ -472,6 +472,9 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
arc_reg_set(priv, R_LAFL, filter[0]);
arc_reg_set(priv, R_LAFH, filter[1]);
+ } else {
+ arc_reg_set(priv, R_LAFL, 0);
+ arc_reg_set(priv, R_LAFH, 0);
}
}
}
@@ -764,8 +767,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
ndev->netdev_ops = &arc_emac_netdev_ops;
ndev->ethtool_ops = &arc_emac_ethtool_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
- /* FIXME :: no multicast support yet */
- ndev->flags &= ~IFF_MULTICAST;
priv = netdev_priv(ndev);
priv->dev = dev;
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index b047fd607b83..e078d8da978c 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1358,6 +1358,7 @@ static const struct of_device_id nb8800_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
static int nb8800_probe(struct platform_device *pdev)
{
@@ -1465,12 +1466,12 @@ static int nb8800_probe(struct platform_device *pdev)
ret = nb8800_hw_init(dev);
if (ret)
- goto err_free_bus;
+ goto err_deregister_fixed_link;
if (ops && ops->init) {
ret = ops->init(dev);
if (ret)
- goto err_free_bus;
+ goto err_deregister_fixed_link;
}
dev->netdev_ops = &nb8800_netdev_ops;
@@ -1503,6 +1504,9 @@ static int nb8800_probe(struct platform_device *pdev)
err_free_dma:
nb8800_dma_free(dev);
+err_deregister_fixed_link:
+ if (of_phy_is_fixed_link(pdev->dev.of_node))
+ of_phy_deregister_fixed_link(pdev->dev.of_node);
err_free_bus:
of_node_put(priv->phy_node);
mdiobus_unregister(bus);
@@ -1520,6 +1524,8 @@ static int nb8800_remove(struct platform_device *pdev)
struct nb8800_priv *priv = netdev_priv(ndev);
unregister_netdev(ndev);
+ if (of_phy_is_fixed_link(pdev->dev.of_node))
+ of_phy_deregister_fixed_link(pdev->dev.of_node);
of_node_put(priv->phy_node);
mdiobus_unregister(priv->mii_bus);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index ae364c74baf3..537090952c45 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1126,7 +1126,8 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(phydev);
+ if (priv->has_phy)
+ phy_disconnect(phydev);
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c3354b9941d1..25d1eb4933d0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1755,13 +1755,13 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (priv->irq0 <= 0 || priv->irq1 <= 0) {
dev_err(&pdev->dev, "invalid interrupts\n");
ret = -EINVAL;
- goto err;
+ goto err_free_netdev;
}
priv->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
- goto err;
+ goto err_free_netdev;
}
priv->netdev = dev;
@@ -1779,7 +1779,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
ret = of_phy_register_fixed_link(dn);
if (ret) {
dev_err(&pdev->dev, "failed to register fixed PHY\n");
- goto err;
+ goto err_free_netdev;
}
priv->phy_dn = dn;
@@ -1821,7 +1821,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
ret = register_netdev(dev);
if (ret) {
dev_err(&pdev->dev, "failed to register net_device\n");
- goto err;
+ goto err_deregister_fixed_link;
}
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
@@ -1832,7 +1832,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->base, priv->irq0, priv->irq1, txq, rxq);
return 0;
-err:
+
+err_deregister_fixed_link:
+ if (of_phy_is_fixed_link(dn))
+ of_phy_deregister_fixed_link(dn);
+err_free_netdev:
free_netdev(dev);
return ret;
}
@@ -1840,11 +1844,14 @@ err:
static int bcm_sysport_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct device_node *dn = pdev->dev.of_node;
/* Not much to do, ndo_close has been called
* and we use managed allocations
*/
unregister_netdev(dev);
+ if (of_phy_is_fixed_link(dn))
+ of_phy_deregister_fixed_link(dn);
free_netdev(dev);
dev_set_drvdata(&pdev->dev, NULL);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 856379cbb402..49f4cafe5438 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+
+ /* preserve ONLY bits 16-17 from current hardware value */
+ ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+
if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
ctl &= ~BGMAC_DMA_RX_BL_MASK;
ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
ctl &= ~BGMAC_DMA_RX_PT_MASK;
ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
}
- ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
ctl |= BGMAC_DMA_RX_ENABLE;
ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
@@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac)
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
- if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0)
+ if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
- if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
+ if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
bgmac_cco_ctl_maskset(bgmac, 1, ~0,
BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
@@ -1449,7 +1452,7 @@ static int bgmac_phy_connect(struct bgmac *bgmac)
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connecton failed\n");
+ dev_err(bgmac->dev, "PHY connection failed\n");
return PTR_ERR(phy_dev);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 27f11a5d5fe2..1f7034d739b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -49,6 +49,7 @@
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>
+#include <linux/crash_dump.h>
#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
@@ -271,22 +272,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
+ unsigned long flags;
u32 val;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
return val;
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
- spin_lock_bh(&bp->indirect_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
static void
@@ -304,8 +308,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
+ unsigned long flags;
+
offset += cid_addr;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
int i;
@@ -322,7 +328,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
BNX2_WR(bp, BNX2_CTX_DATA, val);
}
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
#ifdef BCM_CNIC
@@ -4759,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp)
BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
-static int
-bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+static void
+bnx2_wait_dma_complete(struct bnx2 *bp)
{
u32 val;
- int i, rc = 0;
- u8 old_port;
+ int i;
- /* Wait for the current PCI transaction to complete before
- * issuing a reset. */
+ /*
+ * Wait for the current PCI transaction to complete before
+ * issuing a reset.
+ */
if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
(BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
@@ -4791,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
}
}
+ return;
+}
+
+
+static int
+bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+{
+ u32 val;
+ int i, rc = 0;
+ u8 old_port;
+
+ /* Wait for the current PCI transaction to complete before
+ * issuing a reset. */
+ bnx2_wait_dma_complete(bp);
+
/* Wait for the firmware to tell us it is ok to issue a reset. */
bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -6356,6 +6378,10 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto out;
+
netif_carrier_off(dev);
bnx2_disable_int(bp);
@@ -6424,6 +6450,7 @@ open_err:
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
+ bnx2_release_firmware(bp);
goto out;
}
@@ -8570,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- rc = bnx2_request_firmware(bp);
- if (rc < 0)
- goto error;
-
+ /*
+ * In-flight DMA from 1st kernel could continue going in kdump kernel.
+ * New io-page table has been created before bnx2 does reset at open stage.
+ * We have to wait for the in-flight DMA to complete to avoid it look up
+ * into the newly created io-page table.
+ */
+ if (is_kdump_kernel())
+ bnx2_wait_dma_complete(bp);
- bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8608,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
- bnx2_release_firmware(bp);
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 20fe6a8c35c1..0cee4c0283f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15241,7 +15241,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
bp->cyclecounter.read = bnx2x_cyclecounter_read;
bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
- bp->cyclecounter.shift = 1;
+ bp->cyclecounter.shift = 0;
bp->cyclecounter.mult = 1;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a9f9f3738022..ee1a803aa11a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1811,6 +1811,9 @@ static int bnxt_busy_poll(struct napi_struct *napi)
if (atomic_read(&bp->intr_sem) != 0)
return LL_FLUSH_FAILED;
+ if (!bp->link_info.link_up)
+ return LL_FLUSH_FAILED;
+
if (!bnxt_lock_poll(bnapi))
return LL_FLUSH_BUSY;
@@ -3210,11 +3213,17 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
goto err_out;
}
- if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
+ switch (tunnel_type) {
+ case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
-
- else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
+ break;
+ case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+ break;
+ default:
+ break;
+ }
+
err_out:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4934,6 +4943,10 @@ static void bnxt_del_napi(struct bnxt *bp)
napi_hash_del(&bnapi->napi);
netif_napi_del(&bnapi->napi);
}
+ /* We called napi_hash_del() before netif_napi_del(), we need
+ * to respect an RCU grace period before freeing napi structures.
+ */
+ synchronize_net();
}
static void bnxt_init_napi(struct bnxt *bp)
@@ -6309,6 +6322,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *ntc)
{
struct bnxt *bp = netdev_priv(dev);
+ bool sh = false;
u8 tc;
if (ntc->type != TC_SETUP_MQPRIO)
@@ -6325,12 +6339,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (netdev_get_num_tc(dev) == tc)
return 0;
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
+
if (tc) {
int max_rx_rings, max_tx_rings, rc;
- bool sh = false;
-
- if (bp->flags & BNXT_FLAG_SHARED_RINGS)
- sh = true;
rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
@@ -6348,7 +6361,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
}
- bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+ bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
if (netif_running(bp->dev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ec6cd18842c3..60e2af8678bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
if (vf->flags & BNXT_VF_LINK_UP) {
/* if physical link is down, force link up on VF */
- if (phy_qcfg_resp.link ==
- PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+ if (phy_qcfg_resp.link !=
+ PORT_PHY_QCFG_RESP_LINK_LINK) {
phy_qcfg_resp.link =
PORT_PHY_QCFG_RESP_LINK_LINK;
phy_qcfg_resp.link_speed = cpu_to_le16(
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 4464bc5db934..a4e60e56c14f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1172,6 +1172,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned int pkts_compl = 0;
@@ -1199,13 +1200,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
if (tx_cb_ptr->skb) {
pkts_compl++;
bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
- dma_unmap_single(&dev->dev,
+ dma_unmap_single(kdev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
- dma_unmap_page(&dev->dev,
+ dma_unmap_page(kdev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
@@ -1775,6 +1776,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
+ struct device *kdev = &priv->pdev->dev;
struct enet_cb *cb;
int i;
@@ -1782,7 +1784,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
cb = &priv->rx_cbs[i];
if (dma_unmap_addr(cb, dma_addr)) {
- dma_unmap_single(&priv->dev->dev,
+ dma_unmap_single(kdev,
dma_unmap_addr(cb, dma_addr),
priv->rx_buf_len, DMA_FROM_DEVICE);
dma_unmap_addr_set(cb, dma_addr, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 457c3bc8cfff..e87607621e62 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -542,8 +542,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
/* Make sure we initialize MoCA PHYs with a link down */
if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
phydev = of_phy_find_device(dn);
- if (phydev)
+ if (phydev) {
phydev->link = 0;
+ put_device(&phydev->mdio.dev);
+ }
}
return 0;
@@ -625,6 +627,7 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
int bcmgenet_mii_init(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
int ret;
ret = bcmgenet_mii_alloc(priv);
@@ -638,6 +641,8 @@ int bcmgenet_mii_init(struct net_device *dev)
return 0;
out:
+ if (of_phy_is_fixed_link(dn))
+ of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
@@ -647,7 +652,10 @@ out:
void bcmgenet_mii_exit(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
+ if (of_phy_is_fixed_link(dn))
+ of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f9df4b5ae90e..f42f672b0e7e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
return 0;
hw_cons = *(tcb->hw_consumer_index);
+ rmb();
cons = tcb->consumer_index;
q_depth = tcb->q_depth;
@@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BNA_QE_INDX_INC(prod, q_depth);
tcb->producer_index = prod;
- smp_mb();
+ wmb();
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
@@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_tx_timestamp(skb);
bna_txq_prod_indx_doorbell(tcb);
- smp_mb();
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index b32444a3ed79..ec09fcece711 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -975,6 +975,7 @@ static inline void macb_init_rx_ring(struct macb *bp)
addr += bp->rx_buffer_size;
}
bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+ bp->rx_tail = 0;
}
static int macb_rx(struct macb *bp, int budget)
@@ -1156,6 +1157,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(RXUBR)) {
ctrl = macb_readl(bp, NCR);
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+ wmb();
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1616,8 +1618,6 @@ static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
-
- bp->rx_tail = 0;
}
static void macb_reset_hw(struct macb *bp)
@@ -2673,6 +2673,12 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
lp->skb_length = skb->len;
lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ netdev_err(dev, "%s: DMA mapping error\n", __func__);
+ return NETDEV_TX_OK;
+ }
/* Set address of the data in the Transmit Address register */
macb_writel(lp, TAR, lp->skb_physaddr);
@@ -2764,6 +2770,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
if (intstatus & MACB_BIT(RXUBR)) {
ctl = macb_readl(lp, NCR);
macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+ wmb();
macb_writel(lp, NCR, ctl | MACB_BIT(RE));
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 30426109711c..86bd93ce2ea3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -47,7 +47,7 @@
/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
-#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */
/* Max pkinds */
#define NIC_MAX_PKIND 16
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
struct nicvf_hw_stats {
u64 rx_bytes;
+ u64 rx_frames;
u64 rx_ucast_frames;
u64 rx_bcast_frames;
u64 rx_mcast_frames;
- u64 rx_fcs_errors;
- u64 rx_l2_errors;
+ u64 rx_drops;
u64 rx_drop_red;
u64 rx_drop_red_bytes;
u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_ucast_frames;
+ u64 tx_bcast_frames;
+ u64 tx_mcast_frames;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* CQE Rx errs */
u64 rx_bgx_truncated_pkts;
u64 rx_jabber_errs;
u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
u64 rx_l4_pclp;
u64 rx_truncated_pkts;
- u64 tx_bytes_ok;
- u64 tx_ucast_frames_ok;
- u64 tx_bcast_frames_ok;
- u64 tx_mcast_frames_ok;
- u64 tx_drops;
-};
-
-struct nicvf_drv_stats {
- /* Rx */
- u64 rx_frames_ok;
- u64 rx_frames_64;
- u64 rx_frames_127;
- u64 rx_frames_255;
- u64 rx_frames_511;
- u64 rx_frames_1023;
- u64 rx_frames_1518;
- u64 rx_frames_jumbo;
- u64 rx_drops;
-
+ /* CQE Tx errs */
+ u64 tx_desc_fault;
+ u64 tx_hdr_cons_err;
+ u64 tx_subdesc_err;
+ u64 tx_max_size_exceeded;
+ u64 tx_imm_size_oflow;
+ u64 tx_data_seq_err;
+ u64 tx_mem_seq_err;
+ u64 tx_lock_viol;
+ u64 tx_data_fault;
+ u64 tx_tstmp_conflict;
+ u64 tx_tstmp_timeout;
+ u64 tx_mem_fault;
+ u64 tx_csum_overlap;
+ u64 tx_csum_overflow;
+
+ /* driver debug stats */
u64 rcv_buffer_alloc_failures;
-
- /* Tx */
- u64 tx_frames_ok;
- u64 tx_drops;
u64 tx_tso;
u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;
+
+ struct u64_stats_sync syncp;
};
struct nicvf {
@@ -282,7 +291,6 @@ struct nicvf {
u8 node;
u8 cpi_alg;
- u16 mtu;
bool link_up;
u8 duplex;
u32 speed;
@@ -298,7 +306,7 @@ struct nicvf {
/* Stats */
struct nicvf_hw_stats hw_stats;
- struct nicvf_drv_stats drv_stats;
+ struct nicvf_drv_stats __percpu *drv_stats;
struct bgx_stats bgx_stats;
/* MSI-X */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 2bbf4cbf08b2..6677b96e1f3f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
+#include <linux/if_vlan.h>
#include "nic_reg.h"
#include "nic.h"
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
- if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
- dev_err(&nic->pdev->dev,
- "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
- vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+ int bgx, lmac, lmac_cnt;
+ u64 lmac_credits;
+
+ if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
return 1;
- }
- new_frs += ETH_HLEN;
- if (new_frs <= nic->pkind.maxlen)
- return 0;
- nic->pkind.maxlen = new_frs;
- nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac += bgx * MAX_LMAC_PER_BGX;
+
+ new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+
+ /* Update corresponding LMAC credits */
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
+ lmac_credits &= ~(0xFFFFFULL << 12);
+ lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
+
+ /* Enforce MTU in HW
+ * This config is supported only from 88xx pass 2.0 onwards.
+ */
+ if (!pass1_silicon(nic->pdev))
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
return 0;
}
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic)
/* PKIND configuration */
nic->pkind.minlen = 0;
- nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
nic->pkind.lenerr_en = 1;
nic->pkind.rx_hdr = 0;
nic->pkind.hdr_sl = 0;
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
nic_reg_write(nic, reg_addr, 0);
}
}
+
return 0;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index edf779f5a227..80d46337cf29 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -106,6 +106,7 @@
#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_CFG2 (0x240100)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index ad4fddb55421..432bf6be57cb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -36,11 +36,11 @@ struct nicvf_stat {
static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_bytes),
+ NICVF_HW_STAT(rx_frames),
NICVF_HW_STAT(rx_ucast_frames),
NICVF_HW_STAT(rx_bcast_frames),
NICVF_HW_STAT(rx_mcast_frames),
- NICVF_HW_STAT(rx_fcs_errors),
- NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(rx_drops),
NICVF_HW_STAT(rx_drop_red),
NICVF_HW_STAT(rx_drop_red_bytes),
NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_drop_mcast),
NICVF_HW_STAT(rx_drop_l3_bcast),
NICVF_HW_STAT(rx_drop_l3_mcast),
- NICVF_HW_STAT(rx_bgx_truncated_pkts),
- NICVF_HW_STAT(rx_jabber_errs),
- NICVF_HW_STAT(rx_fcs_errs),
- NICVF_HW_STAT(rx_bgx_errs),
- NICVF_HW_STAT(rx_prel2_errs),
- NICVF_HW_STAT(rx_l2_hdr_malformed),
- NICVF_HW_STAT(rx_oversize),
- NICVF_HW_STAT(rx_undersize),
- NICVF_HW_STAT(rx_l2_len_mismatch),
- NICVF_HW_STAT(rx_l2_pclp),
- NICVF_HW_STAT(rx_ip_ver_errs),
- NICVF_HW_STAT(rx_ip_csum_errs),
- NICVF_HW_STAT(rx_ip_hdr_malformed),
- NICVF_HW_STAT(rx_ip_payload_malformed),
- NICVF_HW_STAT(rx_ip_ttl_errs),
- NICVF_HW_STAT(rx_l3_pclp),
- NICVF_HW_STAT(rx_l4_malformed),
- NICVF_HW_STAT(rx_l4_csum_errs),
- NICVF_HW_STAT(rx_udp_len_errs),
- NICVF_HW_STAT(rx_l4_port_errs),
- NICVF_HW_STAT(rx_tcp_flag_errs),
- NICVF_HW_STAT(rx_tcp_offset_errs),
- NICVF_HW_STAT(rx_l4_pclp),
- NICVF_HW_STAT(rx_truncated_pkts),
- NICVF_HW_STAT(tx_bytes_ok),
- NICVF_HW_STAT(tx_ucast_frames_ok),
- NICVF_HW_STAT(tx_bcast_frames_ok),
- NICVF_HW_STAT(tx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_fcs_errors),
+ NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(tx_bytes),
+ NICVF_HW_STAT(tx_frames),
+ NICVF_HW_STAT(tx_ucast_frames),
+ NICVF_HW_STAT(tx_bcast_frames),
+ NICVF_HW_STAT(tx_mcast_frames),
+ NICVF_HW_STAT(tx_drops),
};
static const struct nicvf_stat nicvf_drv_stats[] = {
- NICVF_DRV_STAT(rx_frames_ok),
- NICVF_DRV_STAT(rx_frames_64),
- NICVF_DRV_STAT(rx_frames_127),
- NICVF_DRV_STAT(rx_frames_255),
- NICVF_DRV_STAT(rx_frames_511),
- NICVF_DRV_STAT(rx_frames_1023),
- NICVF_DRV_STAT(rx_frames_1518),
- NICVF_DRV_STAT(rx_frames_jumbo),
- NICVF_DRV_STAT(rx_drops),
+ NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+ NICVF_DRV_STAT(rx_jabber_errs),
+ NICVF_DRV_STAT(rx_fcs_errs),
+ NICVF_DRV_STAT(rx_bgx_errs),
+ NICVF_DRV_STAT(rx_prel2_errs),
+ NICVF_DRV_STAT(rx_l2_hdr_malformed),
+ NICVF_DRV_STAT(rx_oversize),
+ NICVF_DRV_STAT(rx_undersize),
+ NICVF_DRV_STAT(rx_l2_len_mismatch),
+ NICVF_DRV_STAT(rx_l2_pclp),
+ NICVF_DRV_STAT(rx_ip_ver_errs),
+ NICVF_DRV_STAT(rx_ip_csum_errs),
+ NICVF_DRV_STAT(rx_ip_hdr_malformed),
+ NICVF_DRV_STAT(rx_ip_payload_malformed),
+ NICVF_DRV_STAT(rx_ip_ttl_errs),
+ NICVF_DRV_STAT(rx_l3_pclp),
+ NICVF_DRV_STAT(rx_l4_malformed),
+ NICVF_DRV_STAT(rx_l4_csum_errs),
+ NICVF_DRV_STAT(rx_udp_len_errs),
+ NICVF_DRV_STAT(rx_l4_port_errs),
+ NICVF_DRV_STAT(rx_tcp_flag_errs),
+ NICVF_DRV_STAT(rx_tcp_offset_errs),
+ NICVF_DRV_STAT(rx_l4_pclp),
+ NICVF_DRV_STAT(rx_truncated_pkts),
+
+ NICVF_DRV_STAT(tx_desc_fault),
+ NICVF_DRV_STAT(tx_hdr_cons_err),
+ NICVF_DRV_STAT(tx_subdesc_err),
+ NICVF_DRV_STAT(tx_max_size_exceeded),
+ NICVF_DRV_STAT(tx_imm_size_oflow),
+ NICVF_DRV_STAT(tx_data_seq_err),
+ NICVF_DRV_STAT(tx_mem_seq_err),
+ NICVF_DRV_STAT(tx_lock_viol),
+ NICVF_DRV_STAT(tx_data_fault),
+ NICVF_DRV_STAT(tx_tstmp_conflict),
+ NICVF_DRV_STAT(tx_tstmp_timeout),
+ NICVF_DRV_STAT(tx_mem_fault),
+ NICVF_DRV_STAT(tx_csum_overlap),
+ NICVF_DRV_STAT(tx_csum_overflow),
+
NICVF_DRV_STAT(rcv_buffer_alloc_failures),
- NICVF_DRV_STAT(tx_frames_ok),
NICVF_DRV_STAT(tx_tso),
- NICVF_DRV_STAT(tx_drops),
NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct nicvf *nic = netdev_priv(netdev);
- int stat;
- int sqs;
+ int stat, tmp_stats;
+ int sqs, cpu;
nicvf_update_stats(nic);
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
for (stat = 0; stat < nicvf_n_hw_stats; stat++)
*(data++) = ((u64 *)&nic->hw_stats)
[nicvf_hw_stats[stat].index];
- for (stat = 0; stat < nicvf_n_drv_stats; stat++)
- *(data++) = ((u64 *)&nic->drv_stats)
- [nicvf_drv_stats[stat].index];
+ for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+ tmp_stats = 0;
+ for_each_possible_cpu(cpu)
+ tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
+ [nicvf_drv_stats[stat].index];
+ *(data++) = tmp_stats;
+ }
nicvf_get_qset_stats(nic, stats, &data);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 45a13f718863..8a37012c9c89 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
return qidx;
}
-static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
- struct sk_buff *skb)
-{
- if (skb->len <= 64)
- nic->drv_stats.rx_frames_64++;
- else if (skb->len <= 127)
- nic->drv_stats.rx_frames_127++;
- else if (skb->len <= 255)
- nic->drv_stats.rx_frames_255++;
- else if (skb->len <= 511)
- nic->drv_stats.rx_frames_511++;
- else if (skb->len <= 1023)
- nic->drv_stats.rx_frames_1023++;
- else if (skb->len <= 1518)
- nic->drv_stats.rx_frames_1518++;
- else
- nic->drv_stats.rx_frames_jumbo++;
-}
-
/* The Cavium ThunderX network controller can *only* be found in SoCs
* containing the ThunderX ARM64 CPU implementation. All accesses to the device
* registers on this platform are implicitly strongly ordered with respect
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev,
static int nicvf_init_resources(struct nicvf *nic)
{
int err;
- union nic_mbx mbx = {};
-
- mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
/* Enable Qset */
nicvf_qset_config(nic, true);
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic)
return err;
}
- /* Send VF config done msg to PF */
- nicvf_write_to_mbx(nic, &mbx);
-
return 0;
}
static void nicvf_snd_pkt_handler(struct net_device *netdev,
- struct cmp_queue *cq,
struct cqe_send_t *cqe_tx,
int cqe_type, int budget,
unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
__func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
cqe_tx->sqe_ptr, hdr->subdesc_cnt);
- nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+ nicvf_check_cqe_tx_errs(nic, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
if (skb) {
/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
}
- nicvf_set_rx_frame_cnt(nic, skb);
-
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +675,7 @@ loop:
work_done++;
break;
case CQE_TYPE_SEND:
- nicvf_snd_pkt_handler(netdev, cq,
+ nicvf_snd_pkt_handler(netdev,
(void *)cq_desc, CQE_TYPE_SEND,
budget, &tx_pkts, &tx_bytes);
tx_done++;
@@ -740,7 +712,7 @@ done:
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
- nic->drv_stats.txq_wake++;
+ this_cpu_inc(nic->drv_stats->txq_wake);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
netif_tx_stop_queue(txq);
- nic->drv_stats.txq_stop++;
+ this_cpu_inc(nic->drv_stats->txq_stop);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit ring full, stopping SQ%d\n",
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev)
return 0;
}
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+ union nic_mbx mbx = {};
+
+ mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
int nicvf_open(struct net_device *netdev)
{
- int err, qidx;
+ int cpu, err, qidx;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
-
- nic->mtu = netdev->mtu;
+ union nic_mbx mbx = {};
netif_carrier_off(netdev);
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev)
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
- /* Configure receive side scaling */
- if (!nic->sqs_mode)
+ /* Configure receive side scaling and MTU */
+ if (!nic->sqs_mode) {
nicvf_rss_init(nic);
+ if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+ goto cleanup;
+
+ /* Clear percpu stats */
+ for_each_possible_cpu(cpu)
+ memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+ sizeof(struct nicvf_drv_stats));
+ }
err = nicvf_register_interrupts(nic);
if (err)
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev)
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
- nic->drv_stats.txq_stop = 0;
- nic->drv_stats.txq_wake = 0;
+ /* Send VF config done msg to PF */
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ nicvf_write_to_mbx(nic, &mbx);
return 0;
cleanup:
@@ -1297,17 +1288,6 @@ napi_del:
return err;
}
-static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
-{
- union nic_mbx mbx = {};
-
- mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
- mbx.frs.max_frs = mtu;
- mbx.frs.vf_id = nic->vf_id;
-
- return nicvf_send_msg_to_pf(nic, &mbx);
-}
-
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu < NIC_HW_MIN_FRS)
return -EINVAL;
+ netdev->mtu = new_mtu;
+
+ if (!netif_running(netdev))
+ return 0;
+
if (nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
- netdev->mtu = new_mtu;
- nic->mtu = new_mtu;
return 0;
}
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
void nicvf_update_stats(struct nicvf *nic)
{
- int qidx;
+ int qidx, cpu;
+ u64 tmp_stats = 0;
struct nicvf_hw_stats *stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+ struct nicvf_drv_stats *drv_stats;
struct queue_set *qs = nic->qs;
#define GET_RX_STATS(reg) \
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic)
stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
- stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
- stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
- stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
- stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+ stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
stats->tx_drops = GET_TX_STATS(TX_DROP);
- drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
- stats->tx_bcast_frames_ok +
- stats->tx_mcast_frames_ok;
- drv_stats->rx_frames_ok = stats->rx_ucast_frames +
- stats->rx_bcast_frames +
- stats->rx_mcast_frames;
- drv_stats->rx_drops = stats->rx_drop_red +
- stats->rx_drop_overrun;
- drv_stats->tx_drops = stats->tx_drops;
+ /* On T88 pass 2.0, the dummy SQE added for TSO notification
+ * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
+ * to by the dummy SQE, which results in the tx_drops counter being
+ * incremented. Subtracting it from the tx_tso counter gives the
+ * exact tx_drops count.
+ */
+ if (nic->t88 && nic->hw_tso) {
+ for_each_possible_cpu(cpu) {
+ drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+ tmp_stats += drv_stats->tx_tso;
+ }
+ stats->tx_drops = tmp_stats - stats->tx_drops;
+ }
+ stats->tx_frames = stats->tx_ucast_frames +
+ stats->tx_bcast_frames +
+ stats->tx_mcast_frames;
+ stats->rx_frames = stats->rx_ucast_frames +
+ stats->rx_bcast_frames +
+ stats->rx_mcast_frames;
+ stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
/* Update RQ and SQ stats */
for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
nicvf_update_stats(nic);
stats->rx_bytes = hw_stats->rx_bytes;
- stats->rx_packets = drv_stats->rx_frames_ok;
- stats->rx_dropped = drv_stats->rx_drops;
+ stats->rx_packets = hw_stats->rx_frames;
+ stats->rx_dropped = hw_stats->rx_drops;
stats->multicast = hw_stats->rx_mcast_frames;
- stats->tx_bytes = hw_stats->tx_bytes_ok;
- stats->tx_packets = drv_stats->tx_frames_ok;
- stats->tx_dropped = drv_stats->tx_drops;
+ stats->tx_bytes = hw_stats->tx_bytes;
+ stats->tx_packets = hw_stats->tx_frames;
+ stats->tx_dropped = hw_stats->tx_drops;
return stats;
}
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
netdev_warn(dev, "%s: Transmit timed out, resetting\n",
dev->name);
- nic->drv_stats.tx_timeout++;
+ this_cpu_inc(nic->drv_stats->tx_timeout);
schedule_work(&nic->reset_task);
}
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_netdev;
}
+ nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+ if (!nic->drv_stats) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
err = nicvf_set_qset_resources(nic);
if (err)
goto err_free_netdev;
@@ -1648,6 +1649,8 @@ err_unregister_interrupts:
nicvf_unregister_interrupts(nic);
err_free_netdev:
pci_set_drvdata(pdev, NULL);
+ if (nic->drv_stats)
+ free_percpu(nic->drv_stats);
free_netdev(netdev);
err_release_regions:
pci_release_regions(pdev);
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev)
unregister_netdev(pnetdev);
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
+ if (nic->drv_stats)
+ free_percpu(nic->drv_stats);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a4fc50155881..747ef0882976 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
- nic->drv_stats.rcv_buffer_alloc_failures++;
+ this_cpu_inc(nic->pnicvf->drv_stats->
+ rcv_buffer_alloc_failures);
return -ENOMEM;
}
nic->rb_page_offset = 0;
@@ -270,7 +271,8 @@ refill:
rbdr_idx, new_rb);
next_rbdr:
/* Re-enable RBDR interrupts only if buffer allocation is success */
- if (!nic->rb_alloc_fail && rbdr->enable)
+ if (!nic->rb_alloc_fail && rbdr->enable &&
+ netif_running(nic->pnicvf->netdev))
nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
if (rbdr_idx)
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
+ struct sk_buff *skb;
+
if (!sq)
return;
if (!sq->dmem.base)
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
+ /* Free pending skbs in the queue */
+ smp_rmb();
+ while (sq->head != sq->tail) {
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ if (skb)
+ dev_kfree_skb_any(skb);
+ sq->head++;
+ sq->head &= (sq->dmem.q_len - 1);
+ }
kfree(sq->skbuff);
nicvf_free_q_desc_mem(nic, &sq->dmem);
}
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
union nic_mbx mbx = {};
- /* Reset all RXQ's stats */
+ /* Reset all RQ/SQ and VF stats */
mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rx_stat_mask = 0x3FFF;
+ mbx.reset_stat.tx_stat_mask = 0x1F;
mbx.reset_stat.rq_stat_mask = 0xFFFF;
+ mbx.reset_stat.sq_stat_mask = 0xFFFF;
nicvf_send_msg_to_pf(nic, &mbx);
}
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
nicvf_send_msg_to_pf(nic, &mbx);
- nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
- if (!nic->sqs_mode)
+ if (!nic->sqs_mode && (qidx == 0)) {
+ /* Enable checking L3/L4 length and TCP/UDP checksums */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+ (BIT(24) | BIT(23) | BIT(21)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
+ }
/* Enable Receive queue */
memset(&rq_cfg, 0, sizeof(struct rq_cfg));
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
- nic->drv_stats.tx_tso++;
+ this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
}
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
- nic->drv_stats.tx_tso++;
+ this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
return 1;
}
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
- struct nicvf_hw_stats *stats = &nic->hw_stats;
-
if (!cqe_rx->err_level && !cqe_rx->err_opcode)
return 0;
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
- stats->rx_bgx_truncated_pkts++;
+ this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
break;
case CQ_RX_ERROP_RE_JABBER:
- stats->rx_jabber_errs++;
+ this_cpu_inc(nic->drv_stats->rx_jabber_errs);
break;
case CQ_RX_ERROP_RE_FCS:
- stats->rx_fcs_errs++;
+ this_cpu_inc(nic->drv_stats->rx_fcs_errs);
break;
case CQ_RX_ERROP_RE_RX_CTL:
- stats->rx_bgx_errs++;
+ this_cpu_inc(nic->drv_stats->rx_bgx_errs);
break;
case CQ_RX_ERROP_PREL2_ERR:
- stats->rx_prel2_errs++;
+ this_cpu_inc(nic->drv_stats->rx_prel2_errs);
break;
case CQ_RX_ERROP_L2_MAL:
- stats->rx_l2_hdr_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
break;
case CQ_RX_ERROP_L2_OVERSIZE:
- stats->rx_oversize++;
+ this_cpu_inc(nic->drv_stats->rx_oversize);
break;
case CQ_RX_ERROP_L2_UNDERSIZE:
- stats->rx_undersize++;
+ this_cpu_inc(nic->drv_stats->rx_undersize);
break;
case CQ_RX_ERROP_L2_LENMISM:
- stats->rx_l2_len_mismatch++;
+ this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
break;
case CQ_RX_ERROP_L2_PCLP:
- stats->rx_l2_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l2_pclp);
break;
case CQ_RX_ERROP_IP_NOT:
- stats->rx_ip_ver_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
break;
case CQ_RX_ERROP_IP_CSUM_ERR:
- stats->rx_ip_csum_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
break;
case CQ_RX_ERROP_IP_MAL:
- stats->rx_ip_hdr_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
break;
case CQ_RX_ERROP_IP_MALD:
- stats->rx_ip_payload_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
break;
case CQ_RX_ERROP_IP_HOP:
- stats->rx_ip_ttl_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
break;
case CQ_RX_ERROP_L3_PCLP:
- stats->rx_l3_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l3_pclp);
break;
case CQ_RX_ERROP_L4_MAL:
- stats->rx_l4_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_l4_malformed);
break;
case CQ_RX_ERROP_L4_CHK:
- stats->rx_l4_csum_errs++;
+ this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
break;
case CQ_RX_ERROP_UDP_LEN:
- stats->rx_udp_len_errs++;
+ this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
break;
case CQ_RX_ERROP_L4_PORT:
- stats->rx_l4_port_errs++;
+ this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
break;
case CQ_RX_ERROP_TCP_FLAG:
- stats->rx_tcp_flag_errs++;
+ this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
break;
case CQ_RX_ERROP_TCP_OFFSET:
- stats->rx_tcp_offset_errs++;
+ this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
break;
case CQ_RX_ERROP_L4_PCLP:
- stats->rx_l4_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l4_pclp);
break;
case CQ_RX_ERROP_RBDR_TRUNC:
- stats->rx_truncated_pkts++;
+ this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
break;
}
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
}
/* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
- struct cmp_queue_stats *stats = &cq->stats;
-
switch (cqe_tx->send_status) {
case CQ_TX_ERROP_GOOD:
- stats->tx.good++;
return 0;
case CQ_TX_ERROP_DESC_FAULT:
- stats->tx.desc_fault++;
+ this_cpu_inc(nic->drv_stats->tx_desc_fault);
break;
case CQ_TX_ERROP_HDR_CONS_ERR:
- stats->tx.hdr_cons_err++;
+ this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
break;
case CQ_TX_ERROP_SUBDC_ERR:
- stats->tx.subdesc_err++;
+ this_cpu_inc(nic->drv_stats->tx_subdesc_err);
+ break;
+ case CQ_TX_ERROP_MAX_SIZE_VIOL:
+ this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
break;
case CQ_TX_ERROP_IMM_SIZE_OFLOW:
- stats->tx.imm_size_oflow++;
+ this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
break;
case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
- stats->tx.data_seq_err++;
+ this_cpu_inc(nic->drv_stats->tx_data_seq_err);
break;
case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
- stats->tx.mem_seq_err++;
+ this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
break;
case CQ_TX_ERROP_LOCK_VIOL:
- stats->tx.lock_viol++;
+ this_cpu_inc(nic->drv_stats->tx_lock_viol);
break;
case CQ_TX_ERROP_DATA_FAULT:
- stats->tx.data_fault++;
+ this_cpu_inc(nic->drv_stats->tx_data_fault);
break;
case CQ_TX_ERROP_TSTMP_CONFLICT:
- stats->tx.tstmp_conflict++;
+ this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
break;
case CQ_TX_ERROP_TSTMP_TIMEOUT:
- stats->tx.tstmp_timeout++;
+ this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
break;
case CQ_TX_ERROP_MEM_FAULT:
- stats->tx.mem_fault++;
+ this_cpu_inc(nic->drv_stats->tx_mem_fault);
break;
case CQ_TX_ERROP_CK_OVERLAP:
- stats->tx.csum_overlap++;
+ this_cpu_inc(nic->drv_stats->tx_csum_overlap);
break;
case CQ_TX_ERROP_CK_OFLOW:
- stats->tx.csum_overflow++;
+ this_cpu_inc(nic->drv_stats->tx_csum_overflow);
break;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 869f3386028b..2e3c940c1093 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E {
CQ_TX_ERROP_DESC_FAULT = 0x10,
CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E {
CQ_TX_ERROP_ENUM_LAST = 0x8a,
};
-struct cmp_queue_stats {
- struct tx_stats {
- u64 good;
- u64 desc_fault;
- u64 hdr_cons_err;
- u64 subdesc_err;
- u64 imm_size_oflow;
- u64 data_seq_err;
- u64 mem_seq_err;
- u64 lock_viol;
- u64 data_fault;
- u64 tstmp_conflict;
- u64 tstmp_timeout;
- u64 mem_fault;
- u64 csum_overlap;
- u64 csum_overflow;
- } tx;
-} ____cacheline_aligned_in_smp;
-
enum RQ_SQ_STATS {
RQ_SQ_STATS_OCTS,
RQ_SQ_STATS_PKTS,
@@ -241,7 +223,6 @@ struct cmp_queue {
spinlock_t lock; /* lock to serialize processing CQEs */
void *desc;
struct q_desc_mem dmem;
- struct cmp_queue_stats stats;
int irq;
} ____cacheline_aligned_in_smp;
@@ -336,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8bbaedbb7b94..050e21fbb147 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
- bgx->bgx_id =
- (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id = (pci_resource_start(pdev,
+ PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index d59c71e4a000..01cc7c859131 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -28,6 +28,8 @@
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
+#define BGX_ID_MASK 0x3
+
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
/* Registers */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f320497368f4..57eb4e1345cb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4057,7 +4057,7 @@ static void cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
if (n10g) {
- i = num_online_cpus();
+ i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
s->ofldqsets = roundup(i, adap->params.nports);
} else {
s->ofldqsets = adap->params.nports;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 0945fa49a5dd..2471ff465d5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
}
static int alloc_uld_rxqs(struct adapter *adap,
- struct sge_uld_rxq_info *rxq_info,
- unsigned int nq, unsigned int offset, bool lro)
+ struct sge_uld_rxq_info *rxq_info, bool lro)
{
struct sge *s = &adap->sge;
- struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
- unsigned short *ids = rxq_info->rspq_id + offset;
- unsigned int per_chan = nq / adap->params.nports;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
unsigned int bmap_idx = 0;
- int i, err, msi_idx;
+ unsigned int per_chan;
+ int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
if (adap->flags & USING_MSIX)
msi_idx = 1;
@@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
msi_idx = -((int)s->intrq.abs_id + 1);
for (i = 0; i < nq; i++, q++) {
+ if (i == rxq_info->nrxq) {
+ /* start allocation of concentrator queues */
+ per_chan = rxq_info->nciq / adap->params.nports;
+ que_idx = 0;
+ }
+
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
+ adap->port[que_idx++ / per_chan],
msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
@@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (err)
goto freeout;
if (msi_idx >= 0)
- rxq_info->msix_tbl[i + offset] = bmap_idx;
+ rxq_info->msix_tbl[i] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
}
return 0;
freeout:
- q = rxq_info->uldrxq + offset;
+ q = rxq_info->uldrxq;
for ( ; i; i--, q++) {
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
}
-
- /* We need to free rxq also in case of ciq allocation failure */
- if (offset) {
- q = rxq_info->uldrxq + offset;
- for ( ; i; i--, q++) {
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
- }
- }
return err;
}
@@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
return -ENOMEM;
}
- ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
- !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
- rxq_info->nrxq, lro));
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
if (adap->flags & FULL_INIT_DONE &&
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 539de764bbd3..cbd68a8fe2e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
- if (err)
+ if (err) {
+ t4_free_mem(qe);
goto out;
+ }
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e74fd6085df..e19a0ca8e5dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2951,7 +2951,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
- napi_hash_del(&rq->napi);
netif_napi_del(&rq->napi);
rq->netdev = NULL;
rq->cntxt_id = rq->abs_id = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 50812a1d67bd..ecf3ccc257bc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -168,6 +168,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
+ CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/
/* T6 adapters:
*/
@@ -178,9 +179,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6005),
CH_PCI_ID_TABLE_FENTRY(0x6006),
CH_PCI_ID_TABLE_FENTRY(0x6007),
+ CH_PCI_ID_TABLE_FENTRY(0x6008),
CH_PCI_ID_TABLE_FENTRY(0x6009),
CH_PCI_ID_TABLE_FENTRY(0x600d),
- CH_PCI_ID_TABLE_FENTRY(0x6010),
CH_PCI_ID_TABLE_FENTRY(0x6011),
CH_PCI_ID_TABLE_FENTRY(0x6014),
CH_PCI_ID_TABLE_FENTRY(0x6015),
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index e572a527b18d..36bc2c71fba9 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
struct vnic_dev *vdev = rq->vdev;
+ int i;
- iowrite32(0, &rq->ctrl->enable);
+ /* Due to a race condition with clearing RQ "mini-cache" in hw, we need
+ * to disable the RQ twice to guarantee that stale descriptors are not
+ * used when this RQ is re-enabled.
+ */
+ for (i = 0; i < 2; i++) {
+ iowrite32(0, &rq->ctrl->enable);
- /* Wait for HW to ACK disable request */
- for (wait = 0; wait < 1000; wait++) {
- if (!(ioread32(&rq->ctrl->running)))
- return 0;
- udelay(10);
- }
+ /* Wait for HW to ACK disable request */
+ for (wait = 20000; wait > 0; wait--)
+ if (!ioread32(&rq->ctrl->running))
+ break;
+ if (!wait) {
+ vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
+ rq->index);
- vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index);
+ return -ETIMEDOUT;
+ }
+ }
- return -ETIMEDOUT;
+ return 0;
}
void vnic_rq_clean(struct vnic_rq *rq,
@@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq,
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
iowrite32(fetch_index, &rq->ctrl->posted_index);
+ /* Anytime we write fetch_index, we need to re-write 0 to rq->enable
+ * to re-sync internal VIC state.
+ */
+ iowrite32(0, &rq->ctrl->enable);
+
vnic_dev_clear_desc_ring(&rq->ring);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index cece8a08edca..93aa2939142a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2813,7 +2813,6 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
- napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
free_cpumask_var(eqo->affinity_mask);
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f928e6f79c89..223f35cc034c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -669,6 +669,7 @@ static const struct of_device_id nps_enet_dt_ids[] = {
{ .compatible = "ezchip,nps-mgt-enet" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c865135f3cb9..5ea740b4cf14 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -574,6 +574,8 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
+
+ u64 ethtool_stats[0];
};
void fec_ptp_init(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 48a033e64423..5f77caa59534 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1430,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_put(skb, pkt_len - 4);
data = skb->data;
+ if (!is_copybreak && need_swap)
+ swap_buffer(data, pkt_len);
+
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC)
data = skb_pull_inline(skb, 2);
#endif
- if (!is_copybreak && need_swap)
- swap_buffer(data, pkt_len);
-
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
if (fep->bufdesc_ex)
@@ -2313,14 +2313,24 @@ static const struct fec_stat {
{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};
-static void fec_enet_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
int i;
for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
- data[i] = readl(fep->hwp + fec_stats[i].offset);
+ fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
+}
+
+static void fec_enet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ if (netif_running(dev))
+ fec_enet_update_ethtool_stats(dev);
+
+ memcpy(data, fep->ethtool_stats, ARRAY_SIZE(fec_stats) * sizeof(u64));
}
static void fec_enet_get_strings(struct net_device *netdev,
@@ -2874,6 +2884,8 @@ fec_enet_close(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_unused();
+ fec_enet_update_ethtool_stats(ndev);
+
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
@@ -3180,6 +3192,8 @@ static int fec_enet_init(struct net_device *ndev)
fec_restart(ndev);
+ fec_enet_update_ethtool_stats(ndev);
+
return 0;
}
@@ -3278,7 +3292,8 @@ fec_probe(struct platform_device *pdev)
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
/* Init network device */
- ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
+ ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
+ ARRAY_SIZE(fec_stats) * sizeof(u64),
num_tx_qs, num_rx_qs);
if (!ndev)
return -ENOMEM;
@@ -3475,6 +3490,8 @@ failed_regulator:
failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
failed_phy:
of_node_put(phy_node);
failed_ioremap:
@@ -3488,6 +3505,7 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct device_node *np = pdev->dev.of_node;
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -3495,6 +3513,8 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 53ef51e3bd9e..71a5ded9d1de 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1107,6 +1107,9 @@ int memac_free(struct fman_mac *memac)
{
free_init_resources(memac);
+ if (memac->pcsphy)
+ put_device(&memac->pcsphy->mdio.dev);
+
kfree(memac->memac_drv_param);
kfree(memac);
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index efabb04a1ae8..4b0f3a50b293 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -722,9 +722,6 @@ int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
- if (tgec->cfg)
- tgec->cfg = NULL;
-
kfree(tgec->cfg);
kfree(tgec);
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 8fe6b3e253fa..736db9d9b0ad 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -892,6 +892,8 @@ static int mac_probe(struct platform_device *_of_dev)
priv->fixed_link->duplex = phy->duplex;
priv->fixed_link->pause = phy->pause;
priv->fixed_link->asym_pause = phy->asym_pause;
+
+ put_device(&phy->mdio.dev);
}
err = mac_dev->init(mac_dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc120c148d97..4b86260584a0 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -980,7 +980,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
err = clk_prepare_enable(clk);
if (err) {
ret = err;
- goto out_free_fpi;
+ goto out_deregister_fixed_link;
}
fpi->clk_per = clk;
}
@@ -1061,6 +1061,9 @@ out_put:
of_node_put(fpi->phy_node);
if (fpi->clk_per)
clk_disable_unprepare(fpi->clk_per);
+out_deregister_fixed_link:
+ if (of_phy_is_fixed_link(ofdev->dev.of_node))
+ of_phy_deregister_fixed_link(ofdev->dev.of_node);
out_free_fpi:
kfree(fpi);
return ret;
@@ -1079,6 +1082,8 @@ static int fs_enet_remove(struct platform_device *ofdev)
of_node_put(fep->fpi->phy_node);
if (fep->fpi->clk_per)
clk_disable_unprepare(fep->fpi->clk_per);
+ if (of_phy_is_fixed_link(ofdev->dev.of_node))
+ of_phy_deregister_fixed_link(ofdev->dev.of_node);
free_netdev(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b4f5bc0e279..9061c2f82b9c 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1312,6 +1312,7 @@ static void gfar_init_addr_hash_table(struct gfar_private *priv)
*/
static int gfar_probe(struct platform_device *ofdev)
{
+ struct device_node *np = ofdev->dev.of_node;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
int err = 0, i;
@@ -1462,6 +1463,8 @@ static int gfar_probe(struct platform_device *ofdev)
return 0;
register_fail:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
unmap_group_regs(priv);
gfar_free_rx_queues(priv);
gfar_free_tx_queues(priv);
@@ -1474,11 +1477,16 @@ register_fail:
static int gfar_remove(struct platform_device *ofdev)
{
struct gfar_private *priv = platform_get_drvdata(ofdev);
+ struct device_node *np = ofdev->dev.of_node;
of_node_put(priv->phy_node);
of_node_put(priv->tbi_node);
unregister_netdev(priv->ndev);
+
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+
unmap_group_regs(priv);
gfar_free_rx_queues(priv);
gfar_free_tx_queues(priv);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 186ef8f16c80..f76d33279454 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3868,9 +3868,8 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev = alloc_etherdev(sizeof(*ugeth));
if (dev == NULL) {
- of_node_put(ug_info->tbi_node);
- of_node_put(ug_info->phy_node);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_deregister_fixed_link;
}
ugeth = netdev_priv(dev);
@@ -3907,10 +3906,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
- free_netdev(dev);
- of_node_put(ug_info->tbi_node);
- of_node_put(ug_info->phy_node);
- return err;
+ goto err_free_netdev;
}
mac_addr = of_get_mac_address(np);
@@ -3923,16 +3919,29 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ugeth->node = np;
return 0;
+
+err_free_netdev:
+ free_netdev(dev);
+err_deregister_fixed_link:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(ug_info->tbi_node);
+ of_node_put(ug_info->phy_node);
+
+ return err;
}
static int ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
free_netdev(dev);
ucc_geth_memclean(ugeth);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index c54c6fac0d1d..b6ed818f78ff 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
return ERR_PTR(-ENODEV);
handle = dev->ops->get_handle(dev, port_id);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ put_device(&dev->cls_dev);
return handle;
+ }
handle->dev = dev;
handle->owner_dev = owner_dev;
@@ -356,6 +358,8 @@ out_when_init_queue:
for (j = i - 1; j >= 0; j--)
hnae_fini_queue(handle->qs[j]);
+ put_device(&dev->cls_dev);
+
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);
@@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h)
dev->ops->put_handle(h);
module_put(dev->owner);
+
+ put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 8d70377f6624..8ea3d95fa483 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2751,6 +2751,7 @@ static const struct of_device_id g_dsaf_match[] = {
{.compatible = "hisilicon,hns-dsaf-v2"},
{}
};
+MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 33f4c483af0f..501eb2090ca6 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -563,6 +563,7 @@ static const struct of_device_id hns_mdio_match[] = {
{.compatible = "hisilicon,hns-mdio"},
{}
};
+MODULE_DEVICE_TABLE(of, hns_mdio_match);
static const struct acpi_device_id hns_mdio_acpi_match[] = {
{ "HISI0141", 0 },
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 54efa9a5167b..bd719e25dd76 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev)
netif_info(port, ifup, dev, "enabling port\n");
+ netif_carrier_off(dev);
+
ret = ehea_up(dev);
if (!ret) {
port_napi_enable(port);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index bfe17d9c022d..0fbf686f5e7c 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -74,7 +74,6 @@
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
-#include <linux/seq_file.h>
#include <linux/workqueue.h>
#include "ibmvnic.h"
@@ -1190,7 +1189,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
if (!scrq)
return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
+ scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
if (!scrq->msgs) {
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
@@ -1461,14 +1460,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
return rc;
req_rx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
i = adapter->req_tx_queues;
req_tx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
release_sub_crqs_no_irqs(adapter);
return rc;
}
@@ -1503,9 +1504,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
adapter->max_rx_add_entries_per_subcrq > entries_page ?
entries_page : adapter->max_rx_add_entries_per_subcrq;
- /* Choosing the maximum number of queues supported by firmware*/
- adapter->req_tx_queues = adapter->max_tx_queues;
- adapter->req_rx_queues = adapter->max_rx_queues;
+ adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
+ adapter->req_rx_queues = adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
adapter->req_mtu = adapter->max_mtu;
@@ -3232,6 +3232,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
+static void ibmvnic_xport_event(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ ibmvnic_xport);
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ ibmvnic_free_inflight(adapter);
+ release_sub_crqs(adapter);
+ if (adapter->migrated) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ dev_err(dev, "Error after enable rc=%ld\n", rc);
+ adapter->migrated = false;
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc)
+ dev_err(dev, "Error sending init rc=%ld\n", rc);
+ }
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -3267,15 +3288,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
dev_info(dev, "Re-enabling adapter\n");
adapter->migrated = true;
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- dev_err(dev, "Error after enable rc=%ld\n", rc);
- adapter->migrated = false;
- rc = ibmvnic_send_crq_init(adapter);
- if (rc)
- dev_err(dev, "Error sending init rc=%ld\n", rc);
+ schedule_work(&adapter->ibmvnic_xport);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
netif_carrier_off(netdev);
@@ -3284,8 +3297,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
gen_crq->cmd);
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
+ schedule_work(&adapter->ibmvnic_xport);
}
return;
case IBMVNIC_CRQ_CMD_RSP:
@@ -3654,6 +3666,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
if (adapter->failover) {
adapter->failover = false;
@@ -3691,7 +3704,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct net_device *netdev;
unsigned char *mac_addr_p;
struct dentry *ent;
- char buf[16]; /* debugfs name buf */
+ char buf[17]; /* debugfs name buf */
int rc;
dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3725,6 +3738,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
SET_NETDEV_DEV(netdev, &dev->dev);
INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+ INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
spin_lock_init(&adapter->stats_lock);
@@ -3792,6 +3806,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
rc = register_netdev(netdev);
if (rc) {
@@ -3828,6 +3843,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);
+ dma_unmap_single(&dev->dev, adapter->stats_token,
+ sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
+
if (adapter->ras_comps)
dma_free_coherent(&dev->dev,
adapter->ras_comp_num *
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index bfc84c7d0e11..dd775d951b73 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -27,7 +27,7 @@
/**************************************************************************/
#define IBMVNIC_NAME "ibmvnic"
-#define IBMVNIC_DRIVER_VERSION "1.0"
+#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
#define IBMVNIC_STATS_TIMEOUT 1
/* basic structures plus 100 2k buffers */
@@ -1048,5 +1048,6 @@ struct ibmvnic_adapter {
u8 map_id;
struct work_struct vnic_crq_init;
+ struct work_struct ibmvnic_xport;
bool failover;
};
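
Editor's note: the ibmvnic change above moves transport-event recovery out of the CRQ handler into a work item. A reduced sketch of that pattern with hypothetical names, assuming the slow recovery steps are safe to run in process context:

#include <linux/workqueue.h>

struct example_adapter {
	struct work_struct	xport_work;
	bool			migrated;
};

static void example_xport_event(struct work_struct *work)
{
	struct example_adapter *adapter =
		container_of(work, struct example_adapter, xport_work);

	/* Slow recovery (queue teardown, re-init) runs here, not in the
	 * interrupt path.
	 */
	if (adapter->migrated)
		adapter->migrated = false;
}

static void example_setup(struct example_adapter *adapter)
{
	INIT_WORK(&adapter->xport_work, example_xport_event);
}

static void example_crq_event(struct example_adapter *adapter)
{
	/* Called from the interrupt path: only queue the work. */
	schedule_work(&adapter->xport_work);
}
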
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2030d7c1dc94..6d61e443bdf8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -92,6 +92,7 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ac1faee2a5b8..31c97e3937a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4641,29 +4641,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
}
/**
- * i40e_pf_get_default_tc - Get bitmap for first enabled TC
- * @pf: PF being queried
- *
- * Return a bitmap for first enabled traffic class for this PF.
- **/
-static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
-{
- u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
- u8 i = 0;
-
- if (!enabled_tc)
- return 0x1; /* TC0 */
-
- /* Find the first enabled TC */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT(i))
- break;
- }
-
- return BIT(i);
-}
-
-/**
* i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
* @pf: PF being queried
*
@@ -4673,7 +4650,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
/* If DCB is not enabled for this PF then just return default TC */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4683,7 +4660,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
@@ -5029,7 +5006,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
- tc_map = i40e_pf_get_default_tc(pf);
+ tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
if (pf->vsi[v]->type == I40E_VSI_FCOE)
tc_map = i40e_get_fcoe_tc_map(pf);
@@ -5717,7 +5694,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -7707,6 +7684,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
pf->msix_entries = NULL;
+ pci_disable_msix(pf->pdev);
return -ENODEV;
} else if (v_actual == I40E_MIN_MSIX) {
@@ -9056,7 +9034,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
- nlflags, 0, 0, filter_mask, NULL);
+ 0, 0, nlflags, filter_mask, NULL);
}
/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index edc9a6ac5169..9affd7c198bd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4931,11 +4931,15 @@ static int igb_tso(struct igb_ring *tx_ring,
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
/* IP header will have to cancel out any data that
* is not a part of the outer IP header
*/
- ip.v4->check = csum_fold(csum_add(lco_csum(skb),
- csum_unfold(l4.tcp->check)));
+ ip.v4->check = csum_fold(csum_partial(trans_start,
+ csum_start - trans_start,
+ 0));
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 12bb877df860..7dff7f6239cd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1965,11 +1965,15 @@ static int igbvf_tso(struct igbvf_ring *tx_ring,
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
/* IP header will have to cancel out any data that
* is not a part of the outer IP header
*/
- ip.v4->check = csum_fold(csum_add(lco_csum(skb),
- csum_unfold(l4.tcp->check)));
+ ip.v4->check = csum_fold(csum_partial(trans_start,
+ csum_start - trans_start,
+ 0));
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a244d9a67264..fee1f2918ead 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7277,11 +7277,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
/* IP header will have to cancel out any data that
* is not a part of the outer IP header
*/
- ip.v4->check = csum_fold(csum_add(lco_csum(skb),
- csum_unfold(l4.tcp->check)));
+ ip.v4->check = csum_fold(csum_partial(trans_start,
+ csum_start - trans_start,
+ 0));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0;
@@ -9135,10 +9139,14 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
+
+ if (netif_running(pdev)) {
+ err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (err)
+ goto fwd_add_err;
+ netif_tx_start_all_queues(vdev);
+ }
+
return fwd_adapter;
fwd_add_err:
/* unwind counter and free adapter struct */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 7eaac3234049..cbf70fe4028a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3329,11 +3329,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
/* IP header will have to cancel out any data that
* is not a part of the outer IP header
*/
- ip.v4->check = csum_fold(csum_add(lco_csum(skb),
- csum_unfold(l4.tcp->check)));
+ ip.v4->check = csum_fold(csum_partial(trans_start,
+ csum_start - trans_start,
+ 0));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0;
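
Editor's note: the igb/igbvf/ixgbe/ixgbevf TSO hunks above all switch to the same checksum trick. A minimal sketch of it, not part of the patch: sum the bytes between the end of the outer IPv4 header and skb_checksum_start(), then fold the result so the hardware-offloaded inner checksum cancels out of the outer header checksum.

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Illustrative only: compute the outer IPv4 check value used before
 * handing the segmented packet to the TSO engine.
 */
static __sum16 example_outer_ipv4_check(struct sk_buff *skb, struct iphdr *iph)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *trans_start = (unsigned char *)iph + iph->ihl * 4;

	return csum_fold(csum_partial(trans_start,
				      csum_start - trans_start, 0));
}
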
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 55831188bc32..5b12022adf1f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
temp = (val & 0x003fff00) >> 8;
temp *= 64000000;
+ temp += mp->t_clk / 2;
do_div(temp, mp->t_clk);
return (unsigned int)temp;
@@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
temp *= 64000000;
+ temp += mp->t_clk / 2;
do_div(temp, mp->t_clk);
return (unsigned int)temp;
@@ -2968,6 +2970,22 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->txq_count = pd->tx_queue_count ? : 1;
}
+static int get_phy_mode(struct mv643xx_eth_private *mp)
+{
+ struct device *dev = mp->dev->dev.parent;
+ int iface = -1;
+
+ if (dev->of_node)
+ iface = of_get_phy_mode(dev->of_node);
+
+ /* Historical default if unspecified. We could also read/write
+ * the interface state in the PSC1
+ */
+ if (iface < 0)
+ iface = PHY_INTERFACE_MODE_GMII;
+ return iface;
+}
+
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
@@ -2994,7 +3012,7 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
"orion-mdio-mii", addr);
phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!IS_ERR(phydev)) {
phy_addr_set(mp, addr);
break;
@@ -3090,6 +3108,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
mp = netdev_priv(dev);
platform_set_drvdata(pdev, mp);
@@ -3129,7 +3148,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (pd->phy_node) {
mp->phy = of_phy_connect(mp->dev, pd->phy_node,
mv643xx_eth_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!mp->phy)
err = -ENODEV;
else
@@ -3187,8 +3206,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->priv_flags |= IFF_UNICAST_FLT;
dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
- SET_NETDEV_DEV(dev, &pdev->dev);
-
if (mp->shared->win_protect)
wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
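
Editor's note: the coalescing hunks above add half the divisor before do_div(). A small sketch of that rounding idiom, hypothetical names, same scale factor as the driver:

#include <linux/types.h>
#include <asm/div64.h>

/* Illustrative only: adding clk/2 before do_div() rounds to nearest
 * instead of truncating toward zero.
 */
static unsigned int example_cycles_to_usec(u64 val, u32 t_clk)
{
	val *= 64000000;	/* scale factor used by the driver */
	val += t_clk / 2;	/* rounding term added by the patch */
	do_div(val, t_clk);

	return (unsigned int)val;
}
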
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5cb07c2017bf..707bc4680b9b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4151,7 +4151,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
- dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
@@ -4191,6 +4191,8 @@ err_clk:
clk_disable_unprepare(pp->clk);
err_put_phy_node:
of_node_put(phy_node);
+ if (of_phy_is_fixed_link(dn))
+ of_phy_deregister_fixed_link(dn);
err_free_irq:
irq_dispose_mapping(dev->irq);
err_free_netdev:
@@ -4202,6 +4204,7 @@ err_free_netdev:
static int mvneta_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct device_node *dn = pdev->dev.of_node;
struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev);
@@ -4209,6 +4212,8 @@ static int mvneta_remove(struct platform_device *pdev)
clk_disable_unprepare(pp->clk);
free_percpu(pp->ports);
free_percpu(pp->stats);
+ if (of_phy_is_fixed_link(dn))
+ of_phy_deregister_fixed_link(dn);
irq_dispose_mapping(dev->irq);
of_node_put(pp->phy_node);
free_netdev(dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 60227a3452a4..1026c452e39d 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3293,7 +3293,7 @@ static void mvpp2_cls_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
/* Clear classifier flow table */
- memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
+ memset(&fe.data, 0, sizeof(fe.data));
for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
fe.index = index;
mvpp2_cls_flow_write(priv, &fe);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f05ea56dcff2..941c8e2c944e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
static void sky2_shutdown(struct pci_dev *pdev)
{
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int port;
+
+ for (port = 0; port < hw->ports; port++) {
+ struct net_device *ndev = hw->dev[port];
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+ netif_device_detach(ndev);
+ }
+ rtnl_unlock();
+ }
sky2_suspend(&pdev->dev);
pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
pci_set_power_state(pdev, PCI_D3hot);
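
Editor's note: the sky2_shutdown() change closes still-running ports before powering down. A reduced sketch of that step, not part of the patch:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Illustrative only: close a running netdev under RTNL before the PCI
 * device is put into a low-power state in a shutdown handler.
 */
static void example_shutdown_netdev(struct net_device *ndev)
{
	rtnl_lock();
	if (netif_running(ndev)) {
		dev_close(ndev);
		netif_device_detach(ndev);
	}
	rtnl_unlock();
}
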
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4a62ffd7729d..86a89cbd3ec9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -318,6 +318,8 @@ static int mtk_phy_connect(struct net_device *dev)
return 0;
err_phy:
+ if (of_phy_is_fixed_link(mac->of_node))
+ of_phy_deregister_fixed_link(mac->of_node);
of_node_put(np);
dev_err(eth->dev, "%s: invalid phy\n", __func__);
return -EINVAL;
@@ -1923,6 +1925,8 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
phy_disconnect(dev->phydev);
+ if (of_phy_is_fixed_link(mac->of_node))
+ of_phy_deregister_fixed_link(mac->of_node);
mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b1cef7a0f7ca..e36bebcab3f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2469,6 +2469,7 @@ err_comm_admin:
kfree(priv->mfunc.master.slave_state);
err_comm:
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
err_vhcr:
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
priv->mfunc.vhcr,
@@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
int slave;
u32 slave_read;
+ /* If the comm channel has not yet been initialized,
+ * skip reporting the internal error event to all
+ * the communication channels.
+ */
+ if (!priv->mfunc.comm)
+ return;
+
/* Report an internal error event to all
* communication channels.
*/
@@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
}
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 08fc5fc56d43..a5fc46bbcbe2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 tmp_rounded =
+ roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+ roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
- max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ max_val_cycles : tmp_rounded;
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 132cea655920..e3be7e44ff51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
/* For TX we use the same irq per
ring we assigned for the RX */
struct mlx4_en_cq *rx_cq;
-
+ int xdp_index;
+
+ /* The xdp tx irq must align with the rx ring that forwards to
+ * it, so reindex these from 0. This should only happen when
+ * tx_ring_num is not a multiple of rx_ring_num.
+ */
+ xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
+ if (xdp_index >= 0)
+ cq_idx = xdp_index;
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e703bed7b82..fb8bb027b69c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -129,6 +129,9 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
}
};
+/* Must not acquire state_lock, as its corresponding work_sync
+ * is done under it.
+ */
static void mlx4_en_filter_work(struct work_struct *work)
{
struct mlx4_en_filter *filter = container_of(work,
@@ -1733,6 +1736,13 @@ int mlx4_en_start_port(struct net_device *dev)
udp_tunnel_get_rx_info(dev);
priv->port_up = true;
+
+ /* Process all completions if exist to prevent
+ /* Process any outstanding completions to prevent
+ * the queues from freezing if they are full
+ */
+ * the queues freezing if they are full
+ */
+ for (i = 0; i < priv->rx_ring_num; i++)
+ napi_schedule(&priv->rx_cq[i]->napi);
+
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1910,8 +1920,9 @@ static void mlx4_en_clear_stats(struct net_device *dev)
struct mlx4_en_dev *mdev = priv->mdev;
int i;
- if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
- en_dbg(HW, priv, "Failed dumping statistics\n");
+ if (!mlx4_is_slave(mdev->dev))
+ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+ en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
@@ -2068,13 +2079,6 @@ err:
return -ENOMEM;
}
-static void mlx4_en_shutdown(struct net_device *dev)
-{
- rtnl_lock();
- netif_device_detach(dev);
- mlx4_en_close(dev);
- rtnl_unlock();
-}
static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src,
@@ -2151,8 +2155,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- bool shutdown = mdev->dev->persist->interface_state &
- MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@ -2160,10 +2162,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
- if (shutdown)
- mlx4_en_shutdown(dev);
- else
- unregister_netdev(dev);
+ unregister_netdev(dev);
}
if (priv->allocated)
@@ -2181,19 +2180,18 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mutex_lock(&mdev->state_lock);
mdev->pndev[priv->port] = NULL;
mdev->upper[priv->port] = NULL;
- mutex_unlock(&mdev->state_lock);
#ifdef CONFIG_RFS_ACCEL
mlx4_en_cleanup_filters(priv);
#endif
mlx4_en_free_resources(priv);
+ mutex_unlock(&mdev->state_lock);
kfree(priv->tx_ring);
kfree(priv->tx_cq);
- if (!shutdown)
- free_netdev(dev);
+ free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5aa8b751f417..59473a0ebcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
0, MLX4_CMD_DUMP_ETH_STATS,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index b66e03d9711f..c06346a82496 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -118,6 +118,29 @@ mlx4_en_test_loopback_exit:
return !loopback_ok;
}
+static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+ int i = 0;
+
+ err = mlx4_test_async(mdev->dev);
+ /* When not in MSI_X or slave, test only async */
+ if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
+ return err;
+
+ /* Loop over all completion vectors of the current port and check
+ * that each one works by mapping command completions to that vector
+ * and performing a NOP command
+ */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
+ if (err)
+ break;
+ }
+
+ return err;
+}
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
@@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
netif_carrier_on(dev);
}
- buf[0] = mlx4_test_interrupts(mdev->dev);
+ buf[0] = mlx4_en_test_interrupts(priv);
buf[1] = mlx4_en_test_link(priv);
buf[2] = mlx4_en_test_speed(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cf8f8a72a801..cd3638e6fe25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
kfree(priv->eq_table.uar_map);
}
-/* A test that verifies that we can accept interrupts on all
- * the irq vectors of the device.
+/* A test that verifies that we can accept interrupts
+ * on the vector allocated for asynchronous events
+ */
+int mlx4_test_async(struct mlx4_dev *dev)
+{
+ return mlx4_NOP(dev);
+}
+EXPORT_SYMBOL(mlx4_test_async);
+
+/* A test that verifies that we can accept interrupts
+ * on the given irq vector of the tested port.
* Interrupts are checked using the NOP command.
*/
-int mlx4_test_interrupts(struct mlx4_dev *dev)
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
int err;
- err = mlx4_NOP(dev);
- /* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
- return err;
-
- /* A loop over all completion vectors, for each vector we will check
- * whether it works by mapping command completions to that vector
- * and performing a NOP command
- */
- for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
- /* Make sure request_irq was called */
- if (!priv->eq_table.eq[i].have_irq)
- continue;
-
- /* Temporary use polling for command completions */
- mlx4_cmd_use_polling(dev);
-
- /* Map the new eq to handle all asynchronous events */
- err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[i].eqn);
- if (err) {
- mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
- mlx4_cmd_use_events(dev);
- break;
- }
+ /* Temporary use polling for command completions */
+ mlx4_cmd_use_polling(dev);
- /* Go back to using events */
- mlx4_cmd_use_events(dev);
- err = mlx4_NOP(dev);
+ /* Map the new eq to handle all asynchronous events */
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
+ priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
+ if (err) {
+ mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+ goto out;
}
+ /* Go back to using events */
+ mlx4_cmd_use_events(dev);
+ err = mlx4_NOP(dev);
+
/* Return to default */
+ mlx4_cmd_use_polling(dev);
+out:
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+ mlx4_cmd_use_events(dev);
+
return err;
}
-EXPORT_SYMBOL(mlx4_test_interrupts);
+EXPORT_SYMBOL(mlx4_test_interrupt);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c41ab31a39f8..84bab9f0732e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,9 +49,9 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static bool enable_qos = true;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7183ac4135d2..75d07fa9d0b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info,
int i;
int err = 0;
+ if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
+ mlx4_err(mdev,
+ "Requested port type for port %d is not supported on this HCA\n",
+ info->port);
+ err = -EINVAL;
+ goto err_sup;
+ }
+
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
info->tmp_type = port_type;
@@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
-
+err_sup:
return err;
}
@@ -4139,11 +4147,8 @@ static void mlx4_shutdown(struct pci_dev *pdev)
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
- if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
- /* Notify mlx4 clients that the kernel is being shut down */
- persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
- }
mutex_unlock(&persist->interface_state_mutex);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 94b891c118c1..1a670b681555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1457,7 +1457,12 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
- struct mlx4_net_trans_rule rule;
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ };
+
u64 *regid_p;
switch (mode) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e4878f31e45d..88ee7d8a5923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -145,9 +145,10 @@ enum mlx4_resource {
RES_MTT,
RES_MAC,
RES_VLAN,
- RES_EQ,
+ RES_NPORT_ID,
RES_COUNTER,
RES_FS_RULE,
+ RES_EQ,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
int port, void *buf);
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
- struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index c5b2064297a1..b656dd5772e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
- u32 in_mod, struct mlx4_cmd_mailbox *outbox)
-{
- return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_NATIVE);
-}
-
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
- if (slave != dev->caps.function)
- return 0;
- return mlx4_common_dump_eth_stats(dev, slave,
- vhcr->in_modifier, outbox);
+ return 0;
}
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 84d7857ccc27..c548beaaf910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
- if (eq)
- *eq = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
+ if (!err && eq)
+ *eq = r;
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 6cb38304669f..2c6e3c7b7417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -41,6 +41,13 @@
#include "mlx5_core.h"
+struct mlx5_db_pgdir {
+ struct list_head list;
+ unsigned long *bitmap;
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
/* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0.
*/
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
- bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+ pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+
+ if (!pgdir->bitmap) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ bitmap_fill(pgdir->bitmap, db_per_page);
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
&pgdir->db_dma, node);
if (!pgdir->db_page) {
+ kfree(pgdir->bitmap);
kfree(pgdir);
return NULL;
}
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
int offset;
int i;
- i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
- if (i >= MLX5_DB_PER_PAGE)
+ i = find_first_bit(pgdir->bitmap, db_per_page);
+ if (i >= db_per_page)
return -ENOMEM;
__clear_bit(i, pgdir->bitmap);
db->u.pgdir = pgdir;
db->index = i;
- offset = db->index * L1_CACHE_BYTES;
+ offset = db->index * cache_line_size();
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
db->dma = pgdir->db_dma + offset;
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
mutex_lock(&dev->priv.pgdir_mutex);
__set_bit(db->index, db->u.pgdir->bitmap);
- if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+ if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir->bitmap);
kfree(db->u.pgdir);
}
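
Editor's note: the mlx5 alloc.c hunk above sizes the doorbell bitmap from the runtime cache line size instead of a compile-time constant. A minimal sketch of that allocation, hypothetical name, not part of the patch:

#include <linux/bitmap.h>
#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative only: one bit per doorbell slot, slot count derived from
 * PAGE_SIZE / cache_line_size() at run time.
 */
static unsigned long *example_alloc_db_bitmap(void)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();
	unsigned long *bitmap;

	bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
			 sizeof(unsigned long), GFP_KERNEL);
	if (bitmap)
		bitmap_fill(bitmap, db_per_page);	/* all slots free */

	return bitmap;
}
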
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 460363b66cb1..7a43502a89cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -85,6 +85,9 @@
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+#define MLX5E_DEFAULT_LRO_TIMEOUT 32
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
@@ -221,6 +224,7 @@ struct mlx5e_params {
struct ieee_ets ets;
#endif
bool rx_am_enabled;
+ u32 lro_timeout;
};
struct mlx5e_tstamp {
@@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7eaf38020a8f..84e8b250e2af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = priv->params.num_tc;
+ c->xdp = !!priv->xdp_prog;
if (priv->params.rx_am_enabled)
rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
@@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_tx_cqs;
+ /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
+ err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+ priv->params.tx_cq_moderation) : 0;
+ if (err)
+ goto err_close_rx_cq;
+
napi_enable(&c->napi);
err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
@@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
}
}
- if (priv->xdp_prog) {
- /* XDP SQ CQ params are same as normal TXQ sq CQ params */
- err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
- priv->params.tx_cq_moderation);
- if (err)
- goto err_close_sqs;
-
- err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
- if (err) {
- mlx5e_close_cq(&c->xdp_sq.cq);
- goto err_close_sqs;
- }
- }
+ err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
+ if (err)
+ goto err_close_sqs;
- c->xdp = !!priv->xdp_prog;
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
goto err_close_xdp_sq;
@@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return 0;
err_close_xdp_sq:
- mlx5e_close_sq(&c->xdp_sq);
+ if (c->xdp)
+ mlx5e_close_sq(&c->xdp_sq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1522,6 +1519,10 @@ err_close_icosq:
err_disable_napi:
napi_disable(&c->napi);
+ if (c->xdp)
+ mlx5e_close_cq(&c->xdp_sq.cq);
+
+err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
err_close_tx_cqs:
@@ -1971,9 +1972,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(priv->params.lro_wqe_sz -
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[2]));
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
@@ -3401,6 +3400,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
}
}
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile,
@@ -3419,6 +3430,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->params.lro_timeout =
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
@@ -4035,7 +4049,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
@@ -4052,6 +4065,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
+ unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(mdev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3c97da103d30..bf1c09ca73c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
#endif
- netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC;
+ netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
eth_hw_addr_random(netdev);
@@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = priv->netdev;
+ unregister_netdev(netdev);
mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ce8c54d18906..6bb21b31cfeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
- if (mask->vlan_id) {
+ if (mask->vlan_id || mask->vlan_priority) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index abbf2c369923..be1f7333ab7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
@@ -949,9 +949,11 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
int table_size = 2;
int err = 0;
- if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->egress.acl))
- return;
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->egress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
@@ -959,12 +961,12 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1009,6 +1011,7 @@ out:
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
mlx5_destroy_flow_table(acl);
+ return err;
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
@@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -1063,9 +1066,11 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
int table_size = 4;
int err = 0;
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->ingress.acl))
- return;
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
@@ -1073,12 +1078,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1167,6 +1172,7 @@ out:
}
kvfree(flow_group_in);
+ return err;
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
@@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_egress_acl(esw, vport);
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable egress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c55ad8d00c05..d239f5d0ea36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
- action = attr->action;
+ /* per flow vlan pop/push is emulated, don't set that into the firmware */
+ action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5da2cc878582..914e5466f729 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
+
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
@@ -879,7 +882,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *
tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
tree_add_node(&fg->node, &ft->node);
/* Add node to group list */
- list_add(&fg->node.list, ft->node.children.prev);
+ list_add(&fg->node.list, prev_fg);
return fg;
}
@@ -893,7 +896,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
return ERR_PTR(-EPERM);
lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
+ fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
unlock_ref_node(&ft->node);
return fg;
@@ -1012,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = &ft->node.children;
+ struct list_head *prev = ft->node.children.prev;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
@@ -1687,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering)
{
steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
- if (IS_ERR_OR_NULL(steering->root_ns))
+ if (!steering->root_ns)
goto cleanup;
if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3a9195b4169d..3b026c151cf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
goto err_out;
if (aging) {
+ counter->cache.lastuse = jiffies;
counter->aging = true;
spin_lock(&fc_stats->addlist_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a05fb965c8d..5bcf93422ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -61,10 +61,15 @@ enum {
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
};
-static u8 get_nic_interface(struct mlx5_core_dev *dev)
+enum {
+ MLX5_DROP_NEW_HEALTH_WORK,
+};
+
+static u8 get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
@@ -97,7 +102,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -127,7 +132,7 @@ unlock:
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_interface(dev);
+ u8 nic_interface = get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
@@ -149,8 +154,34 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
+static void health_recover(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct delayed_work *dwork;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ u8 nic_state;
+
+ dwork = container_of(work, struct delayed_work, work);
+ health = container_of(dwork, struct mlx5_core_health, recover_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ nic_state = get_nic_state(dev);
+ if (nic_state == MLX5_NIC_IFC_INVALID) {
+ dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+ return;
+ }
+
+ dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+ mlx5_recover_device(dev);
+}
+
+/* How long to wait before the health recovery flow resets the driver (in msecs) */
+#define MLX5_RECOVERY_DELAY_MSECS 60000
static void health_care(struct work_struct *work)
{
+ unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
@@ -160,6 +191,14 @@ static void health_care(struct work_struct *work)
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
+
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ schedule_delayed_work(&health->recover_work, recover_delay);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
static const char *hsynd_str(u8 synd)
@@ -272,7 +311,13 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- schedule_work(&health->work);
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
}
@@ -281,6 +326,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
init_timer(&health->timer);
+ health->sick = 0;
+ clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -297,11 +344,22 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
del_timer_sync(&health->timer);
}
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ spin_lock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ spin_unlock(&health->wq_lock);
+ cancel_delayed_work_sync(&health->recover_work);
+ cancel_work_sync(&health->work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- flush_work(&health->work);
+ destroy_workqueue(health->wq);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -316,9 +374,13 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
strcpy(name, "mlx5_health");
strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
kfree(name);
-
+ if (!health->wq)
+ return -ENOMEM;
+ spin_lock_init(&health->wq_lock);
INIT_WORK(&health->work, health_care);
+ INIT_DELAYED_WORK(&health->recover_work, health_recover);
return 0;
}
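
Editor's note: the health.c hunk above introduces a "block new work, then drain" sequence. A reduced sketch of that pattern with hypothetical names: a flag guarded by a spinlock stops new submissions before cancel_*_sync() flushes whatever is already queued.

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define EXAMPLE_DROP_NEW_WORK	0

struct example_health {
	spinlock_t		wq_lock;
	unsigned long		flags;
	struct workqueue_struct	*wq;
	struct work_struct	work;
	struct delayed_work	recover_work;
};

static void example_queue_health_work(struct example_health *h)
{
	spin_lock(&h->wq_lock);
	if (!test_bit(EXAMPLE_DROP_NEW_WORK, &h->flags))
		queue_work(h->wq, &h->work);
	spin_unlock(&h->wq_lock);
}

static void example_drain_health_wq(struct example_health *h)
{
	spin_lock(&h->wq_lock);
	set_bit(EXAMPLE_DROP_NEW_WORK, &h->flags);
	spin_unlock(&h->wq_lock);

	cancel_delayed_work_sync(&h->recover_work);
	cancel_work_sync(&h->work);
}
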
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d9c3c70b29e4..3b7c6a9f2b5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -46,7 +46,6 @@
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
-#include <linux/delay.h>
#include <linux/mlx5/mlx5_ifc.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
@@ -844,12 +843,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
struct pci_dev *pdev = dev->pdev;
int err;
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto out;
- }
-
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
@@ -1023,6 +1016,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_start_health_poll(dev);
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto err_stop_poll;
+ }
+
if (boot && mlx5_init_once(dev, priv)) {
dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
@@ -1226,6 +1225,9 @@ static int init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ dev->pdev = pdev;
+ dev->event = mlx5_core_event;
+
if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
mlx5_core_warn(dev,
"selected profile out of range, selecting default (%d)\n",
@@ -1233,8 +1235,6 @@ static int init_one(struct pci_dev *pdev,
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
- dev->pdev = pdev;
- dev->event = mlx5_core_event;
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
@@ -1313,10 +1313,16 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
dev_info(&pdev->dev, "%s was called\n", __func__);
+
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- pci_save_state(pdev);
- mlx5_pci_disable_device(dev);
+ /* In case of a kernel call, save the PCI state and drain the health wq */
+ if (state) {
+ pci_save_state(pdev);
+ mlx5_drain_health_wq(dev);
+ mlx5_pci_disable_device(dev);
+ }
+
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
@@ -1373,11 +1379,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
- mlx5_pci_err_detected(dev->pdev, 0);
-}
-
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -1427,6 +1428,18 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+void mlx5_recover_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
+ mlx5_pci_resume(dev->pdev);
+}
+
static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 3d0cfb9f18f9..187662c8ea96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -83,6 +83,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index cc4fd61914d3..a57d5a81eb05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -209,6 +209,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct page *page;
+ u64 zero_addr = 1;
u64 addr;
int err;
int nid = dev_to_node(&dev->pdev->dev);
@@ -218,26 +219,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
mlx5_core_warn(dev, "failed to allocate page\n");
return -ENOMEM;
}
+map:
addr = dma_map_page(&dev->pdev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->pdev->dev, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
- goto out_alloc;
+ goto err_mapping;
}
+
+ /* Firmware doesn't support page with physical address 0 */
+ if (addr == 0) {
+ zero_addr = addr;
+ goto map;
+ }
+
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
- goto out_mapping;
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
- return 0;
-
-out_mapping:
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+err_mapping:
+ if (err)
+ __free_page(page);
-out_alloc:
- __free_page(page);
+ if (zero_addr == 0)
+ dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index e742bd4e8894..912f71f84209 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1838,11 +1838,17 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
};
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
{
unsigned long end;
mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+ }
+
wmb(); /* reset needs to be written before we read control register */
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
@@ -1909,7 +1915,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
- err = mlxsw_pci_sw_reset(mlxsw_pci);
+ err = mlxsw_pci_sw_reset(mlxsw_pci, id);
if (err) {
dev_err(&pdev->dev, "Software reset failed\n");
goto err_sw_reset;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1ec0a4ce3c46..dda5761e91bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
span_entry->used = true;
span_entry->id = index;
- span_entry->ref_count = 0;
+ span_entry->ref_count = 1;
span_entry->local_port = local_port;
return span_entry;
}
@@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry
span_entry = mlxsw_sp_span_entry_find(port);
if (span_entry) {
+ /* Already exists, just take a reference */
span_entry->ref_count++;
return span_entry;
}
@@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry)
{
+ WARN_ON(!span_entry->ref_count);
if (--span_entry->ref_count == 0)
mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 9b22863a924b..97bbc1d21df8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -115,7 +115,7 @@ struct mlxsw_sp_rif {
struct mlxsw_sp_mid {
struct list_head list;
unsigned char addr[ETH_ALEN];
- u16 vid;
+ u16 fid;
u16 mid;
unsigned int ref_count;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 78fc557d6dd7..e83072da6272 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -320,6 +320,8 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
lpm_tree);
if (err)
goto err_left_struct_set;
+ memcpy(&lpm_tree->prefix_usage, prefix_usage,
+ sizeof(lpm_tree->prefix_usage));
return lpm_tree;
err_left_struct_set:
@@ -343,7 +345,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->proto == proto &&
+ if (lpm_tree->ref_count != 0 &&
+ lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage))
goto inc_ref_count;
@@ -591,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
+
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
+ mlxsw_sp_router_fib_flush(mlxsw_sp);
kfree(mlxsw_sp->router.vrs);
}
struct mlxsw_sp_neigh_key {
- unsigned char addr[sizeof(struct in6_addr)];
- struct net_device *dev;
+ struct neighbour *n;
};
struct mlxsw_sp_neigh_entry {
struct rhash_head ht_node;
struct mlxsw_sp_neigh_key key;
u16 rif;
- struct neighbour *n;
bool offloaded;
struct delayed_work dw;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -643,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
- struct net_device *dev, u16 rif,
- struct neighbour *n)
+mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
if (!neigh_entry)
return NULL;
- memcpy(neigh_entry->key.addr, addr, addr_len);
- neigh_entry->key.dev = dev;
+ neigh_entry->key.n = n;
neigh_entry->rif = rif;
- neigh_entry->n = n;
INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
INIT_LIST_HEAD(&neigh_entry->nexthop_list);
return neigh_entry;
@@ -668,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
}
static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
- size_t addr_len, struct net_device *dev)
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
- struct mlxsw_sp_neigh_key key = {{ 0 } };
+ struct mlxsw_sp_neigh_key key;
- memcpy(key.addr, addr, addr_len);
- key.dev = dev;
+ key.n = n;
return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
&key, mlxsw_sp_neigh_ht_params);
}
@@ -686,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_rif *r;
- u32 dip;
int err;
if (n->tbl != &arp_tbl)
return 0;
- dip = ntohl(*((__be32 *) n->primary_key));
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
- n->dev);
- if (neigh_entry) {
- WARN_ON(neigh_entry->n != n);
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (neigh_entry)
return 0;
- }
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
if (WARN_ON(!r))
return -EINVAL;
- neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
- r->rif, n);
+ neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
if (!neigh_entry)
return -ENOMEM;
err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
@@ -724,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_neigh_entry *neigh_entry;
- u32 dip;
if (n->tbl != &arp_tbl)
return;
- dip = ntohl(*((__be32 *) n->primary_key));
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
- n->dev);
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
if (!neigh_entry)
return;
mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
@@ -814,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
}
}
+static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
+{
+ u8 num_rec, last_rec_index, num_entries;
+
+ num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+ last_rec_index = num_rec - 1;
+
+ if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
+ return false;
+ if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
+ MLXSW_REG_RAUHTD_TYPE_IPV6)
+ return true;
+
+ num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+ last_rec_index);
+ if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
+ return true;
+ return false;
+}
+
static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
char *rauhtd_pl;
@@ -840,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < num_rec; i++)
mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
i);
- } while (num_rec);
+ } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
rtnl_unlock();
kfree(rauhtd_pl);
@@ -859,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
* is active regardless of the traffic.
*/
if (!list_empty(&neigh_entry->nexthop_list))
- neigh_event_send(neigh_entry->n, NULL);
+ neigh_event_send(neigh_entry->key.n, NULL);
}
rtnl_unlock();
}
@@ -905,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
rtnl_lock();
list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
nexthop_neighs_list_node) {
- if (!(neigh_entry->n->nud_state & NUD_VALID) &&
+ if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
!list_empty(&neigh_entry->nexthop_list))
- neigh_event_send(neigh_entry->n, NULL);
+ neigh_event_send(neigh_entry->key.n, NULL);
}
rtnl_unlock();
@@ -924,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
{
struct mlxsw_sp_neigh_entry *neigh_entry =
container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
- struct neighbour *n = neigh_entry->n;
+ struct neighbour *n = neigh_entry->key.n;
struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char rauht_pl[MLXSW_REG_RAUHT_LEN];
@@ -1027,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
dip = ntohl(*((__be32 *) n->primary_key));
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
- &dip,
- sizeof(__be32),
- dev);
- if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (WARN_ON(!neigh_entry)) {
mlxsw_sp_port_dev_put(mlxsw_sp_port);
return NOTIFY_DONE;
}
@@ -1340,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
struct fib_nh *fib_nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- u32 gwip = ntohl(fib_nh->nh_gw);
struct net_device *dev = fib_nh->nh_dev;
struct neighbour *n;
u8 nud_state;
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
- sizeof(gwip), dev);
- if (!neigh_entry) {
- __be32 gwipn = htonl(gwip);
-
- n = neigh_create(&arp_tbl, &gwipn, dev);
+ /* Take a reference of neigh here ensuring that neigh would
+ * not be destructed before the nexthop entry is finished.
+ * The reference is taken either in neigh_lookup() or
+ * in neigh_create() in case n is not found.
+ */
+ n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
+ if (!n) {
+ n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
- sizeof(gwip), dev);
- if (!neigh_entry) {
- neigh_release(n);
- return -EINVAL;
- }
- } else {
- /* Take a reference of neigh here ensuring that neigh would
- * not be detructed before the nexthop entry is finished.
- * The second branch takes the reference in neith_create()
- */
- n = neigh_entry->n;
- neigh_clone(n);
+ }
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (!neigh_entry) {
+ neigh_release(n);
+ return -EINVAL;
}
/* If that is the first nexthop connected to that neigh, add to
@@ -1400,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
if (list_empty(&nh->neigh_entry->nexthop_list))
list_del(&nh->neigh_entry->nexthop_neighs_list_node);
- neigh_release(neigh_entry->n);
+ neigh_release(neigh_entry->key.n);
}
static struct mlxsw_sp_nexthop_group *
@@ -1460,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
for (i = 0; i < fi->fib_nhs; i++) {
struct fib_nh *fib_nh = &fi->fib_nh[i];
- u32 gwip = ntohl(fib_nh->nh_gw);
+ struct neighbour *n = nh->neigh_entry->key.n;
- if (memcmp(nh->neigh_entry->key.addr,
- &gwip, sizeof(u32)) == 0 &&
- nh->neigh_entry->key.dev == fib_nh->nh_dev)
+ if (memcmp(n->primary_key, &fib_nh->nh_gw,
+ sizeof(fib_nh->nh_gw)) == 0 &&
+ n->dev == fib_nh->nh_dev)
return true;
}
return false;
@@ -1820,19 +1819,17 @@ err_fib_entry_insert:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
if (mlxsw_sp->router.aborted)
- return 0;
+ return;
fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
- if (!fib_entry) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
- return -ENOENT;
- }
+ if (!fib_entry)
+ return;
if (fib_entry->ref_count == 1) {
mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
@@ -1840,7 +1837,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return 0;
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -1862,7 +1858,8 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
if (err)
return err;
@@ -1873,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
-static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_resources *resources;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_entry *tmp;
struct mlxsw_sp_vr *vr;
int i;
- int err;
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
+
if (!vr->used)
continue;
@@ -1900,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
break;
}
}
+}
+
+static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ mlxsw_sp_router_fib_flush(mlxsw_sp);
mlxsw_sp->router.aborted = true;
err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
if (err)
@@ -1957,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
struct fib_entry_notifier_info *fen_info = ptr;
int err;
+ if (!net_eq(fen_info->info.net, &init_net))
+ return NOTIFY_DONE;
+
switch (event) {
case FIB_EVENT_ENTRY_ADD:
err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5e00c79e8133..1e2c8eca3af1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
const unsigned char *addr,
- u16 vid)
+ u16 fid)
{
struct mlxsw_sp_mid *mid;
list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
- if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
+ if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
return mid;
}
return NULL;
@@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
const unsigned char *addr,
- u16 vid)
+ u16 fid)
{
struct mlxsw_sp_mid *mid;
u16 mid_idx;
@@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
ether_addr_copy(mid->addr, addr);
- mid->vid = vid;
+ mid->fid = fid;
mid->mid = mid_idx;
mid->ref_count = 0;
list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
@@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (switchdev_trans_ph_prepare(trans))
return 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
+ mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
if (!mid) {
netdev_err(dev, "Unable to allocate MC group\n");
return -ENOMEM;
@@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 mid_idx;
int err = 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
if (!mid) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index c0c23e2f3275..92bda8703f87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1088,6 +1088,7 @@ err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 1e8339a67f6e..32f2a45f4ab2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,4 +107,7 @@ config QEDE
---help---
This enables the support for ...
+config QED_RDMA
+ bool
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index cda0af7fbc20..967acf322c09 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 82370a1a59ad..0c42c240b5cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,13 +47,8 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* ILT constants */
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE 4
-#else
-#define ILT_DEFAULT_HW_P_SIZE 3
-#endif
+#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL;
}
-void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
}
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
return 0;
}
-void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
- struct qed_rdma_pf_params *p_params)
+static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params)
{
u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
enum protocol_type proto;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 130da1c0490b..a4789a93b692 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
if (!dcbx_info)
return -ENOMEM;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
if (!dcbx_info)
return NULL;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 88e7d5bef909..68f19ca57f96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -405,7 +405,7 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
}
/* Dump MCP Trace */
-enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
}
/* Dump GRC FIFO */
-enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Dump IGU FIFO */
-enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Protection Override dump */
-enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 *num_dumped_dwords)
{
u32 offset = 0, size_param_offset, override_window_dwords;
@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
}
/* Wrapper for unifying the idle_chk and mcp_trace api */
-enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
+static enum dbg_status
+qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
{
u32 num_errors, num_warnnings;
@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
#define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info */
-enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
- enum qed_dbg_features feature_idx)
+static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
}
/* Generic function for performing the dump of a debug feature. */
-enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_dbg_features feature_idx)
+static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 754f6a908858..edae5fc5fccd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ROCE,
- 0) * 2;
+ NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI, 0);
+ PROTOCOLID_ISCSI,
+ NULL);
n_eqes += 2 * num_cons;
}
@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
- * status blocks equally between L2 / RoCE but with consideration as
- * to how many l2 queues / cnqs we have
- */
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ if (IS_ENABLED(CONFIG_QED_RDMA) &&
+ p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
+ * the status blocks equally between L2 / RoCE but with
+ * consideration as to how many l2 queues / cnqs we have.
+ */
num_features++;
feat_num[QED_RDMA_CNQ] =
min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
}
-#endif
+
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 72eee29c677f..2777d5bb4380 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -727,9 +727,6 @@ struct core_tx_bd_flags {
#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
-
};
struct core_tx_bd {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a8be2faed7..f95385cbbd40 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,6 +38,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_roce.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- struct qed_ll2_rx_packet *p_pkt,
- struct core_rx_fast_path_cqe *p_cqe,
- bool b_last_packet)
+static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
{
u16 packet_length = le16_to_cpu(p_cqe->packet_length);
struct qed_ll2_buffer *buffer = p_pkt->cookie;
@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc;
}
-void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
@@ -537,8 +538,7 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie;
@@ -992,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
p_posting_packet = list_first_entry(&p_rx->posting_descq,
struct qed_ll2_rx_packet,
list_entry);
- list_del(&p_posting_packet->list_entry);
- list_add_tail(&p_posting_packet->list_entry,
- &p_rx->active_descq);
+ list_move_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
b_notify_fw = true;
}
@@ -1120,12 +1119,10 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
CORE_TX_BD_FLAGS_START_BD_SHIFT;
SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
+ SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
- SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
- type);
-
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
@@ -1188,8 +1185,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
}
SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2d652d..4e3d62a16cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_connections);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 4ee3151e80c2..333c7442e48a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -33,10 +33,8 @@
#include "qed_hw.h"
#include "qed_selftest.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
-#endif
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- int num_l2_queues;
-#endif
+ int num_l2_queues = 0;
int rc;
int i;
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- num_l2_queues = 0;
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ return 0;
+
for_each_hwfn(cdev, i)
num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
cdev->int_params.rdma_msix_cnt,
cdev->int_params.rdma_msix_base);
-#endif
return 0;
}
@@ -843,13 +839,14 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- params->rdma_pf_params.num_qps = QED_ROCE_QPS;
- params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
- /* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
- params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
-#endif
+ if (IS_ENABLED(CONFIG_QED_RDMA)) {
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* divide by 3 the MRs to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+ }
+
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -880,6 +877,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
}
}
+ cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
rc = qed_nic_setup(cdev);
if (rc)
goto err;
@@ -1432,7 +1430,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
-struct qed_selftest_ops qed_selftest_ops_pass = {
+static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 76831a398bed..f3a825a8f8d5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
-u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
-{
- return QED_CAU_DEF_RX_TIMER_RES;
-}
-
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params)
@@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
p_hwfn->p_rdma_info = p_rdma_info;
p_rdma_info->proto = PROTOCOLID_ROCE;
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
p_rdma_info->num_qps = num_cons / 2;
@@ -275,7 +271,7 @@ free_rdma_info:
return rc;
}
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +645,8 @@ out:
return rc;
}
-int qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params)
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 dpi_start_offset;
@@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt,
return rc;
}
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
return p_port;
}
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
-void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- if (rc)
- goto out;
-
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
- return rc;
-}
-
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
u16 qz_num;
@@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
return 0;
}
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 returned_id;
@@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
return rc;
}
-void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
return toggle_bit;
}
-int qed_rdma_create_cq(void *rdma_cxt,
- struct qed_rdma_create_cq_in_params *params, u16 *icid)
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
@@ -957,98 +954,10 @@ err:
return rc;
}
-int qed_rdma_resize_cq(void *rdma_cxt,
- struct qed_rdma_resize_cq_in_params *in_params,
- struct qed_rdma_resize_cq_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_resize_cq_output_params *p_ramrod_res;
- struct rdma_resize_cq_ramrod_data *p_ramrod;
- enum qed_rdma_toggle_bit toggle_bit;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- dma_addr_t ramrod_res_phys;
- u8 fw_return_code;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
-
- p_ramrod_res =
- (struct rdma_resize_cq_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- &ramrod_res_phys, GFP_KERNEL);
- if (!p_ramrod_res) {
- DP_NOTICE(p_hwfn,
- "qed resize cq failed: cannot allocate memory (ramrod)\n");
- return rc;
- }
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = in_params->icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_RESIZE_CQ,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_resize_cq;
-
- p_ramrod->flags = 0;
-
- /* toggle the bit for every resize or create cq for a given icid */
- toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
- in_params->icid);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
- in_params->pbl_two_level);
-
- p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
- p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
- p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
- DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
- DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc)
- goto err;
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- rc = -EINVAL;
- goto err;
- }
-
- out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
- out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
-
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
-
- return rc;
-
-err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
- DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
-
- return rc;
-}
-
-int qed_rdma_destroy_cq(void *rdma_cxt,
- struct qed_rdma_destroy_cq_in_params *in_params,
- struct qed_rdma_destroy_cq_out_params *out_params)
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_destroy_cq_output_params *p_ramrod_res;
@@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1793,9 +1702,9 @@ err:
return rc;
}
-int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -1936,7 +1845,7 @@ err_resp:
return rc;
}
-int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return 0;
}
-int qed_rdma_query_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
@@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc = 0;
@@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
return rc;
}
-struct qed_rdma_qp *
+static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
struct qed_rdma_create_qp_in_params *in_params,
struct qed_rdma_create_qp_out_params *out_params)
@@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_rdma_modify_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params)
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
enum qed_roce_qp_state prev_state;
@@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params)
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
return rc;
}
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
-int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_ptt *p_ptt;
@@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -2809,11 +2720,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev)
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
int rc;
- if (!cdev) {
- DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
- return -EINVAL;
- }
-
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
return -EINVAL;
@@ -2850,7 +2756,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
int rc;
int i;
- if (!cdev || !pkt || !params) {
+ if (!pkt || !params) {
DP_ERR(cdev,
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
cdev, pkt, params);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 2f091e8a0f40..279f342af8db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -95,26 +95,6 @@ struct qed_rdma_info {
enum protocol_type proto;
};
-struct qed_rdma_resize_cq_in_params {
- u16 icid;
- u32 cq_size;
- bool pbl_two_level;
- u64 pbl_ptr;
- u16 pbl_num_pages;
- u8 pbl_page_size_log;
-};
-
-struct qed_rdma_resize_cq_out_params {
- u32 prod;
- u32 cons;
-};
-
-struct qed_rdma_resize_cnq_in_params {
- u32 cnq_id;
- u32 pbl_page_size_log;
- u64 pbl_ptr;
-};
-
struct qed_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
@@ -181,36 +161,55 @@ struct qed_rdma_qp {
dma_addr_t shared_queue_phys_addr;
};
-int
-qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params);
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
-void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
-int
-qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params);
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
-int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
-int qed_rdma_stop(void *rdma_cxt);
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
-u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe);
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
-int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params);
-int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params);
-
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
#else
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo,
+ bool b_last_packet) {}
#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 652c90819758..b2c08e4d2a9b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -80,7 +80,6 @@ union ramrod_data {
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct rdma_create_cq_ramrod_data rdma_create_cq;
- struct rdma_resize_cq_ramrod_data rdma_resize_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index caff41544898..9fbaf9429fd0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,9 +28,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
-#endif
/***************************************************************************
* Structures & Definitions
@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
case PROTOCOLID_ROCE:
qed_async_roce_event(p_hwfn, p_eqe);
return 0;
-#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 28dc58919c85..048a230c3ce0 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 28c0e9f42c9e..974689a13337 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -348,12 +348,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
-#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 25a9b293ee8f..7567cc464b88 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
int tc;
- for (j = 0; j < QEDE_NUM_RQSTATS; j++)
- sprintf(buf + (k + j) * ETH_GSTRING_LEN,
- "%d: %s", i, qede_rqstats_arr[j].string);
- k += QEDE_NUM_RQSTATS;
- for (tc = 0; tc < edev->num_tc; tc++) {
- for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ for (j = 0; j < QEDE_NUM_RQSTATS; j++)
sprintf(buf + (k + j) * ETH_GSTRING_LEN,
- "%d.%d: %s", i, tc,
- qede_tqstats_arr[j].string);
- k += QEDE_NUM_TQSTATS;
+ "%d: %s", i,
+ qede_rqstats_arr[j].string);
+ k += QEDE_NUM_RQSTATS;
+ }
+
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+ sprintf(buf + (k + j) *
+ ETH_GSTRING_LEN,
+ "%d.%d: %s", i, tc,
+ qede_tqstats_arr[j].string);
+ k += QEDE_NUM_TQSTATS;
+ }
}
}
@@ -756,6 +763,8 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+ channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+ channels->max_tx = QEDE_MAX_RSS_CNT(edev);
channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
edev->fp_num_rx;
channels->tx_count = edev->fp_num_tx;
@@ -820,6 +829,13 @@ static int qede_set_channels(struct net_device *dev,
edev->req_queues = count;
edev->req_num_tx = channels->tx_count;
edev->req_num_rx = channels->rx_count;
+ /* Reset the indirection table if rx queue count is updated */
+ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+ edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+ memset(&edev->rss_params.rss_ind_table, 0,
+ sizeof(edev->rss_params.rss_ind_table));
+ }
+
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
@@ -1053,6 +1069,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
struct qede_dev *edev = netdev_priv(dev);
int i;
+ if (edev->dev_info.common.num_hwfns > 1) {
+ DP_INFO(edev,
+ "RSS configuration is not supported for 100G devices\n");
+ return -EOPNOTSUPP;
+ }
+
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -1184,8 +1206,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
}
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
txq->sw_tx_cons++;
txq->sw_tx_ring[idx].skb = NULL;
@@ -1199,8 +1221,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
+ int i, rc = 0;
u8 *data_ptr;
- int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1219,46 +1241,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* queue and that the loopback traffic is not IP.
*/
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
- if (qede_has_rx_work(rxq))
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative
+ * reads of CQE/BD before reading hw_comp_cons. If the CQE is
+ * read before it is written by FW, then FW writes CQE and SB,
+ * and then the CPU reads the hw_comp_cons, it will use an old
+ * CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset +
+ sw_rx_data->page_offset);
+ if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+ ether_addr_equal(data_ptr + ETH_ALEN,
+ edev->ndev->dev_addr)) {
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ rc = -1;
+ break;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
break;
- usleep_range(100, 200);
+ }
+
+ DP_INFO(edev, "Not the transmitted packet\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (!qede_has_rx_work(rxq)) {
+ if (i == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ qede_update_rx_prod(edev, rxq);
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD before reading hw_comp_cons. If the CQE is read before it is
- * written by FW, then FW writes CQE and SB, and then the CPU reads the
- * hw_comp_cons, it will use an old CQE.
- */
- rmb();
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- data_ptr = (u8 *)(page_address(sw_rx_data->data) +
- fp_cqe->placement_offset + sw_rx_data->page_offset);
- for (i = ETH_HLEN; i < len; i++)
- if (data_ptr[i] != (unsigned char)(i & 0xff)) {
- DP_NOTICE(edev, "Loopback test failed\n");
- qede_recycle_rx_bd_ring(rxq, edev, 1);
- return -1;
- }
-
- qede_recycle_rx_bd_ring(rxq, edev, 1);
-
- return 0;
+ return rc;
}
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 343038ca047d..85f46dbecd5b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -313,8 +313,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
split_bd_len = BD_UNMAP_LEN(split);
bds_consumed++;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -359,8 +359,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
nbd--;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < nbd; i++) {
@@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
return 0;
}
-static inline void qede_update_rx_prod(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -2840,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
- rxq->rx_buf_size, DMA_FROM_DEVICE);
+ PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev,
"Failed to map TPA replacement buffer\n");
@@ -2941,7 +2940,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
txq->num_tx_buffers = edev->q_num_tx_buffers;
/* Allocate the parallel driver ring for Tx buffers */
- size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring) {
DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
@@ -2952,7 +2951,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- NUM_TX_BDS_MAX,
+ TX_RING_SIZE,
sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index e97968ed4b8f..0b4deb31e742 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt)
mac |= TXEN | RXEN; /* enable RX/TX */
- /* We don't have ethtool support yet, so force flow-control mode
- * to 'full' always.
- */
- mac |= TXFC | RXFC;
+ /* Configure MAC flow control to match the PHY's settings. */
+ if (phydev->pause)
+ mac |= RXFC;
+ if (phydev->pause != phydev->asym_pause)
+ mac |= TXFC;
/* setup link speed */
mac &= ~SPEED_MASK;
@@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt)
writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
+ /* Enable pause frames. Without this feature, the EMAC has been shown
+ * to receive (and drop) frames with FCS errors at gigabit connections.
+ */
+ adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
phy_start(adpt->phydev);
@@ -1021,14 +1028,18 @@ void emac_mac_down(struct emac_adapter *adpt)
napi_disable(&adpt->rx_q.napi);
phy_stop(adpt->phydev);
- phy_disconnect(adpt->phydev);
- /* disable mac irq */
+ /* Interrupts must be disabled before the PHY is disconnected, to
+ * avoid a race condition where adjust_link is null when we get
+ * an interrupt.
+ */
writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(0, adpt->base + EMAC_INT_MASK);
synchronize_irq(adpt->irq.irq);
free_irq(adpt->irq.irq, &adpt->irq);
+ phy_disconnect(adpt->phydev);
+
emac_mac_reset(adpt);
emac_tx_q_descs_free(adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index da4e90db4d98..99a14df28b96 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -212,6 +212,7 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
phy_np = of_parse_phandle(np, "phy-handle", 0);
adpt->phydev = of_phy_find_device(phy_np);
+ of_node_put(phy_np);
}
if (!adpt->phydev) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 75c1b530e39e..72fe343c7a36 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = {
/* CDR Settings */
{EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
- {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)},
+ {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
{EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
/* TX/RX Settings */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 9bf3b2b82e95..57b35aeac51a 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -575,6 +575,7 @@ static const struct of_device_id emac_dt_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, emac_dt_match);
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
@@ -710,6 +711,8 @@ static int emac_probe(struct platform_device *pdev)
err_undo_napi:
netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
+ if (!has_acpi_companion(&pdev->dev))
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
emac_clks_teardown(adpt);
@@ -729,6 +732,8 @@ static int emac_remove(struct platform_device *pdev)
emac_clks_teardown(adpt);
+ if (!has_acpi_companion(&pdev->dev))
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e55638c7505a..bf000d819a21 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8273,7 +8273,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((sizeof(dma_addr_t) > 4) &&
(use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
if (!pci_is_pcie(pdev))
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 630536bc72f9..d6a217874a8b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1008,20 +1008,18 @@ static int ravb_phy_init(struct net_device *ndev)
of_node_put(pn);
if (!phydev) {
netdev_err(ndev, "failed to connect PHY\n");
- return -ENOENT;
+ err = -ENOENT;
+ goto err_deregister_fixed_link;
}
/* This driver only support 10/100Mbit speeds on Gen3
* at this time.
*/
if (priv->chip_id == RCAR_GEN3) {
- int err;
-
err = phy_set_max_speed(phydev, SPEED_100);
if (err) {
netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
- phy_disconnect(phydev);
- return err;
+ goto err_phy_disconnect;
}
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
@@ -1033,6 +1031,14 @@ static int ravb_phy_init(struct net_device *ndev)
phy_attached_info(phydev);
return 0;
+
+err_phy_disconnect:
+ phy_disconnect(phydev);
+err_deregister_fixed_link:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+
+ return err;
}
/* PHY control start function */
@@ -1634,6 +1640,7 @@ static void ravb_set_rx_mode(struct net_device *ndev)
/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
+ struct device_node *np = ndev->dev.parent->of_node;
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
@@ -1663,6 +1670,8 @@ static int ravb_close(struct net_device *ndev)
if (ndev->phydev) {
phy_stop(ndev->phydev);
phy_disconnect(ndev->phydev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
}
if (priv->chip_id != RCAR_GEN2) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 05b0dc55de77..1a92de705199 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -518,7 +518,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
.ecsr_value = ECSR_ICD,
.ecsipr_value = ECSIPR_ICDIP,
- .eesipr_value = 0xff7f009f,
+ .eesipr_value = 0xe77f009f,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 5424fb341613..24b746406bc7 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1471,7 +1471,7 @@ static int rocker_world_check_init(struct rocker_port *rocker_port)
if (rocker->wops) {
if (rocker->wops->mode != mode) {
dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
- return err;
+ return -EINVAL;
}
return 0;
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 431a60804272..4ca461322d60 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1493,8 +1493,6 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
- if (found)
- *index = found->index;
updating = found && adding;
removing = found && !adding;
@@ -1508,9 +1506,11 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
resolved = false;
} else if (removing) {
ofdpa_neigh_del(trans, found);
+ *index = found->index;
} else if (updating) {
ofdpa_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
+ *index = found->index;
} else {
err = -ENOENT;
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cf3557106c2..6b89e4a7b164 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel)
*channel = *old_channel;
channel->napi_dev = NULL;
+ INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+ channel->napi_str.napi_id = 0;
+ channel->napi_str.state = 0;
memset(&channel->eventq, 0, sizeof(channel->eventq));
for (j = 0; j < EFX_TXQ_TYPES; j++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 3818c5e06eba..4b78168a5f3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -107,7 +107,7 @@ config DWMAC_STI
config DWMAC_STM32
tristate "STM32 DWMAC support"
default ARCH_STM32
- depends on OF && HAS_IOMEM
+ depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for ethernet controller on STM32 SOCs.
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 2920e2ee3864..489ef146201e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -63,8 +63,8 @@
#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40
#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003
#define TSE_PCS_SW_RESET_TIMEOUT 100
-#define TSE_PCS_USE_SGMII_AN_MASK BIT(2)
-#define TSE_PCS_USE_SGMII_ENA BIT(1)
+#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
+#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_DISABLE 0x0001
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index d3292c4a6eda..6d2de4e01f6d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -120,14 +120,17 @@ struct stmmac_extra_stats {
unsigned long ip_csum_bypassed;
unsigned long ipv4_pkt_rcvd;
unsigned long ipv6_pkt_rcvd;
- unsigned long rx_msg_type_ext_no_ptp;
- unsigned long rx_msg_type_sync;
- unsigned long rx_msg_type_follow_up;
- unsigned long rx_msg_type_delay_req;
- unsigned long rx_msg_type_delay_resp;
- unsigned long rx_msg_type_pdelay_req;
- unsigned long rx_msg_type_pdelay_resp;
- unsigned long rx_msg_type_pdelay_follow_up;
+ unsigned long no_ptp_rx_msg_type_ext;
+ unsigned long ptp_rx_msg_type_sync;
+ unsigned long ptp_rx_msg_type_follow_up;
+ unsigned long ptp_rx_msg_type_delay_req;
+ unsigned long ptp_rx_msg_type_delay_resp;
+ unsigned long ptp_rx_msg_type_pdelay_req;
+ unsigned long ptp_rx_msg_type_pdelay_resp;
+ unsigned long ptp_rx_msg_type_pdelay_follow_up;
+ unsigned long ptp_rx_msg_type_announce;
+ unsigned long ptp_rx_msg_type_management;
+ unsigned long ptp_rx_msg_pkt_reserved_type;
unsigned long ptp_frame_type;
unsigned long ptp_ver;
unsigned long timestamp_dropped;
@@ -482,11 +485,12 @@ struct stmmac_ops {
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
- u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate);
+ u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+ int gmac4);
int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
int (*config_addend) (void __iomem *ioaddr, u32 addend);
int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
- int add_sub);
+ int add_sub, int gmac4);
u64(*get_systime) (void __iomem *ioaddr);
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 2e4c171a2b41..e3c86d422109 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -155,14 +155,18 @@
#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
/* Extended RDES4 message type definitions */
-#define RDES_EXT_NO_PTP 0
-#define RDES_EXT_SYNC 1
-#define RDES_EXT_FOLLOW_UP 2
-#define RDES_EXT_DELAY_REQ 3
-#define RDES_EXT_DELAY_RESP 4
-#define RDES_EXT_PDELAY_REQ 5
-#define RDES_EXT_PDELAY_RESP 6
-#define RDES_EXT_PDELAY_FOLLOW_UP 7
+#define RDES_EXT_NO_PTP 0x0
+#define RDES_EXT_SYNC 0x1
+#define RDES_EXT_FOLLOW_UP 0x2
+#define RDES_EXT_DELAY_REQ 0x3
+#define RDES_EXT_DELAY_RESP 0x4
+#define RDES_EXT_PDELAY_REQ 0x5
+#define RDES_EXT_PDELAY_RESP 0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+#define RDES_PTP_ANNOUNCE 0x8
+#define RDES_PTP_MANAGEMENT 0x9
+#define RDES_PTP_SIGNALING 0xa
+#define RDES_PTP_PKT_RESERVED_TYPE 0xf
/* Basic descriptor structure for normal and alternate descriptors */
struct dma_desc {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index b1e5f24708c9..e6e6c2fcc4b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -50,10 +50,23 @@ static int dwmac_generic_probe(struct platform_device *pdev)
if (plat_dat->init) {
ret = plat_dat->init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto err_remove_config_dt;
}
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_exit;
+
+ return 0;
+
+err_exit:
+ if (plat_dat->exit)
+ plat_dat->exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ if (pdev->dev.of_node)
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
}
static const struct of_device_id dwmac_generic_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 36d3355f2fb0..866444b6c82f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -271,15 +271,17 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac)
- return -ENOMEM;
+ if (!gmac) {
+ err = -ENOMEM;
+ goto err_remove_config_dt;
+ }
gmac->pdev = pdev;
err = ipq806x_gmac_of_parse(gmac);
if (err) {
dev_err(dev, "device tree parsing error\n");
- return err;
+ goto err_remove_config_dt;
}
regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -300,7 +302,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- return -EINVAL;
+ err = -EINVAL;
+ goto err_remove_config_dt;
}
regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
@@ -319,7 +322,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- return -EINVAL;
+ err = -EINVAL;
+ goto err_remove_config_dt;
}
regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
@@ -346,7 +350,16 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = gmac;
plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (err)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return err;
}
static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 78e9d1861896..3d3f43d91b98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -46,7 +46,8 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
if (IS_ERR(reg)) {
dev_err(&pdev->dev, "syscon lookup failed\n");
- return PTR_ERR(reg);
+ ret = PTR_ERR(reg);
+ goto err_remove_config_dt;
}
if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
@@ -55,13 +56,23 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_config_dt;
}
regmap_update_bits(reg, LPC18XX_CREG_CREG6,
LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
}
static const struct of_device_id lpc18xx_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 309d99536a2c..7fdd1760a74c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -64,18 +64,31 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return -ENOMEM;
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dwmac->reg))
- return PTR_ERR(dwmac->reg);
+ if (IS_ERR(dwmac->reg)) {
+ ret = PTR_ERR(dwmac->reg);
+ goto err_remove_config_dt;
+ }
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
}
static const struct of_device_id meson6_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 250e4ceafc8d..ffaed1f35efe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -264,32 +264,48 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return -ENOMEM;
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dwmac->regs))
- return PTR_ERR(dwmac->regs);
+ if (IS_ERR(dwmac->regs)) {
+ ret = PTR_ERR(dwmac->regs);
+ goto err_remove_config_dt;
+ }
dwmac->pdev = pdev;
dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
if (dwmac->phy_mode < 0) {
dev_err(&pdev->dev, "missing phy-mode property\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_config_dt;
}
ret = meson8b_init_clk(dwmac);
if (ret)
- return ret;
+ goto err_remove_config_dt;
ret = meson8b_init_prg_eth(dwmac);
if (ret)
- return ret;
+ goto err_remove_config_dt;
plat_dat->bsp_priv = dwmac;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_clk_disable;
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(dwmac->m25_div_clk);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
}
static int meson8b_dwmac_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 3740a4417fa0..d80c88bd2bba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -981,14 +981,27 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
- if (IS_ERR(plat_dat->bsp_priv))
- return PTR_ERR(plat_dat->bsp_priv);
+ if (IS_ERR(plat_dat->bsp_priv)) {
+ ret = PTR_ERR(plat_dat->bsp_priv);
+ goto err_remove_config_dt;
+ }
ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_gmac_exit;
+
+ return 0;
+
+err_gmac_exit:
+ rk_gmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return ret;
}
static const struct of_device_id rk_gmac_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index bec6963ac71e..0c420e97de1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -304,6 +304,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
struct socfpga_dwmac *dwmac;
+ struct net_device *ndev;
+ struct stmmac_priv *stpriv;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -314,32 +316,43 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return -ENOMEM;
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
ret = socfpga_dwmac_parse_data(dwmac, dev);
if (ret) {
dev_err(dev, "Unable to parse OF data\n");
- return ret;
+ goto err_remove_config_dt;
}
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
- if (!ret) {
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *stpriv = netdev_priv(ndev);
+ ndev = platform_get_drvdata(pdev);
+ stpriv = netdev_priv(ndev);
- /* The socfpga driver needs to control the stmmac reset to
- * set the phy mode. Create a copy of the core reset handel
- * so it can be used by the driver later.
- */
- dwmac->stmmac_rst = stpriv->stmmac_rst;
+ /* The socfpga driver needs to control the stmmac reset to set the phy
+ * mode. Create a copy of the core reset handle so it can be used by
+ * the driver later.
+ */
+ dwmac->stmmac_rst = stpriv->stmmac_rst;
- ret = socfpga_dwmac_set_phy_mode(dwmac);
- }
+ ret = socfpga_dwmac_set_phy_mode(dwmac);
+ if (ret)
+ goto err_dvr_remove;
+
+ return 0;
+
+err_dvr_remove:
+ stmmac_dvr_remove(&pdev->dev);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 58c05acc2aab..060b98c37a85 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -345,13 +345,15 @@ static int sti_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return -ENOMEM;
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
ret = sti_dwmac_parse_data(dwmac, pdev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- return ret;
+ goto err_remove_config_dt;
}
dwmac->fix_retime_src = data->fix_retime_src;
@@ -363,9 +365,20 @@ static int sti_dwmac_probe(struct platform_device *pdev)
ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_dwmac_exit;
+
+ return 0;
+
+err_dwmac_exit:
+ sti_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return ret;
}
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index e5a926b8bee7..61cb24810d10 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -107,24 +107,33 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return -ENOMEM;
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- return ret;
+ goto err_remove_config_dt;
}
plat_dat->bsp_priv = dwmac;
ret = stm32_dwmac_init(plat_dat);
if (ret)
- return ret;
+ goto err_remove_config_dt;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- stm32_dwmac_clk_disable(dwmac);
+ goto err_clk_disable;
+
+ return 0;
+
+err_clk_disable:
+ stm32_dwmac_clk_disable(dwmac);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index adff46375a32..d07520fb969e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -120,22 +120,27 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac)
- return -ENOMEM;
+ if (!gmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
gmac->interface = of_get_phy_mode(dev->of_node);
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
dev_err(dev, "could not get tx clock\n");
- return PTR_ERR(gmac->tx_clk);
+ ret = PTR_ERR(gmac->tx_clk);
+ goto err_remove_config_dt;
}
/* Optional regulator for PHY */
gmac->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(gmac->regulator)) {
- if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_remove_config_dt;
+ }
dev_info(dev, "no regulator found\n");
gmac->regulator = NULL;
}
@@ -151,11 +156,18 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto err_remove_config_dt;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
+ goto err_gmac_exit;
+
+ return 0;
+
+err_gmac_exit:
+ sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4ec7397e7fb3..a601f8d43b75 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -123,22 +123,29 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
x->ipv4_pkt_rcvd++;
if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++;
- if (message_type == RDES_EXT_SYNC)
- x->rx_msg_type_sync++;
+
+ if (message_type == RDES_EXT_NO_PTP)
+ x->no_ptp_rx_msg_type_ext++;
+ else if (message_type == RDES_EXT_SYNC)
+ x->ptp_rx_msg_type_sync++;
else if (message_type == RDES_EXT_FOLLOW_UP)
- x->rx_msg_type_follow_up++;
+ x->ptp_rx_msg_type_follow_up++;
else if (message_type == RDES_EXT_DELAY_REQ)
- x->rx_msg_type_delay_req++;
+ x->ptp_rx_msg_type_delay_req++;
else if (message_type == RDES_EXT_DELAY_RESP)
- x->rx_msg_type_delay_resp++;
+ x->ptp_rx_msg_type_delay_resp++;
else if (message_type == RDES_EXT_PDELAY_REQ)
- x->rx_msg_type_pdelay_req++;
+ x->ptp_rx_msg_type_pdelay_req++;
else if (message_type == RDES_EXT_PDELAY_RESP)
- x->rx_msg_type_pdelay_resp++;
+ x->ptp_rx_msg_type_pdelay_resp++;
else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
- x->rx_msg_type_pdelay_follow_up++;
- else
- x->rx_msg_type_ext_no_ptp++;
+ x->ptp_rx_msg_type_pdelay_follow_up++;
+ else if (message_type == RDES_PTP_ANNOUNCE)
+ x->ptp_rx_msg_type_announce++;
+ else if (message_type == RDES_PTP_MANAGEMENT)
+ x->ptp_rx_msg_type_management++;
+ else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+ x->ptp_rx_msg_pkt_reserved_type++;
if (rdes1 & RDES1_PTP_PACKET_TYPE)
x->ptp_frame_type++;
@@ -204,14 +211,18 @@ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
- return (p->des3 & TDES3_TIMESTAMP_STATUS)
- >> TDES3_TIMESTAMP_STATUS_SHIFT;
+ /* Context type from W/B descriptor must be zero */
+ if (p->des3 & TDES3_CONTEXT_TYPE)
+ return -EINVAL;
+
+ /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
+ if (p->des3 & TDES3_TIMESTAMP_STATUS)
+ return 0;
+
+ return 1;
}
-/* NOTE: For RX CTX bit has to be checked before
- * HAVE a specific function for TX and another one for RX
- */
-static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
u64 ns;
@@ -223,12 +234,54 @@ static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
return ns;
}
-static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_rx_check_timestamp(void *desc)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u32 own, ctxt;
+ int ret = 1;
+
+ own = p->des3 & RDES3_OWN;
+ ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+ >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+ if (likely(!own && ctxt)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ /* Corrupted value */
+ ret = -EINVAL;
+ else
+ /* A valid Timestamp is ready to be read */
+ ret = 0;
+ }
+
+ /* Timestamp not ready */
+ return ret;
+}
+
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
+ int ret = -EINVAL;
+
+ /* Get the status from normal w/b descriptor */
+ if (likely(p->des3 & TDES3_RS1V)) {
+ if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
+ int i = 0;
+
+ /* Check if timestamp is OK from context descriptor */
+ do {
+ ret = dwmac4_rx_check_timestamp(desc);
+ if (ret < 0)
+ goto exit;
+ i++;
- return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
- >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+ } while ((ret == 1) || (i < 10));
+
+ if (i == 10)
+ ret = -EBUSY;
+ }
+ }
+exit:
+ return ret;
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -347,10 +400,9 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- if (p->des0)
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(p),
- p->des0, p->des1, p->des2, p->des3);
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
}
@@ -374,8 +426,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
- .get_timestamp = dwmac4_wrback_get_timestamp,
- .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+ .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
+ .get_timestamp = dwmac4_get_timestamp,
.set_tx_ic = dwmac4_rd_set_tx_ic,
.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 0902a2edeaa9..9736c505211a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -59,10 +59,13 @@
#define TDES3_CTXT_TCMSSV BIT(26)
/* TDES3 Common */
+#define TDES3_RS1V BIT(26)
+#define TDES3_RS1V_SHIFT 26
#define TDES3_LAST_DESCRIPTOR BIT(28)
#define TDES3_LAST_DESCRIPTOR_SHIFT 28
#define TDES3_FIRST_DESCRIPTOR BIT(29)
#define TDES3_CONTEXT_TYPE BIT(30)
+#define TDES3_CONTEXT_TYPE_SHIFT 30
/* TDS3 use for both format (read and write back) */
#define TDES3_OWN BIT(31)
@@ -117,6 +120,7 @@
#define RDES3_LAST_DESCRIPTOR BIT(28)
#define RDES3_FIRST_DESCRIPTOR BIT(29)
#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
+#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30
/* RDES3 (read format) */
#define RDES3_BUFFER1_VALID_ADDR BIT(24)
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 38f19c99cf59..e75549327c34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,22 +150,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
x->ipv4_pkt_rcvd++;
if (rdes4 & ERDES4_IPV6_PKT_RCVD)
x->ipv6_pkt_rcvd++;
- if (message_type == RDES_EXT_SYNC)
- x->rx_msg_type_sync++;
+
+ if (message_type == RDES_EXT_NO_PTP)
+ x->no_ptp_rx_msg_type_ext++;
+ else if (message_type == RDES_EXT_SYNC)
+ x->ptp_rx_msg_type_sync++;
else if (message_type == RDES_EXT_FOLLOW_UP)
- x->rx_msg_type_follow_up++;
+ x->ptp_rx_msg_type_follow_up++;
else if (message_type == RDES_EXT_DELAY_REQ)
- x->rx_msg_type_delay_req++;
+ x->ptp_rx_msg_type_delay_req++;
else if (message_type == RDES_EXT_DELAY_RESP)
- x->rx_msg_type_delay_resp++;
+ x->ptp_rx_msg_type_delay_resp++;
else if (message_type == RDES_EXT_PDELAY_REQ)
- x->rx_msg_type_pdelay_req++;
+ x->ptp_rx_msg_type_pdelay_req++;
else if (message_type == RDES_EXT_PDELAY_RESP)
- x->rx_msg_type_pdelay_resp++;
+ x->ptp_rx_msg_type_pdelay_resp++;
else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
- x->rx_msg_type_pdelay_follow_up++;
- else
- x->rx_msg_type_ext_no_ptp++;
+ x->ptp_rx_msg_type_pdelay_follow_up++;
+ else if (message_type == RDES_PTP_ANNOUNCE)
+ x->ptp_rx_msg_type_announce++;
+ else if (message_type == RDES_PTP_MANAGEMENT)
+ x->ptp_rx_msg_type_management++;
+ else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+ x->ptp_rx_msg_pkt_reserved_type++;
+
if (rdes4 & ERDES4_PTP_FRAME_TYPE)
x->ptp_frame_type++;
if (rdes4 & ERDES4_PTP_VER)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8dc9056c1001..4d2a759b8465 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -129,6 +129,7 @@ struct stmmac_priv {
int irq_wake;
spinlock_t ptp_lock;
void __iomem *mmcaddr;
+ void __iomem *ptpaddr;
u32 rx_tail_addr;
u32 tx_tail_addr;
u32 mss;
@@ -145,7 +146,7 @@ int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1e06173fc9d7..c5d0142adda2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -115,14 +115,17 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(ip_csum_bypassed),
STMMAC_STAT(ipv4_pkt_rcvd),
STMMAC_STAT(ipv6_pkt_rcvd),
- STMMAC_STAT(rx_msg_type_ext_no_ptp),
- STMMAC_STAT(rx_msg_type_sync),
- STMMAC_STAT(rx_msg_type_follow_up),
- STMMAC_STAT(rx_msg_type_delay_req),
- STMMAC_STAT(rx_msg_type_delay_resp),
- STMMAC_STAT(rx_msg_type_pdelay_req),
- STMMAC_STAT(rx_msg_type_pdelay_resp),
- STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(no_ptp_rx_msg_type_ext),
+ STMMAC_STAT(ptp_rx_msg_type_sync),
+ STMMAC_STAT(ptp_rx_msg_type_follow_up),
+ STMMAC_STAT(ptp_rx_msg_type_delay_req),
+ STMMAC_STAT(ptp_rx_msg_type_delay_resp),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(ptp_rx_msg_type_announce),
+ STMMAC_STAT(ptp_rx_msg_type_management),
+ STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
STMMAC_STAT(ptp_frame_type),
STMMAC_STAT(ptp_ver),
STMMAC_STAT(timestamp_dropped),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index a77f68918010..10d6059b2f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,21 +34,29 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
}
static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
- u32 ptp_clock)
+ u32 ptp_clock, int gmac4)
{
u32 value = readl(ioaddr + PTP_TCR);
unsigned long data;
- /* Convert the ptp_clock to nano second
- * formula = (2/ptp_clock) * 1000000000
- * where, ptp_clock = 50MHz.
+ /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
+ * formula = (1/ptp_clock) * 1000000000
+ * where ptp_clock is 50MHz if fine method is used to update system
*/
- data = (2000000000ULL / ptp_clock);
+ if (value & PTP_TCR_TSCFUPDT)
+ data = (1000000000ULL / 50000000);
+ else
+ data = (1000000000ULL / ptp_clock);
/* 0.465ns accuracy */
if (!(value & PTP_TCR_TSCTRLSSR))
data = (data * 1000) / 465;
+ data &= PTP_SSIR_SSINC_MASK;
+
+ if (gmac4)
+ data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+
writel(data, ioaddr + PTP_SSIR);
return data;
@@ -104,14 +112,30 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
}
static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
- int add_sub)
+ int add_sub, int gmac4)
{
u32 value;
int limit;
+ if (add_sub) {
+ /* If the new sec value needs to be subtracted with
+ * the system time, then MAC_STSUR reg should be
+ * programmed with (2^32 – <new_sec_value>)
+ */
+ if (gmac4)
+ sec = (100000000ULL - sec);
+
+ value = readl(ioaddr + PTP_TCR);
+ if (value & PTP_TCR_TSCTRLSSR)
+ nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec);
+ else
+ nsec = (PTP_BINARY_ROLLOVER_MODE - nsec);
+ }
+
writel(sec, ioaddr + PTP_STSUR);
- writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
- ioaddr + PTP_STNSUR);
+ value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+ writel(value, ioaddr + PTP_STNSUR);
+
/* issue command to initialize the system time value */
value = readl(ioaddr + PTP_TCR);
value |= PTP_TCR_TSUPDT;
@@ -134,8 +158,9 @@ static u64 stmmac_get_systime(void __iomem *ioaddr)
{
u64 ns;
+ /* Get the TSSS value */
ns = readl(ioaddr + PTP_STNSR);
- /* convert sec time value to nanosecond */
+ /* Get the TSS and convert sec time value to nanosecond */
ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
return ns;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6c85b61aaa0b..caf069a465f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -340,18 +340,17 @@ out:
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
* @skb : the socket buffer
* Description :
* This function will read timestamp from the descriptor & pass it to stack.
* and also perform some sanity checks.
*/
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
- unsigned int entry, struct sk_buff *skb)
+ struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
u64 ns;
- void *desc = NULL;
if (!priv->hwts_tx_en)
return;
@@ -360,58 +359,55 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
return;
- if (priv->adv_ts)
- desc = (priv->dma_etx + entry);
- else
- desc = (priv->dma_tx + entry);
-
/* check tx tstamp status */
- if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
- return;
+ if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+ /* get the valid tstamp */
+ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- /* get the valid tstamp */
- ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
- memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamp.hwtstamp = ns_to_ktime(ns);
- /* pass tstamp to stack */
- skb_tstamp_tx(skb, &shhwtstamp);
+ netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+ }
return;
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
* @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
* @skb : the socket buffer
* Description :
* This function will read received packet's timestamp from the descriptor
* and pass it to stack. It also perform some sanity checks.
*/
-static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
- unsigned int entry, struct sk_buff *skb)
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+ struct dma_desc *np, struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
u64 ns;
- void *desc = NULL;
if (!priv->hwts_rx_en)
return;
- if (priv->adv_ts)
- desc = (priv->dma_erx + entry);
- else
- desc = (priv->dma_rx + entry);
-
- /* exit if rx tstamp is not valid */
- if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
- return;
+ /* Check if timestamp is available */
+ if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+ /* For GMAC4, the valid timestamp is from CTX next desc. */
+ if (priv->plat->has_gmac4)
+ ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
+ else
+ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- /* get valid tstamp */
- ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
- shhwtstamp = skb_hwtstamps(skb);
- memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ } else {
+ netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+ }
}
/**
@@ -598,17 +594,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
if (!priv->hwts_tx_en && !priv->hwts_rx_en)
- priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+ priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
else {
value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
tstamp_all | ptp_v2 | ptp_over_ethernet |
ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
ts_master_en | snap_type_sel);
- priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+ priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
/* program Sub Second Increment reg */
sec_inc = priv->hw->ptp->config_sub_second_increment(
- priv->ioaddr, priv->clk_ptp_rate);
+ priv->ptpaddr, priv->clk_ptp_rate,
+ priv->plat->has_gmac4);
temp = div_u64(1000000000ULL, sec_inc);
/* calculate default added value:
@@ -618,14 +615,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
*/
temp = (u64)(temp << 32);
priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
- priv->hw->ptp->config_addend(priv->ioaddr,
+ priv->hw->ptp->config_addend(priv->ptpaddr,
priv->default_addend);
/* initialize system time */
ktime_get_real_ts64(&now);
/* lower 32 bits of tv_sec are safe until y2106 */
- priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
+ priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
now.tv_nsec);
}
@@ -676,7 +673,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- return stmmac_ptp_register(priv);
+ stmmac_ptp_register(priv);
+
+ return 0;
}
static void stmmac_release_ptp(struct stmmac_priv *priv)
@@ -878,6 +877,13 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
+ /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
+ * subsequent PHY polling, make sure we force a link transition if
+ * we have a UP/DOWN/UP transition
+ */
+ if (phydev->is_pseudo_fixed_link)
+ phydev->irq = PHY_POLL;
+
pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
" Link = %d\n", dev->name, phydev->phy_id, phydev->link);
@@ -1331,7 +1337,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
}
- stmmac_get_tx_hwtstamp(priv, entry, skb);
+ stmmac_get_tx_hwtstamp(priv, p, skb);
}
if (likely(priv->tx_skbuff_dma[entry].buf)) {
@@ -1477,10 +1483,13 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
- else
+ } else {
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+ }
dwmac_mmc_intr_all_mask(priv->mmcaddr);
@@ -1710,7 +1719,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret)
- netdev_warn(priv->dev, "PTP support cannot init.\n");
+ netdev_warn(priv->dev, "fail to init PTP.\n");
}
#ifdef CONFIG_DEBUG_FS
@@ -2475,7 +2484,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
void *rx_head;
- pr_debug("%s: descriptor ring:\n", __func__);
+ pr_info(">>>>>> %s: descriptor ring:\n", __func__);
if (priv->extend_desc)
rx_head = (void *)priv->dma_erx;
else
@@ -2486,6 +2495,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
while (count < limit) {
int status;
struct dma_desc *p;
+ struct dma_desc *np;
if (priv->extend_desc)
p = (struct dma_desc *)(priv->dma_erx + entry);
@@ -2505,9 +2515,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
next_entry = priv->cur_rx;
if (priv->extend_desc)
- prefetch(priv->dma_erx + next_entry);
+ np = (struct dma_desc *)(priv->dma_erx + next_entry);
else
- prefetch(priv->dma_rx + next_entry);
+ np = priv->dma_rx + next_entry;
+
+ prefetch(np);
if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
priv->hw->desc->rx_extended_status(&priv->dev->stats,
@@ -2559,7 +2571,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {
- pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+ pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
p, entry, des);
if (frame_len > ETH_FRAME_LEN)
pr_debug("\tframe size %d, COE: %d\n",
@@ -2616,13 +2628,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
DMA_FROM_DEVICE);
}
- stmmac_get_rx_hwtstamp(priv, entry, skb);
-
if (netif_msg_pktdata(priv)) {
pr_debug("frame received (%dbytes)", frame_len);
print_pkt(skb->data, frame_len);
}
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+
stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev);
@@ -3404,7 +3416,6 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
- of_node_put(priv->plat->phy_node);
if (priv->stmmac_rst)
reset_control_assert(priv->stmmac_rst);
clk_disable_unprepare(priv->pclk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 0a0d6a86f397..a840818bf4df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -200,7 +200,6 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
- * @plat: driver data platform structure
* @mac: MAC address to use
* Description:
* this function is to read the driver parameters from device-tree and
@@ -306,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- of_node_put(plat->phy_node);
+ stmmac_remove_config_dt(pdev, plat);
return ERR_PTR(-ENOMEM);
}
plat->dma_cfg = dma_cfg;
@@ -329,14 +328,37 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
return plat;
}
+
+/**
+ * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
+ * @pdev: platform_device structure
+ * @plat: driver data platform structure
+ *
+ * Release resources claimed by stmmac_probe_config_dt().
+ */
+void stmmac_remove_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(plat->phy_node);
+}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
return ERR_PTR(-ENOSYS);
}
+
+void stmmac_remove_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
+EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
@@ -392,10 +414,13 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct plat_stmmacenet_data *plat = priv->plat;
int ret = stmmac_dvr_remove(&pdev->dev);
- if (priv->plat->exit)
- priv->plat->exit(pdev, priv->plat->bsp_priv);
+ if (plat->exit)
+ plat->exit(pdev, plat->bsp_priv);
+
+ stmmac_remove_config_dt(pdev, plat);
return ret;
}
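The new stmmac_remove_config_dt() gives every platform glue driver a single teardown helper that balances stmmac_probe_config_dt(), including the added fixed-link deregistration. A minimal sketch of the intended pairing in a hypothetical glue driver (driver name and flow are illustrative, not taken from the patch):

        #include <linux/module.h>
        #include <linux/platform_device.h>
        #include "stmmac.h"
        #include "stmmac_platform.h"

        static int foo_dwmac_probe(struct platform_device *pdev)
        {
                struct plat_stmmacenet_data *plat;
                struct stmmac_resources res;
                int ret;

                ret = stmmac_get_platform_resources(pdev, &res);
                if (ret)
                        return ret;

                plat = stmmac_probe_config_dt(pdev, &res.mac);
                if (IS_ERR(plat))
                        return PTR_ERR(plat);

                ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
                if (ret)
                        stmmac_remove_config_dt(pdev, plat);    /* undo the DT side on failure */

                return ret;
        }

On the remove side nothing extra is needed in glue drivers that use the generic helper: as the hunk above shows, stmmac_pltfr_remove() now calls stmmac_remove_config_dt() itself.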
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 64e147f53a9c..b72eb0de57b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -23,6 +23,8 @@
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+void stmmac_remove_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 289d52725a6c..3eb281d1db08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -54,7 +54,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->config_addend(priv->ioaddr, addend);
+ priv->hw->ptp->config_addend(priv->ptpaddr, addend);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
@@ -89,7 +89,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+ priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj,
+ priv->plat->has_gmac4);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
@@ -114,7 +115,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
spin_lock_irqsave(&priv->ptp_lock, flags);
- ns = priv->hw->ptp->get_systime(priv->ioaddr);
+ ns = priv->hw->ptp->get_systime(priv->ptpaddr);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
@@ -141,7 +142,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+ priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
@@ -177,7 +178,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
* Description: this function will register the ptp clock driver
* to kernel. It also does some house keeping work.
*/
-int stmmac_ptp_register(struct stmmac_priv *priv)
+void stmmac_ptp_register(struct stmmac_priv *priv)
{
spin_lock_init(&priv->ptp_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
@@ -185,15 +186,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
if (IS_ERR(priv->ptp_clock)) {
+ netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- return PTR_ERR(priv->ptp_clock);
- }
-
- spin_lock_init(&priv->ptp_lock);
-
- netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
-
- return 0;
+ } else if (priv->ptp_clock)
+ netdev_info(priv->dev, "registered PTP clock\n");
}
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 4535df37c227..c06938c47af5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -22,51 +22,53 @@
Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
******************************************************************************/
-#ifndef __STMMAC_PTP_H__
-#define __STMMAC_PTP_H__
+#ifndef __STMMAC_PTP_H__
+#define __STMMAC_PTP_H__
-/* IEEE 1588 PTP register offsets */
-#define PTP_TCR 0x0700 /* Timestamp Control Reg */
-#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
-#define PTP_STSR 0x0708 /* System Time – Seconds Reg */
-#define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */
-#define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */
-#define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */
-#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
-#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
-#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
-#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
-#define PTP_TSR 0x0728 /* Timestamp Status */
+#define PTP_GMAC4_OFFSET 0xb00
+#define PTP_GMAC3_X_OFFSET 0x700
-#define PTP_STNSUR_ADDSUB_SHIFT 31
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x00 /* Timestamp Control Reg */
+#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x08 /* System Time – Seconds Reg */
+#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */
+#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */
+#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */
+#define PTP_TAR 0x18 /* Timestamp Addend Reg */
-/* PTP TCR defines */
-#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
-#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
-#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
-#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
-/* Timestamp Interrupt Trigger Enable */
-#define PTP_TCR_TSTRIG 0x00000010
-#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
-#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
-/* Timestamp Digital or Binary Rollover Control */
-#define PTP_TCR_TSCTRLSSR 0x00000200
+#define PTP_STNSUR_ADDSUB_SHIFT 31
+#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */
+#define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */
+/* PTP Timestamp control register defines */
+#define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */
+#define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */
+#define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */
+#define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */
+#define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */
/* Enable PTP packet Processing for Version 2 Format */
-#define PTP_TCR_TSVER2ENA 0x00000400
+#define PTP_TCR_TSVER2ENA BIT(10)
/* Enable Processing of PTP over Ethernet Frames */
-#define PTP_TCR_TSIPENA 0x00000800
+#define PTP_TCR_TSIPENA BIT(11)
/* Enable Processing of PTP Frames Sent over IPv6-UDP */
-#define PTP_TCR_TSIPV6ENA 0x00001000
+#define PTP_TCR_TSIPV6ENA BIT(12)
/* Enable Processing of PTP Frames Sent over IPv4-UDP */
-#define PTP_TCR_TSIPV4ENA 0x00002000
+#define PTP_TCR_TSIPV4ENA BIT(13)
/* Enable Timestamp Snapshot for Event Messages */
-#define PTP_TCR_TSEVNTENA 0x00004000
+#define PTP_TCR_TSEVNTENA BIT(14)
/* Enable Snapshot for Messages Relevant to Master */
-#define PTP_TCR_TSMSTRENA 0x00008000
+#define PTP_TCR_TSMSTRENA BIT(15)
/* Select PTP packets for Taking Snapshots */
-#define PTP_TCR_SNAPTYPSEL_1 0x00010000
+#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
/* Enable MAC address for PTP Frame Filtering */
-#define PTP_TCR_TSENMACADDR 0x00040000
+#define PTP_TCR_TSENMACADDR BIT(18)
+
+/* SSIR defines */
+#define PTP_SSIR_SSINC_MASK 0xff
+#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
-#endif /* __STMMAC_PTP_H__ */
+#endif /* __STMMAC_PTP_H__ */
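The offsets above are now relative to a per-core PTP base, so somewhere in the series (not shown in this excerpt) the driver has to pick that base once; the stmmac_ptp.c hunks already pass priv->ptpaddr instead of priv->ioaddr. A plausible sketch of how the base is derived, assuming has_gmac4 mirrors priv->plat->has_gmac4:

        #include <linux/io.h>
        #include "stmmac_ptp.h"

        /* Sketch only: compute the PTP register base from the MAC register
         * base. Where this assignment actually lives in the series is an
         * assumption, not quoted from the patch.
         */
        static void __iomem *stmmac_ptp_base(void __iomem *ioaddr, bool has_gmac4)
        {
                return ioaddr + (has_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
        }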
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index aa4f9d2d8fa9..02f452730d52 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -623,6 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
void __iomem *gregs = bp->gregs;
void __iomem *cregs = bp->creg;
void __iomem *bregs = bp->bregs;
+ __u32 bblk_dvma = (__u32)bp->bblock_dvma;
unsigned char *e = &bp->dev->dev_addr[0];
/* Latch current counters into statistics. */
@@ -671,9 +672,9 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
bregs + BMAC_XIFCFG);
/* Tell the QEC where the ring descriptors are. */
- sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+ sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
cregs + CREG_RXDS);
- sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+ sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
cregs + CREG_TXDS);
/* Setup the FIFO pointers into QEC local memory. */
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
index 06dd21707353..532fc56830cf 100644
--- a/drivers/net/ethernet/sun/sunbmac.h
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -291,7 +291,7 @@ struct bigmac {
void __iomem *bregs; /* BigMAC Registers */
void __iomem *tregs; /* BigMAC Transceiver */
struct bmac_init_block *bmac_block; /* RX and TX descriptors */
- __u32 bblock_dvma; /* RX and TX descriptors */
+ dma_addr_t bblock_dvma; /* RX and TX descriptors */
spinlock_t lock;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 9b825780b3be..9582948145c1 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep)
{
struct qe_init_block *qb = qep->qe_block;
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 qbufs_dvma = qep->buffers_dvma;
+ __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
int i;
qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
@@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
void __iomem *mregs = qep->mregs;
void __iomem *gregs = qecp->gregs;
unsigned char *e = &qep->dev->dev_addr[0];
+ __u32 qblk_dvma = (__u32)qep->qblock_dvma;
u32 tmp;
int i;
@@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq)
return -EAGAIN;
/* Setup initial rx/tx init block pointers. */
- sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
- sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+ sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+ sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
/* Enable/mask the various irq's. */
sbus_writel(0, cregs + CREG_RIMASK);
@@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
struct net_device *dev = qep->dev;
struct qe_rxd *this;
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 qbufs_dvma = qep->buffers_dvma;
+ __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
int elem = qep->rx_new;
u32 flags;
@@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+ __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
unsigned char *txbuf;
int len, entry;
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index 581781b6b2fa..ae190b77431b 100644
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -334,12 +334,12 @@ struct sunqe {
void __iomem *qcregs; /* QEC per-channel Registers */
void __iomem *mregs; /* Per-channel MACE Registers */
struct qe_init_block *qe_block; /* RX and TX descriptors */
- __u32 qblock_dvma; /* RX and TX descriptors */
+ dma_addr_t qblock_dvma; /* RX and TX descriptors */
spinlock_t lock; /* Protects txfull state */
int rx_new, rx_old; /* RX ring extents */
int tx_new, tx_old; /* TX ring extents */
struct sunqe_buffers *buffers; /* CPU visible address. */
- __u32 buffers_dvma; /* DVMA visible address. */
+ dma_addr_t buffers_dvma; /* DVMA visible address. */
struct sunqec *parent;
u8 mconfig; /* Base MACE mconfig value */
struct platform_device *op; /* QE's OF device struct */
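The sunbmac/sunqe edits are purely about integer width: the descriptor handles really are dma_addr_t (64-bit on sparc64), while the QEC/BigMAC descriptor registers take 32-bit DVMA addresses, so the narrowing now happens explicitly and only at the point of use. The idea, as a one-line sketch:

        #include <linux/types.h>

        /* Sketch: keep the DMA handle at its natural width and truncate
         * explicitly only where a 32-bit descriptor register requires it.
         */
        static inline u32 dvma_lo32(dma_addr_t dvma)
        {
                return (u32)dvma;
        }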
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 0d0053128542..97d64bfed465 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -33,7 +33,6 @@
#include <linux/stat.h>
#include <linux/types.h>
-#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -43,7 +42,6 @@
#include <linux/phy.h>
#include <linux/mii.h>
-#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
@@ -982,11 +980,13 @@ static int dwceqos_mii_probe(struct net_device *ndev)
if (netif_msg_probe(lp))
phy_attached_info(phydev);
- phydev->supported &= PHY_GBIT_FEATURES;
+ phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
lp->link = 0;
lp->speed = 0;
lp->duplex = DUPLEX_UNKNOWN;
+ lp->flowcontrol.autoneg = AUTONEG_ENABLE;
return 0;
}
@@ -2881,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_get_phy_mode(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
- goto err_out_clk_dis_phy;
+ goto err_out_deregister_fixed_link;
}
lp->phy_interface = ret;
@@ -2889,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = dwceqos_mii_init(lp);
if (ret) {
dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
- goto err_out_clk_dis_phy;
+ goto err_out_deregister_fixed_link;
}
ret = dwceqos_mii_probe(ndev);
if (ret != 0) {
netdev_err(ndev, "mii_probe fail.\n");
ret = -ENXIO;
- goto err_out_clk_dis_phy;
+ goto err_out_deregister_fixed_link;
}
dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2914,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
ret);
- goto err_out_clk_dis_phy;
+ goto err_out_deregister_fixed_link;
}
dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
pdev->id, ndev->base_addr, ndev->irq);
@@ -2924,7 +2924,7 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
ndev->irq, ret);
- goto err_out_clk_dis_phy;
+ goto err_out_deregister_fixed_link;
}
if (netif_msg_probe(lp))
@@ -2935,11 +2935,14 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_clk_dis_phy;
+ goto err_out_deregister_fixed_link;
}
return 0;
+err_out_deregister_fixed_link:
+ if (of_phy_is_fixed_link(pdev->dev.of_node))
+ of_phy_deregister_fixed_link(pdev->dev.of_node);
err_out_clk_dis_phy:
clk_disable_unprepare(lp->phy_ref_clk);
err_out_clk_dis_aper:
@@ -2959,8 +2962,11 @@ static int dwceqos_remove(struct platform_device *pdev)
if (ndev) {
lp = netdev_priv(ndev);
- if (ndev->phydev)
+ if (ndev->phydev) {
phy_disconnect(ndev->phydev);
+ if (of_phy_is_fixed_link(pdev->dev.of_node))
+ of_phy_deregister_fixed_link(pdev->dev.of_node);
+ }
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 054a8dd23dae..ba1e45ff6aae 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
}
dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ of_node_put(node);
priv = dev_get_drvdata(dev);
priv->cpsw_phy_sel(priv, phy_mode, slave);
+
+ put_device(dev);
}
EXPORT_SYMBOL_GPL(cpsw_phy_sel);
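Both added lines are reference-count bookkeeping: device-tree node lookups and bus_find_device() hand back counted references that the caller must drop once it is done with them. The generic shape of the fix, as a sketch (the match callback and its cookie are placeholders):

        #include <linux/device.h>
        #include <linux/of.h>
        #include <linux/platform_device.h>

        /* Sketch of the lookup-use-release pattern the fix enforces: every
         * successful device_node/device lookup is balanced with a put.
         */
        static int use_matching_platform_device(struct device_node *node,
                                                int (*match)(struct device *, void *))
        {
                struct device *dev;

                dev = bus_find_device(&platform_bus_type, NULL, node, match);
                of_node_put(node);      /* done with the node reference */
                if (!dev)
                        return -ENODEV;

                /* ... dev_get_drvdata(dev) and use the driver data here ... */

                put_device(dev);        /* drop the reference bus_find_device() took */
                return 0;
        }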
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c6cff3d2ff05..b9087b828eff 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2375,8 +2375,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
* to the PHY is the Ethernet MAC DT node.
*/
ret = of_phy_register_fixed_link(slave_node);
- if (ret)
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
return ret;
+ }
slave_data->phy_node = of_node_get(slave_node);
} else if (parp) {
u32 phyid;
@@ -2397,6 +2400,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
+ put_device(&mdio->dev);
} else {
dev_err(&pdev->dev,
"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
@@ -2440,6 +2444,34 @@ no_phy_slave:
return 0;
}
+static void cpsw_remove_dt(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *slave_node;
+ int i = 0;
+
+ for_each_available_child_of_node(node, slave_node) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+ if (strcmp(slave_node->name, "slave"))
+ continue;
+
+ if (of_phy_is_fixed_link(slave_node))
+ of_phy_deregister_fixed_link(slave_node);
+
+ of_node_put(slave_data->phy_node);
+
+ i++;
+ if (i == data->slaves)
+ break;
+ }
+
+ of_platform_depopulate(&pdev->dev);
+}
+
static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
struct cpsw_common *cpsw = priv->cpsw;
@@ -2547,6 +2579,9 @@ static int cpsw_probe(struct platform_device *pdev)
int irq;
cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+
cpsw->dev = &pdev->dev;
ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
@@ -2584,11 +2619,19 @@ static int cpsw_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&cpsw->data, pdev)) {
- dev_err(&pdev->dev, "cpsw: platform data missing\n");
- ret = -ENODEV;
+ /* Need to enable clocks with runtime PM api to access module
+ * registers
+ */
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto clean_runtime_disable_ret;
}
+
+ ret = cpsw_probe_dt(&cpsw->data, pdev);
+ if (ret)
+ goto clean_dt_ret;
+
data = &cpsw->data;
cpsw->rx_ch_num = 1;
cpsw->tx_ch_num = 1;
@@ -2608,7 +2651,7 @@ static int cpsw_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!cpsw->slaves) {
ret = -ENOMEM;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
for (i = 0; i < data->slaves; i++)
cpsw->slaves[i].slave_num = i;
@@ -2620,7 +2663,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (IS_ERR(clk)) {
dev_err(priv->dev, "fck is not found\n");
ret = -ENODEV;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
@@ -2628,26 +2671,17 @@ static int cpsw_probe(struct platform_device *pdev)
ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
if (IS_ERR(ss_regs)) {
ret = PTR_ERR(ss_regs);
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->regs = ss_regs;
- /* Need to enable clocks with runtime PM api to access module
- * registers
- */
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
- goto clean_runtime_disable_ret;
- }
cpsw->version = readl(&cpsw->regs->id_ver);
- pm_runtime_put_sync(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(cpsw->wr_regs)) {
ret = PTR_ERR(cpsw->wr_regs);
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
memset(&dma_params, 0, sizeof(dma_params));
@@ -2684,7 +2718,7 @@ static int cpsw_probe(struct platform_device *pdev)
default:
dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
ret = -ENODEV;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
for (i = 0; i < cpsw->data.slaves; i++) {
struct cpsw_slave *slave = &cpsw->slaves[i];
@@ -2713,7 +2747,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (!cpsw->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
@@ -2811,16 +2845,23 @@ static int cpsw_probe(struct platform_device *pdev)
ret = cpsw_probe_dual_emac(priv);
if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
- goto clean_ale_ret;
+ goto clean_unregister_netdev_ret;
}
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
+clean_unregister_netdev_ret:
+ unregister_netdev(ndev);
clean_ale_ret:
cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+ cpsw_remove_dt(pdev);
+ pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
clean_ndev_ret:
@@ -2846,7 +2887,7 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_ale_destroy(cpsw->ale);
cpdma_ctlr_destroy(cpsw->dma);
- of_platform_depopulate(&pdev->dev);
+ cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (cpsw->data.dual_emac)
@@ -2889,6 +2930,8 @@ static int cpsw_resume(struct device *dev)
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
+ /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
+ rtnl_lock();
if (cpsw->data.dual_emac) {
int i;
@@ -2900,6 +2943,8 @@ static int cpsw_resume(struct device *dev)
if (netif_running(ndev))
cpsw_ndo_open(ndev);
}
+ rtnl_unlock();
+
return 0;
}
#endif
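The probe reordering is the heart of this hunk set: runtime PM is taken before cpsw_probe_dt() so the module is clocked for the register reads that follow, and everything the DT parse claims (fixed-link PHYs, phy_node references, populated children) gets its own unwind step via cpsw_remove_dt(). The resulting shape, as a generic sketch where parse_dt()/remove_dt() stand in for the cpsw helpers:

        #include <linux/platform_device.h>
        #include <linux/pm_runtime.h>

        /* Sketch of the probe ordering this patch establishes; the device
         * setup between the two PM calls is elided.
         */
        static int probe_ordering_sketch(struct platform_device *pdev,
                                         int (*parse_dt)(struct platform_device *),
                                         void (*remove_dt)(struct platform_device *))
        {
                int ret;

                ret = pm_runtime_get_sync(&pdev->dev); /* clocks on before register access */
                if (ret < 0) {
                        pm_runtime_put_noidle(&pdev->dev);
                        return ret;
                }

                ret = parse_dt(pdev);   /* may register fixed-link PHYs, grab of_node refs */
                if (ret)
                        goto clean_dt;

                /* ... ioremap, CPDMA/ALE setup, register_netdev() ... */

                pm_runtime_put(&pdev->dev);
                return 0;

        clean_dt:
                remove_dt(pdev);        /* deregister fixed links, drop refs, depopulate children */
                pm_runtime_put_sync(&pdev->dev);
                return ret;
        }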
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2fd94a5bc1f3..481c7bf0395b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev)
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = NULL;
+ struct device *phy = NULL;
ret = pm_runtime_get_sync(&priv->pdev->dev);
if (ret < 0) {
@@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev)
/* use the first phy on the bus if pdata did not give us a phy id */
if (!phydev && !priv->phy_id) {
- struct device *phy;
-
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
match_first_device);
- if (phy)
+ if (phy) {
priv->phy_id = dev_name(phy);
+ if (!priv->phy_id || !*priv->phy_id)
+ put_device(phy);
+ }
}
if (!phydev && priv->phy_id && *priv->phy_id) {
phydev = phy_connect(ndev, priv->phy_id,
&emac_adjust_link,
PHY_INTERFACE_MODE_MII);
-
+ put_device(phy); /* reference taken by bus_find_device */
if (IS_ERR(phydev)) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_id);
@@ -1765,6 +1767,7 @@ static int davinci_emac_try_get_mac(struct platform_device *pdev,
*/
static int davinci_emac_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
int rc = 0;
struct resource *res, *res_ctrl;
struct net_device *ndev;
@@ -1803,7 +1806,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
rc = -ENODEV;
- goto no_pdata;
+ goto err_free_netdev;
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1939,6 +1942,10 @@ no_cpdma_chan:
cpdma_chan_destroy(priv->rxchan);
cpdma_ctlr_destroy(priv->dma);
no_pdata:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(priv->phy_node);
+err_free_netdev:
free_netdev(ndev);
return rc;
}
@@ -1954,6 +1961,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_priv *priv = netdev_priv(ndev);
+ struct device_node *np = pdev->dev.of_node;
dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
@@ -1966,6 +1974,8 @@ static int davinci_emac_remove(struct platform_device *pdev)
unregister_netdev(ndev);
of_node_put(priv->phy_node);
pm_runtime_disable(&pdev->dev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 446ea580ad42..928c1dca2673 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
pr_debug("%s: bssid matched\n", __func__);
break;
} else {
- pr_debug("%s: bssid unmached\n", __func__);
+ pr_debug("%s: bssid unmatched\n", __func__);
continue;
}
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 7f127dc1b7ba..fa32391720fe 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
if (!qmgr_stat_below_low_watermark(rxq) &&
napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll"
- " napi_reschedule successed\n",
+ printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
dev->name);
#endif
qmgr_disable_irq(rxq);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3c20e87bb761..8b4822ad27cb 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -58,9 +58,9 @@ struct geneve_dev {
struct hlist_node hlist; /* vni hash table */
struct net *net; /* netns for packet i/o */
struct net_device *dev; /* netdev for geneve tunnel */
- struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
- struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */
#endif
u8 vni[3]; /* virtual network ID for tunnel */
u8 ttl; /* TTL override */
@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
@@ -543,9 +543,19 @@ static void __geneve_sock_release(struct geneve_sock *gs)
static void geneve_sock_release(struct geneve_dev *geneve)
{
- __geneve_sock_release(geneve->sock4);
+ struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
- __geneve_sock_release(geneve->sock6);
+ struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);
+
+ rcu_assign_pointer(geneve->sock6, NULL);
+#endif
+
+ rcu_assign_pointer(geneve->sock4, NULL);
+ synchronize_net();
+
+ __geneve_sock_release(gs4);
+#if IS_ENABLED(CONFIG_IPV6)
+ __geneve_sock_release(gs6);
#endif
}
@@ -586,10 +596,10 @@ out:
gs->flags = geneve->flags;
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- geneve->sock6 = gs;
+ rcu_assign_pointer(geneve->sock6, gs);
else
#endif
- geneve->sock4 = gs;
+ rcu_assign_pointer(geneve->sock4, gs);
hash = geneve_net_vni_hash(geneve->vni);
hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
@@ -603,9 +613,7 @@ static int geneve_open(struct net_device *dev)
bool metadata = geneve->collect_md;
int ret = 0;
- geneve->sock4 = NULL;
#if IS_ENABLED(CONFIG_IPV6)
- geneve->sock6 = NULL;
if (ipv6 || metadata)
ret = geneve_sock_add(geneve, true);
#endif
@@ -720,6 +728,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct rtable *rt = NULL;
__u8 tos;
+ if (!rcu_dereference(geneve->sock4))
+ return ERR_PTR(-EIO);
+
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_mark = skb->mark;
fl4->flowi4_proto = IPPROTO_UDP;
@@ -772,11 +783,15 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
struct dst_cache *dst_cache;
+ struct geneve_sock *gs6;
__u8 prio;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ return ERR_PTR(-EIO);
+
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_mark = skb->mark;
fl6->flowi6_proto = IPPROTO_UDP;
@@ -842,9 +857,8 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs4 = geneve->sock4;
+ struct geneve_sock *gs4;
struct rtable *rt = NULL;
- const struct iphdr *iip; /* interior IP header */
int err = -EINVAL;
struct flowi4 fl4;
__u8 tos, ttl;
@@ -853,6 +867,10 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs4 = rcu_dereference(geneve->sock4);
+ if (!gs4)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
@@ -871,8 +889,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
skb_reset_mac_header(skb);
- iip = ip_hdr(skb);
-
if (info) {
const struct ip_tunnel_key *key = &info->key;
u8 *opts = NULL;
@@ -892,7 +908,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (unlikely(err))
goto tx_error;
- tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
+ tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
@@ -901,7 +917,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (unlikely(err))
goto tx_error;
- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
+ tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
ttl = geneve->ttl;
if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
ttl = 1;
@@ -932,9 +948,8 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
- const struct iphdr *iip; /* interior IP header */
+ struct geneve_sock *gs6;
int err = -EINVAL;
struct flowi6 fl6;
__u8 prio, ttl;
@@ -943,6 +958,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
@@ -959,8 +978,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
skb_reset_mac_header(skb);
- iip = ip_hdr(skb);
-
if (info) {
const struct ip_tunnel_key *key = &info->key;
u8 *opts = NULL;
@@ -981,7 +998,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (unlikely(err))
goto tx_error;
- prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
+ prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
label = info->key.label;
} else {
@@ -991,7 +1008,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
- iip, skb);
+ ip_hdr(skb), skb);
ttl = geneve->ttl;
if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
ttl = 1;
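The geneve socket handling above is a standard RCU hand-off: the pointers become __rcu, writers publish them with rcu_assign_pointer() under RTNL, the xmit paths read them with rcu_dereference() and bail if they are gone, and teardown clears the pointers and waits a grace period before the sockets are released. The same pattern stripped to its bones (types and names are placeholders, not from the driver):

        #include <linux/netdevice.h>
        #include <linux/rcupdate.h>
        #include <linux/rtnetlink.h>
        #include <linux/slab.h>

        struct tun_sock { int placeholder; };

        struct tun_priv {
                struct tun_sock __rcu *sock4;   /* published and cleared under RTNL */
        };

        /* writer, called under rtnl_lock() */
        static void tun_publish(struct tun_priv *p, struct tun_sock *s)
        {
                rcu_assign_pointer(p->sock4, s);
        }

        /* fast-path reader, inside an RCU read-side section (e.g. xmit) */
        static struct tun_sock *tun_lookup(struct tun_priv *p)
        {
                return rcu_dereference(p->sock4);       /* may be NULL: caller must bail */
        }

        /* teardown, called under rtnl_lock() */
        static void tun_teardown(struct tun_priv *p)
        {
                struct tun_sock *s = rtnl_dereference(p->sock4);

                RCU_INIT_POINTER(p->sock4, NULL);
                synchronize_net();      /* let in-flight readers finish before freeing */
                kfree(s);
        }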
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f0919bd3a563..f6382150b16a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -447,7 +447,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* Setup the sendside checksum offload only if this is not a
* GSO packet.
*/
- if (skb_is_gso(skb)) {
+ if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -607,15 +607,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
- if (csum_info) {
- /* We only look at the IP checksum here.
- * Should we be dropping the packet if checksum
- * failed? How do we deal with other checksums - TCP/UDP?
- */
- if (csum_info->receive.ip_checksum_succeeded)
+
+ /* skb is already created with CHECKSUM_NONE */
+ skb_checksum_none_assert(skb);
+
+ /*
+ * In Linux, the IP checksum is always checked.
+ * Do L4 checksum offload if enabled and present.
+ */
+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
+ csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
}
if (vlan_tci & VLAN_TAG_PRESENT)
@@ -696,12 +699,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
-
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 9fa7ac9f8e68..f355df7cf84a 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -20,7 +20,6 @@
#include <linux/skbuff.h>
#include <linux/of.h>
#include <linux/irq.h>
-#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/ieee802154.h>
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f442eb366863..0fef17874d50 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -497,6 +497,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct net_device *phy_dev;
int err;
u16 mode = IPVLAN_MODE_L3;
+ bool create = false;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -513,6 +514,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
err = ipvlan_port_create(phy_dev);
if (err < 0)
return err;
+ create = true;
}
if (data && data[IFLA_IPVLAN_MODE])
@@ -536,22 +538,27 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
err = register_netdevice(dev);
if (err < 0)
- return err;
+ goto destroy_ipvlan_port;
err = netdev_upper_dev_link(phy_dev, dev);
if (err) {
- unregister_netdevice(dev);
- return err;
+ goto unregister_netdev;
}
err = ipvlan_set_port_mode(port, mode);
if (err) {
- unregister_netdevice(dev);
- return err;
+ goto unregister_netdev;
}
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
+
+unregister_netdev:
+ unregister_netdevice(dev);
+destroy_ipvlan_port:
+ if (create)
+ ipvlan_port_destroy(phy_dev);
+ return err;
}
static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 4e3d2e7c697c..e8c3a8c32534 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -518,7 +518,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
mtt = irda_get_mtt(skb);
pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
- if (mtt)
+ if (mtt > 1000)
+ mdelay(mtt/1000);
+ else if (mtt)
udelay(mtt);
/* Enable DMA interrupt */
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3ea47f28e143..d2e61e002926 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+static bool send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+
static sci_t make_sci(u8 *addr, __be16 port)
{
sci_t sci;
@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
- const struct macsec_secy *secy, u32 pn)
+ const struct macsec_secy *secy, u32 pn,
+ bool sci_present)
{
const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
+ memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
h->eth.h_proto = htons(ETH_P_MACSEC);
- if (tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
+ if (sci_present) {
h->tci_an |= MACSEC_TCI_SC;
memcpy(&h->secure_channel_id, &secy->sci,
sizeof(h->secure_channel_id));
@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
+ bool sci_present;
u32 pn;
secy = &macsec->secy;
@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
unprotected_len = skb->len;
eth = eth_hdr(skb);
- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
+ sci_present = send_sci(secy);
+ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
- macsec_fill_sectag(hh, secy, pn);
+ macsec_fill_sectag(hh, secy, pn, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) {
- int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
+ int len = skb->len - macsec_hdr_len(sci_present) -
secy->icv_len;
aead_request_set_crypt(req, sg, sg, len, iv);
- aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
+ aead_request_set_ad(req, macsec_hdr_len(sci_present));
} else {
aead_request_set_crypt(req, sg, sg, 0, iv);
aead_request_set_ad(req, skb->len - secy->icv_len);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3234fcdea317..26d6f0bbe14b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -623,7 +623,8 @@ hash_add:
return 0;
clear_multi:
- dev_set_allmulti(lowerdev, -1);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, -1);
del_unicast:
dev_uc_del(lowerdev, dev->dev_addr);
out:
@@ -1278,6 +1279,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
struct net_device *lowerdev;
int err;
int macmode;
+ bool create = false;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -1304,12 +1306,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
err = macvlan_port_create(lowerdev);
if (err < 0)
return err;
+ create = true;
}
port = macvlan_port_get_rtnl(lowerdev);
/* Only 1 macvlan device can be created in passthru mode */
- if (port->passthru)
- return -EINVAL;
+ if (port->passthru) {
+ /* The macvlan port cannot have been created this time,
+ * still goto destroy_macvlan_port for readability.
+ */
+ err = -EINVAL;
+ goto destroy_macvlan_port;
+ }
vlan->lowerdev = lowerdev;
vlan->dev = dev;
@@ -1325,24 +1333,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
- if (port->count)
- return -EINVAL;
+ if (port->count) {
+ err = -EINVAL;
+ goto destroy_macvlan_port;
+ }
port->passthru = true;
eth_hw_addr_inherit(dev, lowerdev);
}
if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
- if (vlan->mode != MACVLAN_MODE_SOURCE)
- return -EINVAL;
+ if (vlan->mode != MACVLAN_MODE_SOURCE) {
+ err = -EINVAL;
+ goto destroy_macvlan_port;
+ }
macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
err = macvlan_changelink_sources(vlan, macmode, data);
if (err)
- return err;
+ goto destroy_macvlan_port;
}
err = register_netdevice(dev);
if (err < 0)
- return err;
+ goto destroy_macvlan_port;
dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
@@ -1357,7 +1369,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
unregister_netdev:
unregister_netdevice(dev);
-
+destroy_macvlan_port:
+ if (create)
+ macvlan_port_destroy(port->dev);
return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 070e3290aa6e..7869b0651576 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -491,7 +491,13 @@ static int macvtap_newlink(struct net *src_net,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- return macvlan_common_newlink(src_net, dev, tb, data);
+ err = macvlan_common_newlink(src_net, dev, tb, data);
+ if (err) {
+ netdev_rx_handler_unregister(dev);
+ return err;
+ }
+
+ return 0;
}
static void macvtap_dellink(struct net_device *dev,
@@ -736,13 +742,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (zerocopy)
err = zerocopy_sg_from_iter(skb, from);
- else {
+ else
err = skb_copy_datagram_from_iter(skb, 0, from, len);
- if (!err && m && m->msg_control) {
- struct ubuf_info *uarg = m->msg_control;
- uarg->callback(uarg, false);
- }
- }
if (err)
goto err_kfree;
@@ -773,7 +774,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb_shinfo(skb)->destructor_arg = m->msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ } else if (m && m->msg_control) {
+ struct ubuf_info *uarg = m->msg_control;
+ uarg->callback(uarg, false);
}
+
if (vlan) {
skb->dev = vlan->dev;
dev_queue_xmit(skb);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f279a897a5c7..a52b560e428b 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -42,19 +42,24 @@
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_SGMII 0x01
+
+#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
#define AT803X_DEBUG_REG_0 0x00
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
@@ -209,7 +214,6 @@ static int at803x_suspend(struct phy_device *phydev)
{
int value;
int wol_enabled;
- int ccr;
mutex_lock(&phydev->lock);
@@ -225,16 +229,6 @@ static int at803x_suspend(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-down SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -243,7 +237,6 @@ done:
static int at803x_resume(struct phy_device *phydev)
{
int value;
- int ccr;
mutex_lock(&phydev->lock);
@@ -251,17 +244,6 @@ static int at803x_resume(struct phy_device *phydev)
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-up SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE);
- phy_write(phydev, MII_BMCR, value);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -381,6 +363,36 @@ static void at803x_link_change_notify(struct phy_device *phydev)
}
}
+static int at803x_aneg_done(struct phy_device *phydev)
+{
+ int ccr;
+
+ int aneg_done = genphy_aneg_done(phydev);
+ if (aneg_done != BMSR_ANEGCOMPLETE)
+ return aneg_done;
+
+ /*
+ * in SGMII mode, if copper side autoneg is successful,
+ * also check SGMII side autoneg result
+ */
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII)
+ return aneg_done;
+
+ /* switch to SGMII/fiber page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
+
+ /* check if the SGMII link is OK. */
+ if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
+ pr_warn("803x_aneg_done: SGMII link is not ok\n");
+ aneg_done = 0;
+ }
+ /* switch back to copper page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
+
+ return aneg_done;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
@@ -432,6 +444,7 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
} };
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 03d54c4adc88..800b39f06279 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -19,6 +19,7 @@
#define TI_DP83848C_PHY_ID 0x20005ca0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
+#define TI_DP83822_PHY_ID 0x2000a240
/* Registers */
#define DP83848_MICR 0x11 /* MII Interrupt Control Register */
@@ -77,6 +78,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
+ { TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
@@ -105,6 +107,7 @@ static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
module_phy_driver(dp83848_driver);
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index c649c101bbab..eb5167210681 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL_GPL(fixed_phy_register);
void fixed_phy_unregister(struct phy_device *phy)
{
phy_device_remove(phy);
-
+ of_node_put(phy->mdio.dev.of_node);
fixed_phy_del(phy->mdio.addr);
}
EXPORT_SYMBOL_GPL(fixed_phy_unregister);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 081df68d2ce1..ea92d524d5a8 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -318,12 +318,12 @@ static int ksz8041_config_init(struct phy_device *phydev)
/* Limit supported and advertised modes in fiber mode */
if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
phydev->dev_flags |= MICREL_PHY_FXEN;
- phydev->supported &= SUPPORTED_FIBRE |
- SUPPORTED_100baseT_Full |
+ phydev->supported &= SUPPORTED_100baseT_Full |
SUPPORTED_100baseT_Half;
- phydev->advertising &= ADVERTISED_FIBRE |
- ADVERTISED_100baseT_Full |
+ phydev->supported |= SUPPORTED_FIBRE;
+ phydev->advertising &= ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half;
+ phydev->advertising |= ADVERTISED_FIBRE;
phydev->autoneg = AUTONEG_DISABLE;
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e977ba931878..1a4bf8acad78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
phydev = to_phy_device(d);
rc = phy_connect_direct(dev, phydev, handler, interface);
+ put_device(d);
if (rc)
return ERR_PTR(rc);
@@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phydev = to_phy_device(d);
rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
+ put_device(d);
if (rc)
return ERR_PTR(rc);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index aadd6e9f54ad..9cbe645e3d89 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -102,15 +102,19 @@ static int rtl8211f_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
- /* enable TXDLY */
- phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08);
- reg = phy_read(phydev, 0x11);
+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08);
+ reg = phy_read(phydev, 0x11);
+
+ /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
reg |= RTL8211F_TX_DELAY;
- phy_write(phydev, 0x11, reg);
- /* restore to default page 0 */
- phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
- }
+ else
+ reg &= ~RTL8211F_TX_DELAY;
+
+ phy_write(phydev, 0x11, reg);
+ /* restore to default page 0 */
+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
return 0;
}
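The RTL8211F delay handling now keys strictly off the phy-mode and always writes the bit, so a stale bootloader setting can no longer leak through: the PHY enables its internal TX clock delay only for "rgmii-id" and "rgmii-txid" and clears it otherwise. The decision in isolation, as a sketch mirroring the test in the hunk above:

        #include <linux/phy.h>

        /* Sketch: should the PHY insert its own TX clock delay for this mode? */
        static bool rgmii_wants_phy_tx_delay(phy_interface_t mode)
        {
                return mode == PHY_INTERFACE_MODE_RGMII_ID ||
                       mode == PHY_INTERFACE_MODE_RGMII_TXID;
        }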
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2e37eb337d48..24b4a09468dd 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -62,6 +62,10 @@
/* Vitesse Extended Page Access Register */
#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
+/* Vitesse VSC8601 Extended PHY Control Register 1 */
+#define MII_VSC8601_EPHY_CTL 0x17
+#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8)
+
#define PHY_ID_VSC8234 0x000fc620
#define PHY_ID_VSC8244 0x000fc6c0
#define PHY_ID_VSC8514 0x00070670
@@ -111,6 +115,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
return err;
}
+/* This adds a skew for both TX and RX clocks, so the skew should only be
+ * applied to "rgmii-id" interfaces. It may not work as expected
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
+static int vsc8601_add_skew(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_VSC8601_EPHY_CTL);
+ if (ret < 0)
+ return ret;
+
+ ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW;
+ return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret);
+}
+
+static int vsc8601_config_init(struct phy_device *phydev)
+{
+ int ret = 0;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ ret = vsc8601_add_skew(phydev);
+
+ if (ret < 0)
+ return ret;
+
+ return genphy_config_init(phydev);
+}
+
static int vsc824x_ack_interrupt(struct phy_device *phydev)
{
int err = 0;
@@ -275,7 +307,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_init = &genphy_config_init,
+ .config_init = &vsc8601_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8093e39ae263..db6acecabeaa 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1246,13 +1246,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (zerocopy)
err = zerocopy_sg_from_iter(skb, from);
- else {
+ else
err = skb_copy_datagram_from_iter(skb, 0, from, len);
- if (!err && msg_control) {
- struct ubuf_info *uarg = msg_control;
- uarg->callback(uarg, false);
- }
- }
if (err) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
@@ -1298,6 +1293,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->destructor_arg = msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ } else if (msg_control) {
+ struct ubuf_info *uarg = msg_control;
+ uarg->callback(uarg, false);
}
skb_reset_network_header(skb);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f79eb12c326a..125cff57c759 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -433,13 +433,13 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
@@ -497,13 +497,13 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 1);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index cce24950a0ab..dc7b6392e75a 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -603,12 +603,12 @@ static void ax88772_suspend(struct usbnet *dev)
u16 medium;
/* Stop MAC operation */
- medium = asix_read_medium_status(dev, 0);
+ medium = asix_read_medium_status(dev, 1);
medium &= ~AX_MEDIUM_RE;
- asix_write_medium_mode(dev, medium, 0);
+ asix_write_medium_mode(dev, medium, 1);
netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n",
- asix_read_medium_status(dev, 0));
+ asix_read_medium_status(dev, 1));
/* Preserve BMCR for restoring */
priv->presvd_phy_bmcr =
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index e6338c16081a..8a6675d92b98 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info cypress_GX3_info = {
+ .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct driver_info dlink_dub1312_info = {
.description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
.bind = ax88179_bind,
@@ -1718,6 +1731,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0b95, 0x178a),
.driver_info = (unsigned long)&ax88178a_info,
}, {
+ /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
+ USB_DEVICE(0x04b4, 0x3610),
+ .driver_info = (unsigned long)&cypress_GX3_info,
+}, {
/* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
USB_DEVICE(0x2001, 0x4a00),
.driver_info = (unsigned long)&dlink_dub1312_info,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c47ec0a04c8e..dd623f674487 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -388,12 +388,6 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
event->wValue ? "on" : "off");
-
- /* Work-around for devices with broken off-notifications */
- if (event->wValue &&
- !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state))
- usbnet_link_change(dev, 0, 0);
-
usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
@@ -466,6 +460,36 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1;
}
+/* Ensure correct link state
+ *
+ * Some devices (ZTE MF823/831/910) export two carrier on notifications when
+ * connected. This causes the link state to be incorrect. Work around this by
+ * always setting the state to off, then on.
+ */
+void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
+{
+ struct usb_cdc_notification *event;
+
+ if (urb->actual_length < sizeof(*event))
+ return;
+
+ event = urb->transfer_buffer;
+
+ if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) {
+ usbnet_cdc_status(dev, urb);
+ return;
+ }
+
+ netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
+ event->wValue ? "on" : "off");
+
+ if (event->wValue &&
+ netif_carrier_ok(dev->net))
+ netif_carrier_off(dev->net);
+
+ usbnet_link_change(dev, !!event->wValue, 0);
+}
+
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
@@ -481,7 +505,7 @@ static const struct driver_info zte_cdc_info = {
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
.bind = usbnet_cdc_zte_bind,
.unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
+ .status = usbnet_cdc_zte_status,
.set_rx_mode = usbnet_cdc_update_filter,
.manage_power = usbnet_manage_power,
.rx_fixup = usbnet_cdc_zte_rx_fixup,
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5662babf0583..3e37724d30ae 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -151,7 +151,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
- if (status < 0) {
+ if (status) {
usb_set_intfdata(intf, NULL);
usb_driver_release_interface(driver_of(intf), intf);
return status;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3ff76c6db4f6..6fe1cdb0174f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -894,6 +894,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 44d439f50961..efb84f092492 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
u8 checksum = CHECKSUM_NONE;
u32 opts2, opts3;
- if (tp->version == RTL_VER_01)
+ if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
goto return_result;
opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
checksum = CHECKSUM_NONE;
else
checksum = CHECKSUM_UNNECESSARY;
- } else if (RD_IPV6_CS) {
+ } else if (opts2 & RD_IPV6_CS) {
if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
checksum = CHECKSUM_UNNECESSARY;
else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
@@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev)
goto out;
res = usb_autopm_get_interface(tp->intf);
- if (res < 0) {
- free_all_mem(tp);
- goto out;
- }
+ if (res < 0)
+ goto out_free;
mutex_lock(&tp->control);
@@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
- free_all_mem(tp);
- } else {
- napi_enable(&tp->napi);
+ goto out_unlock;
}
+ napi_enable(&tp->napi);
mutex_unlock(&tp->control);
@@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev)
tp->pm_notifier.notifier_call = rtl_notifier;
register_pm_notifier(&tp->pm_notifier);
#endif
+ return 0;
+out_unlock:
+ mutex_unlock(&tp->control);
+ usb_autopm_put_interface(tp->intf);
+out_free:
+ free_all_mem(tp);
out:
return res;
}
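The rtl8152_open() rework above moves to the usual goto-based unwinding: each failure point jumps to the label that releases everything acquired so far, in reverse order. A self-contained sketch of that pattern, with hypothetical acquire and release helpers standing in for the memory allocation, autopm reference and interrupt URB submission:

#include <stdio.h>

static int acquire_mem(void)    { puts("alloc mem");  return 0; }
static void release_mem(void)   { puts("free mem"); }
static int acquire_power(void)  { puts("get power");  return 0; }
static void release_power(void) { puts("put power"); }
static int start_device(void)   { puts("start dev");  return -1; /* simulate failure */ }

static int open_device(void)
{
	int ret;

	ret = acquire_mem();
	if (ret)
		goto out;

	ret = acquire_power();
	if (ret)
		goto out_free;

	ret = start_device();
	if (ret)
		goto out_put;

	return 0;

out_put:
	release_power();	/* undo in reverse order of acquisition */
out_free:
	release_mem();
out:
	return ret;
}

int main(void)
{
	return open_device() ? 1 : 0;
}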
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fad84f3f4109..7276d5a95bd0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1497,6 +1497,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
netif_napi_del(&vi->rq[i].napi);
}
+ /* We called napi_hash_del() before netif_napi_del(), so we need to
+ * respect an RCU grace period before freeing vi->rq
+ */
+ synchronize_net();
+
kfree(vi->rq);
kfree(vi->sq);
}
@@ -2038,23 +2043,33 @@ static struct virtio_device_id id_table[] = {
{ 0 },
};
+#define VIRTNET_FEATURES \
+ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
+ VIRTIO_NET_F_MAC, \
+ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
+ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
+ VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
+ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
+ VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
+ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
+ VIRTIO_NET_F_CTRL_MAC_ADDR, \
+ VIRTIO_NET_F_MTU
+
static unsigned int features[] = {
- VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
- VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
- VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
- VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
- VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
- VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
- VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
- VIRTIO_NET_F_CTRL_MAC_ADDR,
+ VIRTNET_FEATURES,
+};
+
+static unsigned int features_legacy[] = {
+ VIRTNET_FEATURES,
+ VIRTIO_NET_F_GSO,
VIRTIO_F_ANY_LAYOUT,
- VIRTIO_NET_F_MTU,
};
static struct virtio_driver virtio_net_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
+ .feature_table_legacy = features_legacy,
+ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
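The virtio_net change above keeps one shared feature list in a macro so the modern and legacy tables cannot drift apart, and appends legacy-only features (GSO, ANY_LAYOUT) only to the legacy table. A hedged standalone sketch of that pattern, using illustrative feature names rather than the real VIRTIO_NET_F_* values:

#include <stdio.h>

enum { FEAT_CSUM, FEAT_MAC, FEAT_MRG_RXBUF, FEAT_GSO, FEAT_ANY_LAYOUT };

#define COMMON_FEATURES \
	FEAT_CSUM, FEAT_MAC, FEAT_MRG_RXBUF

static const unsigned int features[] = { COMMON_FEATURES };

static const unsigned int features_legacy[] = {
	COMMON_FEATURES,
	FEAT_GSO,		/* legacy-only */
	FEAT_ANY_LAYOUT,	/* legacy-only */
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	printf("modern: %zu features, legacy: %zu features\n",
	       ARRAY_SIZE(features), ARRAY_SIZE(features_legacy));
	return 0;
}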
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b5554f2ebee4..ef83ae3b0a44 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2279,6 +2279,7 @@ vmxnet3_set_mc(struct net_device *netdev)
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
dma_addr_t new_table_pa = 0;
+ bool new_table_pa_valid = false;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
@@ -2307,13 +2308,15 @@ vmxnet3_set_mc(struct net_device *netdev)
new_table,
sz,
PCI_DMA_TODEVICE);
+ if (!dma_mapping_error(&adapter->pdev->dev,
+ new_table_pa)) {
+ new_mode |= VMXNET3_RXM_MCAST;
+ new_table_pa_valid = true;
+ rxConf->mfTablePA = cpu_to_le64(
+ new_table_pa);
+ }
}
-
- if (!dma_mapping_error(&adapter->pdev->dev,
- new_table_pa)) {
- new_mode |= VMXNET3_RXM_MCAST;
- rxConf->mfTablePA = cpu_to_le64(new_table_pa);
- } else {
+ if (!new_table_pa_valid) {
netdev_info(netdev,
"failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
@@ -2338,7 +2341,7 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- if (new_table_pa)
+ if (new_table_pa_valid)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
kfree(new_table);
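The vmxnet3 fix above records the success of the DMA mapping in an explicit flag instead of inferring it from the returned handle, since a handle of 0 can be a perfectly valid address. A small illustrative sketch of the same pattern, where map_buffer()/unmap_buffer() are hypothetical stand-ins for the DMA API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int map_buffer(const void *buf, size_t len, uint64_t *handle)
{
	(void)buf; (void)len;
	*handle = 0;		/* 0 is a legal handle here */
	return 0;		/* success */
}

static void unmap_buffer(uint64_t handle)
{
	printf("unmapped handle %llu\n", (unsigned long long)handle);
}

int main(void)
{
	char table[64];
	uint64_t handle = 0;
	bool handle_valid = false;

	if (map_buffer(table, sizeof(table), &handle) == 0)
		handle_valid = true;	/* record success explicitly */

	/* ... use the mapping ... */

	if (handle_valid)		/* not "if (handle)" */
		unmap_buffer(handle);
	return 0;
}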
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 85c271c70d42..820de6a9ddde 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -956,6 +956,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (skb->pkt_type == PACKET_LOOPBACK) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
skb->pkt_type = PACKET_HOST;
goto out;
}
@@ -996,6 +997,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
{
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IPCB(skb)->flags |= IPSKB_L3SLAVE;
/* loopback traffic; do not push through packet taps again.
* Reset pkt_type for upper layers to process skb
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e7d16687538b..2ba01ca02c9c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -583,7 +583,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
}
}
- pp = eth_gro_receive(head, skb);
+ pp = call_gro_receive(eth_gro_receive, head, skb);
flush = 0;
out:
@@ -611,6 +611,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int notify = 0;
+ int rc;
f = __vxlan_find_mac(vxlan, mac);
if (f) {
@@ -641,8 +642,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
if ((flags & NLM_F_APPEND) &&
(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
- int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
- &rd);
+ rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
if (rc < 0)
return rc;
@@ -673,7 +673,11 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
INIT_LIST_HEAD(&f->remotes);
memcpy(f->eth_addr, mac, ETH_ALEN);
- vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+ rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+ if (rc < 0) {
+ kfree(f);
+ return rc;
+ }
++vxlan->addrcnt;
hlist_add_head_rcu(&f->hlist,
@@ -943,17 +947,22 @@ static bool vxlan_snoop(struct net_device *dev,
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+ struct vxlan_sock *sock4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_sock *sock6;
+#endif
unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+ sock4 = rtnl_dereference(dev->vn4_sock);
+
/* The vxlan_sock is only used by dev, leaving group has
* no effect on other vxlan devices.
*/
- if (family == AF_INET && dev->vn4_sock &&
- atomic_read(&dev->vn4_sock->refcnt) == 1)
+ if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
return false;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && dev->vn6_sock &&
- atomic_read(&dev->vn6_sock->refcnt) == 1)
+ sock6 = rtnl_dereference(dev->vn6_sock);
+ if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
return false;
#endif
@@ -961,10 +970,12 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
+ if (family == AF_INET &&
+ rtnl_dereference(vxlan->vn4_sock) != sock4)
continue;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
+ if (family == AF_INET6 &&
+ rtnl_dereference(vxlan->vn6_sock) != sock6)
continue;
#endif
@@ -1005,22 +1016,25 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
- bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
- bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ rcu_assign_pointer(vxlan->vn6_sock, NULL);
#endif
+ rcu_assign_pointer(vxlan->vn4_sock, NULL);
synchronize_net();
- if (ipv4) {
- udp_tunnel_sock_release(vxlan->vn4_sock->sock);
- kfree(vxlan->vn4_sock);
+ if (__vxlan_sock_release_prep(sock4)) {
+ udp_tunnel_sock_release(sock4->sock);
+ kfree(sock4);
}
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6) {
- udp_tunnel_sock_release(vxlan->vn6_sock->sock);
- kfree(vxlan->vn6_sock);
+ if (__vxlan_sock_release_prep(sock6)) {
+ udp_tunnel_sock_release(sock6->sock);
+ kfree(sock6);
}
#endif
}
@@ -1036,18 +1050,21 @@ static int vxlan_igmp_join(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1067,18 +1084,21 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_leave_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1828,11 +1848,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
int err;
+ if (!sock6)
+ return ERR_PTR(-EIO);
+
if (tos && !info)
use_cache = false;
if (use_cache) {
@@ -1850,7 +1874,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_proto = IPPROTO_UDP;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
- vxlan->vn6_sock->sock->sk,
+ sock6->sock->sk,
&ndst, &fl6);
if (err < 0)
return ERR_PTR(err);
@@ -1995,9 +2019,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (dst->sa.sa_family == AF_INET) {
- if (!vxlan->vn4_sock)
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
+
+ if (!sock4)
goto drop;
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
rt = vxlan_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2050,12 +2076,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
u32 rt6i_flags;
- if (!vxlan->vn6_sock)
+ if (!sock6)
goto drop;
- sk = vxlan->vn6_sock->sock->sk;
+ sk = sock6->sock->sk;
ndst = vxlan6_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2415,9 +2442,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
if (ip_tunnel_info_af(info) == AF_INET) {
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
- if (!vxlan->vn4_sock)
+ if (!sock4)
return -EINVAL;
rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
@@ -2429,8 +2457,6 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
struct dst_entry *ndst;
- if (!vxlan->vn6_sock)
- return -EINVAL;
ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, NULL, info);
@@ -2740,10 +2766,10 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- vxlan->vn6_sock = vs;
+ rcu_assign_pointer(vxlan->vn6_sock, vs);
else
#endif
- vxlan->vn4_sock = vs;
+ rcu_assign_pointer(vxlan->vn4_sock, vs);
vxlan_vs_add_dev(vs, vxlan);
return 0;
}
@@ -2754,9 +2780,9 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
int ret = 0;
- vxlan->vn4_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
- vxlan->vn6_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
if (ipv6 || metadata)
ret = __vxlan_sock_add(vxlan, true);
#endif
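The vxlan conversion above accesses vn4_sock/vn6_sock through rcu_dereference()/rtnl_dereference() and enforces a retire-then-free order: the shared pointer is cleared first, a grace period is waited out, and only then is the socket freed. A single-threaded, hedged sketch of that ordering, where publish()/wait_for_readers() are hypothetical stand-ins for rcu_assign_pointer() and synchronize_net():

#include <stdio.h>
#include <stdlib.h>

struct sock_wrap { int refcnt; };

static struct sock_wrap *shared_sock;	/* read by lookup paths */

static void publish(struct sock_wrap *s)
{
	shared_sock = s;		/* kernel: rcu_assign_pointer() */
}

static void wait_for_readers(void)
{
	/* kernel: synchronize_net() waits out all RCU readers */
	puts("grace period elapsed");
}

static void retire_and_free(void)
{
	struct sock_wrap *old = shared_sock;

	shared_sock = NULL;		/* step 1: unpublish the pointer */
	wait_for_readers();		/* step 2: wait for readers to finish */
	free(old);			/* step 3: now it is safe to free */
}

int main(void)
{
	publish(calloc(1, sizeof(struct sock_wrap)));
	retire_and_free();
	return 0;
}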
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 33ab3345d333..4e9fe75d7067 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -294,7 +294,7 @@ config FSL_UCC_HDLC
config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index d06a887a2352..b776a0ab106c 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -223,12 +223,19 @@ static int slic_ds26522_probe(struct spi_device *spi)
return ret;
}
+static const struct spi_device_id slic_ds26522_id[] = {
+ { .name = "ds26522" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(spi, slic_ds26522_id);
+
static const struct of_device_id slic_ds26522_match[] = {
{
.compatible = "maxim,ds26522",
},
{},
};
+MODULE_DEVICE_TABLE(of, slic_ds26522_match);
static struct spi_driver slic_ds26522_driver = {
.driver = {
@@ -239,6 +246,7 @@ static struct spi_driver slic_ds26522_driver = {
},
.probe = slic_ds26522_probe,
.remove = slic_ds26522_remove,
+ .id_table = slic_ds26522_id,
};
static int __init slic_ds26522_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index dda49af1eb74..521f1c55c19e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -450,6 +450,7 @@ struct ath10k_debug {
u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
+ void *cal_data;
struct ath10k_fw_crash_data *fw_crash_data;
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 832da6ed9f13..82a4c67f3672 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -30,6 +30,8 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
@@ -1451,56 +1453,51 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
{
- struct ath10k *ar = inode->i_private;
- void *buf;
u32 hi_addr;
__le32 addr;
int ret;
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH10K_STATE_ON &&
- ar->state != ATH10K_STATE_UTF) {
- ret = -ENETDOWN;
- goto err;
- }
+ lockdep_assert_held(&ar->conf_mutex);
- buf = vmalloc(ar->hw_params.cal_data_len);
- if (!buf) {
- ret = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
if (ret) {
- ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
- goto err_vfree;
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
}
- ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
- goto err_vfree;
+ return ret;
}
- file->private_data = buf;
+ return 0;
+}
- mutex_unlock(&ar->conf_mutex);
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
- return 0;
+ mutex_lock(&ar->conf_mutex);
-err_vfree:
- vfree(buf);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
-err:
+ file->private_data = ar;
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static ssize_t ath10k_debug_cal_data_read(struct file *file,
@@ -1508,18 +1505,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- void *buf = file->private_data;
- return simple_read_from_buffer(user_buf, count, ppos,
- buf, ar->hw_params.cal_data_len);
-}
+ mutex_lock(&ar->conf_mutex);
-static int ath10k_debug_cal_data_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
- return 0;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
}
static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1580,7 +1575,6 @@ static const struct file_operations fops_ani_enable = {
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
- .release = ath10k_debug_cal_data_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
@@ -1932,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_cal_data_fetch(ar);
+
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
@@ -2344,6 +2340,10 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
@@ -2357,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
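The ath10k change above allocates the calibration buffer once at create time, refreshes it while the device is up, and serves debugfs reads from it under the configuration mutex, so open() no longer allocates or fails. A self-contained userspace sketch of that read path; read_from_buffer() mimics what simple_read_from_buffer() does with the file offset, and all names here are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CAL_DATA_LEN 32

static char *cal_data;				/* allocated once at create time */
static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;

static size_t read_from_buffer(char *dst, size_t count, size_t *pos,
			       const char *src, size_t src_len)
{
	size_t avail = *pos < src_len ? src_len - *pos : 0;
	size_t n = count < avail ? count : avail;

	memcpy(dst, src + *pos, n);
	*pos += n;
	return n;
}

static size_t cal_data_read(char *dst, size_t count, size_t *pos)
{
	size_t n;

	pthread_mutex_lock(&conf_mutex);	/* reads are serialized */
	n = read_from_buffer(dst, count, pos, cal_data, CAL_DATA_LEN);
	pthread_mutex_unlock(&conf_mutex);
	return n;
}

int main(void)
{
	char out[8];
	size_t pos = 0, n;

	cal_data = calloc(1, CAL_DATA_LEN);	/* "vzalloc at create time" */
	memset(cal_data, 'c', CAL_DATA_LEN);	/* "fetched while device is on" */

	while ((n = cal_data_read(out, sizeof(out), &pos)) > 0)
		printf("read %zu bytes at offset %zu\n", n, pos - n);

	free(cal_data);
	return 0;
}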
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index eab0ab976af2..76eb33679d4b 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1401,6 +1401,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
{},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index b6f064a8d264..7e27a06e5df1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -33,7 +33,6 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
- TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
- case TEMP_COMP_CAL:
- ath_dbg(common, CALIBRATE,
- "starting Temperature Compensation Calibration\n");
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
- break;
default:
ath_err(common, "Invalid calibration type\n");
break;
@@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Accumulate cal measures for active chains
*/
- if (cur_caldata->calCollect)
- cur_caldata->calCollect(ah);
+ cur_caldata->calCollect(ah);
ah->cal_samples++;
if (ah->cal_samples >= cur_caldata->calNumSamples) {
@@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Process accumulated data
*/
- if (cur_caldata->calPostProc)
- cur_caldata->calPostProc(ah, numChains);
+ cur_caldata->calPostProc(ah, numChains);
/* Calibration has finished. */
caldata->CalValid |= cur_caldata->calType;
@@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
ar9003_hw_iqcalibrate
};
-static const struct ath9k_percal_data temp_cal_single_sample = {
- TEMP_COMP_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
-};
-
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->temp_caldata.calData = &temp_cal_single_sample;
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
@@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
+ ah->supp_cals = IQ_MISMATCH_CAL;
}
#define OFF_UPPER_LT 24
@@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- INIT_CAL(&ah->temp_caldata);
- INSERT_CAL(ah, &ah->temp_caldata);
-
/* Initialize current pointer to first element in list */
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2a5d3ad1169c..9cbca1229bac 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -830,7 +830,6 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
struct ath9k_cal_list iq_caldata;
- struct ath9k_cal_list temp_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
struct ath9k_cal_list *cal_list;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b777e1b2f87a..78d9966a3957 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
/* store current 11d setting */
if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
&ifp->vif->is_11d)) {
- supports_11d = false;
+ is_11d = supports_11d = false;
} else {
country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
settings->beacon.tail_len,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4fdc3dad3e85..b88e2048ae0b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
return ret;
+ } else {
+ /* In theory, we wouldn't have to stop a running sched
+ * scan in order to start another one (for
+ * net-detect). But in practice this doesn't seem to
+ * work properly, so stop any running sched_scan now.
+ */
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+ if (ret)
+ return ret;
}
/* rfkill release can be either for wowlan or netdetect */
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
out:
if (ret < 0) {
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
- ieee80211_restart_hw(mvm->hw);
+ if (mvm->restart_fw > 0) {
+ mvm->restart_fw--;
+ ieee80211_restart_hw(mvm->hw);
+ }
iwl_mvm_free_nd(mvm);
}
out_noreset:
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_update_changed_regdom(mvm);
if (mvm->net_detect) {
+ /* If this is a non-unified image, we restart the FW,
+ * so no need to stop the netdetect scan. If that
+ * fails, continue and try to get the wake-up reasons,
+ * but trigger a HW restart by keeping a failure code
+ * in ret.
+ */
+ if (unified_image)
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+ false);
+
iwl_mvm_query_netdetect_reasons(mvm, vif);
/* has unlocked the mutex, so skip that */
goto out;
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
struct iwl_mvm *mvm = inode->i_private;
- int remaining_time = 10;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
mvm->d3_test_active = false;
@@ -2282,17 +2305,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_abort_notification_waits(&mvm->notif_wait);
- ieee80211_restart_hw(mvm->hw);
+ if (!unified_image) {
+ int remaining_time = 10;
- /* wait for restart and disconnect all interfaces */
- while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- remaining_time > 0) {
- remaining_time--;
- msleep(1000);
- }
+ ieee80211_restart_hw(mvm->hw);
+
+ /* wait for restart and disconnect all interfaces */
+ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ remaining_time > 0) {
+ remaining_time--;
+ msleep(1000);
+ }
- if (remaining_time == 0)
- IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
+ if (remaining_time == 0)
+ IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
+ }
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 07da4efe8458..7b7d2a146e30 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
.data = { &cmd, },
.len = { sizeof(cmd) },
};
- size_t delta, len;
- ssize_t ret;
+ size_t delta;
+ ssize_t ret, len;
hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
DEBUG_GROUP, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 318efd814037..1db1dc13e988 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
struct iwl_mvm_internal_rxq_notif *notif,
u32 size)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
int ret;
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
}
if (notif->sync)
- ret = wait_event_timeout(notif_waitq,
+ ret = wait_event_timeout(mvm->rx_sync_waitq,
atomic_read(&mvm->queue_sync_counter) == 0,
HZ);
WARN_ON_ONCE(!ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d17cbf603f7c..c60703e0c246 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -937,6 +937,7 @@ struct iwl_mvm {
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
spinlock_t d0i3_tx_lock;
wait_queue_head_t d0i3_exit_waitq;
+ wait_queue_head_t rx_sync_waitq;
/* BT-Coex */
struct iwl_bt_coex_profile_notif last_bt_notif;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 05fe6dd1a2c8..4d35deb628bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
spin_lock_init(&mvm->refs_lock);
skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq);
+ init_waitqueue_head(&mvm->rx_sync_waitq);
atomic_set(&mvm->queue_sync_counter, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a57c6ef5bc14..6c802cee900c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
"Received expired RX queue sync message\n");
return;
}
- atomic_dec(&mvm->queue_sync_counter);
+ if (!atomic_dec_return(&mvm->queue_sync_counter))
+ wake_up(&mvm->rx_sync_waitq);
}
switch (internal_notif->type) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f279fdd6eb44..fa9743205491 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
{
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
/* This looks a bit arbitrary, but the idea is that if we run
* out of possible simultaneous scans and the userspace is
* trying to run a scan type that is already running, we
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EBUSY;
return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
case IWL_MVM_SCAN_NETDETECT:
- /* No need to stop anything for net-detect since the
- * firmware is restarted anyway. This way, any sched
- * scans that were running will be restarted when we
- * resume.
- */
- return 0;
+ /* For non-unified images, there's no need to stop
+ * anything for net-detect since the firmware is
+ * restarted anyway. This way, any sched scans that
+ * were running will be restarted when we resume.
+ */
+ if (!unified_image)
+ return 0;
+
+ /* If this is a unified image and we ran out of scans,
+ * we need to stop something. Prefer stopping regular
+ * scans, because the results are useless at this
+ * point, and we should be able to keep running
+ * another scheduled scan while suspended.
+ */
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
+ true);
+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
+ true);
+
+ /* fall through, something is wrong if no scan was
+ * running but we ran out of scans.
+ */
default:
WARN_ON(1);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 001be406a3d3..2f8134b2a504 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#ifdef CONFIG_ACPI
-#define SPL_METHOD "SPLC"
-#define SPL_DOMAINTYPE_MODULE BIT(0)
-#define SPL_DOMAINTYPE_WIFI BIT(1)
-#define SPL_DOMAINTYPE_WIGIG BIT(2)
-#define SPL_DOMAINTYPE_RFEM BIT(3)
+#define ACPI_SPLC_METHOD "SPLC"
+#define ACPI_SPLC_DOMAIN_WIFI (0x07)
-static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
{
- union acpi_object *limits, *domain_type, *power_limit;
-
- if (splx->type != ACPI_TYPE_PACKAGE ||
- splx->package.count != 2 ||
- splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
- splx->package.elements[0].integer.value != 0) {
- IWL_ERR(trans, "Unsupported splx structure\n");
+ union acpi_object *data_pkg, *dflt_pwr_limit;
+ int i;
+
+ /* We need at least two elements, one for the revision and one
+ * for the data itself. Also check that the revision is
+ * supported (currently only revision 0).
+ */
+ if (splc->type != ACPI_TYPE_PACKAGE ||
+ splc->package.count < 2 ||
+ splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splc->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_INFO(trans,
+ "Unsupported structure returned by the SPLC method. Ignoring.\n");
return 0;
}
- limits = &splx->package.elements[1];
- if (limits->type != ACPI_TYPE_PACKAGE ||
- limits->package.count < 2 ||
- limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
- limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
- IWL_ERR(trans, "Invalid limits element\n");
- return 0;
+ /* loop through all the packages to find the one for WiFi */
+ for (i = 1; i < splc->package.count; i++) {
+ union acpi_object *domain;
+
+ data_pkg = &splc->package.elements[i];
+
+ /* Skip anything that is not a package with the right
+ * number of elements (i.e. at least 2 integers).
+ */
+ if (data_pkg->type != ACPI_TYPE_PACKAGE ||
+ data_pkg->package.count < 2 ||
+ data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ continue;
+
+ domain = &data_pkg->package.elements[0];
+ if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
+ break;
+
+ data_pkg = NULL;
}
- domain_type = &limits->package.elements[0];
- power_limit = &limits->package.elements[1];
- if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
- IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+ if (!data_pkg) {
+ IWL_DEBUG_INFO(trans,
+ "No element for the WiFi domain returned by the SPLC method.\n");
return 0;
}
- return power_limit->integer.value;
+ dflt_pwr_limit = &data_pkg->package.elements[1];
+ return dflt_pwr_limit->integer.value;
}
static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
{
acpi_handle pxsx_handle;
acpi_handle handle;
- struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
pxsx_handle = ACPI_HANDLE(&pdev->dev);
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
}
/* Get the method's handle */
- status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
+ &handle);
if (ACPI_FAILURE(status)) {
- IWL_DEBUG_INFO(trans, "SPL method not found\n");
+ IWL_DEBUG_INFO(trans, "SPLC method not found\n");
return;
}
/* Call SPLC with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ status = acpi_evaluate_object(handle, NULL, NULL, &splc);
if (ACPI_FAILURE(status)) {
IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
return;
}
- trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
trans->dflt_pwr_limit);
- kfree(splx.pointer);
+ kfree(splc.pointer);
}
#else /* CONFIG_ACPI */
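The reworked SPLC parsing above no longer assumes a fixed two-element layout: it walks the packages that follow the revision entry until it finds the one whose first element is the WiFi domain, and takes that package's second element as the power limit. A simplified standalone sketch of that lookup, with struct pkg standing in for union acpi_object:

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_WIFI 0x07

struct pkg { uint64_t domain; uint64_t pwr_limit; };

static uint64_t splc_get_pwr_limit(const struct pkg *pkgs, int count)
{
	int i;

	/* index 0 is the revision entry; data packages follow it */
	for (i = 1; i < count; i++) {
		if (pkgs[i].domain == DOMAIN_WIFI)
			return pkgs[i].pwr_limit;	/* found the WiFi domain */
	}
	return 0;					/* no WiFi entry: no limit */
}

int main(void)
{
	const struct pkg splc[] = {
		{ 0, 0 },		/* revision */
		{ 0x03, 1500 },		/* some other domain */
		{ DOMAIN_WIFI, 1300 },	/* WiFi domain */
	};

	printf("default power limit: %llu\n",
	       (unsigned long long)splc_get_pwr_limit(splc, 3));
	return 0;
}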
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index e9a278b60dfd..5f840f16f40b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -592,6 +592,7 @@ error:
static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, u32 txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
txq->need_update = false;
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
return ret;
spin_lock_init(&txq->lock);
+
+ if (txq_id == trans_pcie->cmd_queue) {
+ static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
+
+ lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
+ }
+
__skb_queue_head_init(&txq->overflow_q);
/*
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 431f13b4faf6..d3bad5779376 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -826,7 +826,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
data->bcn_delta = do_div(delta, bcn_int);
} else {
data->tsf_offset -= delta;
- data->bcn_delta = -do_div(delta, bcn_int);
+ data->bcn_delta = -(s64)do_div(delta, bcn_int);
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 94480123efa3..274dd5a1574a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev.iftype, 0, false);
+ priv->wdev.iftype, 0, NULL, NULL);
while (!skb_queue_empty(&list)) {
struct rx_packet_hdr *rx_hdr;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 39ce76ad00bc..16241d21727b 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2222,8 +2222,9 @@ done:
is_scanning_required = 1;
} else {
mwifiex_dbg(priv->adapter, MSG,
- "info: trying to associate to '%s' bssid %pM\n",
- (char *)req_ssid.ssid, bss->bssid);
+ "info: trying to associate to '%.*s' bssid %pM\n",
+ req_ssid.ssid_len, (char *)req_ssid.ssid,
+ bss->bssid);
memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
break;
}
@@ -2283,8 +2284,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
mwifiex_dbg(adapter, INFO,
- "info: Trying to associate to %s and bssid %pM\n",
- (char *)sme->ssid, sme->bssid);
+ "info: Trying to associate to %.*s and bssid %pM\n",
+ (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
if (!mwifiex_stop_bg_scan(priv))
cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
@@ -2417,8 +2418,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
}
mwifiex_dbg(priv->adapter, MSG,
- "info: trying to join to %s and bssid %pM\n",
- (char *)params->ssid, params->bssid);
+ "info: trying to join to %.*s and bssid %pM\n",
+ params->ssid_len, (char *)params->ssid, params->bssid);
mwifiex_set_ibss_params(priv, params);
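The mwifiex logging fixes above print SSIDs with a "%.*s" precision taken from the stored length, because SSIDs are length-delimited byte buffers with no terminating NUL. A tiny self-contained example of that technique:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* buffer deliberately has no terminating NUL after the SSID bytes */
	char ssid[32];
	int ssid_len = 7;

	memset(ssid, 'X', sizeof(ssid));
	memcpy(ssid, "HomeNet", ssid_len);

	/* precision bounds the read, so no NUL terminator is required */
	printf("trying to associate to '%.*s'\n", ssid_len, ssid);
	return 0;
}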
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 1016628926d2..08d587a342d3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -238,7 +238,7 @@ struct rtl8xxxu_rxdesc16 {
u32 pattern1match:1;
u32 pattern0match:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
#if 0
u32 bassn:12;
u32 bavld:1;
@@ -368,7 +368,7 @@ struct rtl8xxxu_rxdesc24 {
u32 ldcp:1;
u32 splcp:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
};
struct rtl8xxxu_txdesc32 {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index df54d27e7851..a793fedc3654 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1461,7 +1461,9 @@ static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
int count, ret = 0;
/* Turn off RF */
- rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+ val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
+ val8 &= ~RF_ENABLE;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
/* Switch DPDT_SEL_P output from register 0x65[2] */
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
@@ -1593,6 +1595,10 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
val8 |= BIT(5);
rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 6c086b5657e9..02b8ddd98a95 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
/*
* No indication anywhere as to what 0x0790 does. The 2 antenna
* vendor code preserves bits 6-7 here.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index b2d7f6e69667..a5e6ec2152bf 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5197,7 +5197,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
sizeof(struct rtl8xxxu_rxdesc16), 128);
- if (pkt_cnt > 1)
+ /*
+ * Only clone the skb if there's enough data at the end to
+ * at least cover the rx descriptor
+ */
+ if (pkt_cnt > 1 &&
+ urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
next_skb = skb_clone(skb, GFP_ATOMIC);
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5215,7 +5220,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
@@ -5285,7 +5290,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f95760c13c56..8e7f23c11680 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -111,7 +111,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
if (!err)
goto found_alt;
}
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index e7b11b40e68d..f361808def47 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -86,6 +86,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 tid;
+ char *fw_name;
rtl8188ee_bt_reg_init(hw);
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -169,10 +170,10 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin";
+ fw_name = "rtlwifi/rtl8188efw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -284,7 +285,6 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl88e_pci",
- .fw_name = "rtlwifi/rtl8188efw.bin",
.ops = &rtl8188ee_hal_ops,
.mod_params = &rtl88ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 87aa209ae325..8b6e37ce3f66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -96,6 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8192cfwU.bin";
rtl8192ce_bt_reg_init(hw);
@@ -167,15 +168,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
- !IS_92C_SERIAL(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
- else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+ if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU_B.bin";
rtlpriv->max_fw_size = 0x4000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -262,7 +260,6 @@ static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92c_pci",
- .fw_name = "rtlwifi/rtl8192cfw.bin",
.ops = &rtl8192ce_hal_ops,
.mod_params = &rtl92ce_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 7c6f7f0d18c6..f953320f0e23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -59,6 +59,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
+ char *fw_name;
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -77,18 +78,18 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
}
if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
!IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+ fw_name = "rtlwifi/rtl8192cufw_A.bin";
} else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ fw_name = "rtlwifi/rtl8192cufw_B.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
}
/* provide name of alternative file */
rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
- pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware %s\n", fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->fw_name, rtlpriv->io.dev,
+ fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
return err;
}
@@ -187,7 +188,6 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
- .fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
.mod_params = &rtl92cu_mod_params,
.usb_interface_cfg = &rtl92cu_interface_cfg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 0538a4d09568..1ebfee18882f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -92,6 +92,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ char *fw_name = "rtlwifi/rtl8192defw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -181,10 +182,10 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = 0x8000;
pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware file %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8192de",
- .fw_name = "rtlwifi/rtl8192defw.bin",
.ops = &rtl8192de_hal_ops,
.mod_params = &rtl92de_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index ac299cbe59b0..46b605de36e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -91,6 +91,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
+ char *fw_name;
rtl92ee_bt_reg_init(hw);
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
@@ -170,11 +171,11 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin";
+ fw_name = "rtlwifi/rtl8192eefw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92ee_pci",
- .fw_name = "rtlwifi/rtl8192eefw.bin",
.ops = &rtl8192ee_hal_ops,
.mod_params = &rtl92ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 5e8e02d5de8a..3e1eaeac4fdc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -89,12 +89,13 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
struct ieee80211_hw *hw = context;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rt_firmware *pfirmware = NULL;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
return;
}
@@ -117,6 +118,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
u16 earlyrxthreshold = 7;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -214,9 +216,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
sizeof(struct fw_hdr);
pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
- "Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ "Loading firmware %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl92se_fw_cb);
if (err) {
@@ -310,7 +312,6 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = {
.bar_id = 1,
.write_readback = false,
.name = "rtl92s_pci",
- .fw_name = "rtlwifi/rtl8192sefw.bin",
.ops = &rtl8192se_hal_ops,
.mod_params = &rtl92se_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 89c828ad89f4..c51a9e8234e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -94,6 +94,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
int err = 0;
+ char *fw_name = "rtlwifi/rtl8723fw.bin";
rtl8723e_bt_reg_init(hw);
@@ -176,14 +177,12 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- if (IS_VENDOR_8723_A_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin";
- else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin";
+ if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8723fw_B.bin";
rtlpriv->max_fw_size = 0x6000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +279,6 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723e_pci",
- .fw_name = "rtlwifi/rtl8723efw.bin",
.ops = &rtl8723e_hal_ops,
.mod_params = &rtl8723e_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 20b53f035483..847644d1f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -91,6 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8723befw.bin";
rtl8723be_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -184,8 +185,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
}
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +281,6 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723be_pci",
- .fw_name = "rtlwifi/rtl8723befw.bin",
.ops = &rtl8723be_hal_ops,
.mod_params = &rtl8723be_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 22f687b1f133..297938e0effd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -93,6 +93,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name, *wowlan_fw_name;
rtl8821ae_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -203,17 +204,17 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8812aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8821aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
}
rtlpriv->max_fw_size = 0x8000;
/*load normal firmware*/
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -222,9 +223,9 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
/*load wowlan firmware*/
- pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
+ pr_info("Using firmware %s\n", wowlan_fw_name);
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->wowlan_fw_name,
+ wowlan_fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_wowlan_fw_cb);
if (err) {
@@ -320,7 +321,6 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8821ae_pci",
- .fw_name = "rtlwifi/rtl8821aefw.bin",
.ops = &rtl8821ae_hal_ops,
.mod_params = &rtl8821ae_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 595f7d5d091a..dafe486f8448 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2278,9 +2278,7 @@ struct rtl_hal_cfg {
u8 bar_id;
bool write_readback;
char *name;
- char *fw_name;
char *alt_fw_name;
- char *wowlan_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index a6e94b1a12cb..47fe7f96a242 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -391,7 +391,6 @@ static void wl1271_remove(struct sdio_func *func)
pm_runtime_get_noresume(&func->dev);
platform_device_unregister(glue->core);
- kfree(glue);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e17879dd5d5a..bf2744e1e3db 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx_skbs[id] = skb;
ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
- BUG_ON((signed short)ref < 0);
+ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
queue->grant_rx_ref[id] = ref;
page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
+ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
gfn, GNTMAP_readonly);
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 83deda4bb4d6..6f9563a96488 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
return -ENOMEM;
bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length);
- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ if (bytes_recv < 0 || bytes_recv < if_version_length) {
pr_err("Could not read IF version\n");
r = -EIO;
goto err;
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 0d5c29ae51de..7310a261c858 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -112,17 +112,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
module_param_named(xeon_b2b_usd_bar4_addr64,
xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
"XEON B2B USD BAR 4 64-bit address");
module_param_named(xeon_b2b_usd_bar4_addr32,
xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
"XEON B2B USD split-BAR 4 32-bit address");
module_param_named(xeon_b2b_usd_bar5_addr32,
xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
"XEON B2B USD split-BAR 5 32-bit address");
module_param_named(xeon_b2b_dsd_bar2_addr64,
@@ -132,17 +132,17 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
module_param_named(xeon_b2b_dsd_bar4_addr64,
xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
"XEON B2B DSD BAR 4 64-bit address");
module_param_named(xeon_b2b_dsd_bar4_addr32,
xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
"XEON B2B DSD split-BAR 4 32-bit address");
module_param_named(xeon_b2b_dsd_bar5_addr32,
xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
"XEON B2B DSD split-BAR 5 32-bit address");
#ifndef ioread64
@@ -1755,6 +1755,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
XEON_B2B_MIN_SIZE);
if (!ndev->peer_mmio)
return -EIO;
+
+ ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
}
return 0;
@@ -2019,6 +2021,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
goto err_mmio;
}
ndev->peer_mmio = ndev->self_mmio;
+ ndev->peer_addr = pci_resource_start(pdev, 0);
return 0;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 8601c10acf74..4eb8adb34508 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -257,7 +257,7 @@ enum {
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
#define DMA_RETRIES 20
-#define DMA_OUT_RESOURCE_TO 50
+#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 6a50f20bf1cd..e75d4fdc0866 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -72,7 +72,7 @@
#define MAX_THREADS 32
#define MAX_TEST_SIZE SZ_1M
#define MAX_SRCS 32
-#define DMA_OUT_RESOURCE_TO 50
+#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
#define DMA_RETRIES 20
#define SZ_4G (1ULL << 32)
#define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */
@@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
return -ENOMEM;
if (mutex_is_locked(&perf->run_mutex)) {
- out_off = snprintf(buf, 64, "running\n");
+ out_off = scnprintf(buf, 64, "running\n");
goto read_from_buf;
}
@@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
break;
if (pctx->status) {
- out_off += snprintf(buf + out_off, 1024 - out_off,
+ out_off += scnprintf(buf + out_off, 1024 - out_off,
"%d: error %d\n", i,
pctx->status);
continue;
}
rate = div64_u64(pctx->copied, pctx->diff_us);
- out_off += snprintf(buf + out_off, 1024 - out_off,
+ out_off += scnprintf(buf + out_off, 1024 - out_off,
"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
i, pctx->copied, pctx->diff_us, rate);
}
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 7d311799fca1..435861189d97 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
static unsigned long db_init = 0x7;
module_param(db_init, ulong, 0644);
-MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer");
+MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
struct pp_ctx {
struct ntb_dev *ntb;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index f5e3011e31fc..5daf2f4be0cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -612,7 +612,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
ret = nvm_register(dev);
- ns->lba_shift = ilog2(dev->sec_size) - 9;
+ ns->lba_shift = ilog2(dev->sec_size);
if (sysfs_create_group(&dev->dev.kobj, attrs))
pr_warn("%s: failed to create sysfs group for identification\n",
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0248d0e21fee..5e52034ab010 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1242,20 +1242,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
result = nvme_enable_ctrl(&dev->ctrl, cap);
if (result)
- goto free_nvmeq;
+ return result;
nvmeq->cq_vector = 0;
result = queue_request_irq(nvmeq);
if (result) {
nvmeq->cq_vector = -1;
- goto free_nvmeq;
+ return result;
}
return result;
-
- free_nvmeq:
- nvme_free_queues(dev, 0);
- return result;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -1317,10 +1313,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
max = min(dev->max_qid, dev->queue_count - 1);
for (i = dev->online_queues; i <= max; i++) {
ret = nvme_create_queue(dev->queues[i], i);
- if (ret) {
- nvme_free_queues(dev, i);
+ if (ret)
break;
- }
}
/*
@@ -1460,13 +1454,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
result = queue_request_irq(adminq);
if (result) {
adminq->cq_vector = -1;
- goto free_queues;
+ return result;
}
return nvme_create_io_queues(dev);
-
- free_queues:
- nvme_free_queues(dev, 1);
- return result;
}
static void nvme_del_queue_end(struct request *req, int error)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5a8388177959..3d25add36d91 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -83,6 +83,7 @@ enum nvme_rdma_queue_flags {
NVME_RDMA_Q_CONNECTED = (1 << 0),
NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
NVME_RDMA_Q_DELETING = (1 << 2),
+ NVME_RDMA_Q_LIVE = (1 << 3),
};
struct nvme_rdma_queue {
@@ -624,10 +625,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
- if (ret)
- break;
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "failed to connect i/o queue: %d\n", ret);
+ goto out_free_queues;
+ }
+ set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
}
+ return 0;
+
+out_free_queues:
+ nvme_rdma_free_io_queues(ctrl);
return ret;
}
@@ -712,6 +721,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
if (ret)
goto stop_admin_q;
+ set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (ret)
goto stop_admin_q;
@@ -761,8 +772,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
nvme_stop_keep_alive(&ctrl->ctrl);
- for (i = 0; i < ctrl->queue_count; i++)
+ for (i = 0; i < ctrl->queue_count; i++) {
clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+ clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
+ }
if (ctrl->queue_count > 1)
nvme_stop_queues(&ctrl->ctrl);
@@ -1378,6 +1391,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
+/*
+ * We cannot accept any other command until the Connect command has completed.
+ */
+static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+ struct request *rq)
+{
+ if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
+ struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+
+ if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+ cmd->common.opcode != nvme_fabrics_command ||
+ cmd->fabrics.fctype != nvme_fabrics_type_connect)
+ return false;
+ }
+
+ return true;
+}
+
static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1394,6 +1425,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
+ if (!nvme_rdma_queue_is_ready(queue, rq))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
dev = queue->device->dev;
ib_dma_sync_single_for_cpu(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -1544,6 +1578,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
if (error)
goto out_cleanup_queue;
+ set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
if (error) {
dev_err(ctrl->ctrl.device,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b4cacb6f0258..a21437a33adb 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -838,9 +838,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
- ctrl->csts |= NVME_CSTS_CFS;
- INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
- schedule_work(&ctrl->fatal_err_work);
+ mutex_lock(&ctrl->lock);
+ if (!(ctrl->csts & NVME_CSTS_CFS)) {
+ ctrl->csts |= NVME_CSTS_CFS;
+ INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+ schedule_work(&ctrl->fatal_err_work);
+ }
+ mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index f8d23999e0f2..005ef5d17a19 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -951,6 +951,7 @@ err_destroy_cq:
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
+ ib_drain_qp(queue->cm_id->qp);
rdma_destroy_qp(queue->cm_id);
ib_free_cq(queue->cq);
}
@@ -1066,6 +1067,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
spin_lock_init(&queue->rsp_wr_wait_lock);
INIT_LIST_HEAD(&queue->free_rsps);
spin_lock_init(&queue->rsps_lock);
+ INIT_LIST_HEAD(&queue->queue_list);
queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
if (queue->idx < 0) {
@@ -1244,7 +1246,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- ib_drain_qp(queue->cm_id->qp);
schedule_work(&queue->release_work);
}
}
@@ -1269,7 +1270,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
{
WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
- pr_err("failed to connect queue\n");
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ if (!list_empty(&queue->queue_list))
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ pr_err("failed to connect queue %d\n", queue->idx);
schedule_work(&queue->release_work);
}
@@ -1352,7 +1358,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- nvmet_rdma_queue_disconnect(queue);
+ /*
+ * We might end up here when we already freed the qp
+ * which means queue release sequence is in progress,
+ * so don't get in the way...
+ */
+ if (queue)
+ nvmet_rdma_queue_disconnect(queue);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
ret = nvmet_rdma_device_removal(cm_id, queue);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d687e6de24a0..a0bccb54a9bd 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2077,8 +2077,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
name = of_get_property(of_aliases, "stdout", NULL);
if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
- if (of_stdout)
- console_set_by_of();
}
if (!of_aliases)
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index b470f7e3521d..262281bd68fa 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np)
mdiodev = to_mdio_device(d);
if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
return to_phy_device(d);
+ put_device(d);
}
return NULL;
@@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np)
status.link = 1;
status.duplex = of_property_read_bool(fixed_link_node,
"full-duplex");
- if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
+ if (of_property_read_u32(fixed_link_node, "speed",
+ &status.speed)) {
+ of_node_put(fixed_link_node);
return -EINVAL;
+ }
status.pause = of_property_read_bool(fixed_link_node, "pause");
status.asym_pause = of_property_read_bool(fixed_link_node,
"asym-pause");
@@ -486,3 +490,18 @@ int of_phy_register_fixed_link(struct device_node *np)
return -ENODEV;
}
EXPORT_SYMBOL(of_phy_register_fixed_link);
+
+void of_phy_deregister_fixed_link(struct device_node *np)
+{
+ struct phy_device *phydev;
+
+ phydev = of_phy_find_device(np);
+ if (!phydev)
+ return;
+
+ fixed_phy_unregister(phydev);
+
+ put_device(&phydev->mdio.dev); /* of_phy_find_device() */
+ phy_device_free(phydev); /* fixed_phy_register() */
+}
+EXPORT_SYMBOL(of_phy_deregister_fixed_link);
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 8df6312ed300..1a02038c4640 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
- * Authors: Joao Pinto <jpmpinto@gmail.com>
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 035f50c03281..bed19994c1e9 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -637,8 +637,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
-
if (pp->ops->host_init)
pp->ops->host_init(pp);
@@ -809,6 +807,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
+ /* get iATU unroll support */
+ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+ dev_dbg(pp->dev, "iATU unroll: %s\n",
+ pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
/* set the number of lanes */
val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index ef0a84c7a588..35936409b2d4 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -533,11 +533,11 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->phy))
return PTR_ERR(pcie->phy);
+ pp->dev = dev;
ret = pcie->ops->get_resources(pcie);
if (ret)
return ret;
- pp->dev = dev;
pp->root_bus_nr = -1;
pp->ops = &qcom_pcie_dw_ops;
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index e0b22dab9b7a..e04f69beb42d 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -190,6 +190,9 @@ struct rockchip_pcie {
struct reset_control *mgmt_rst;
struct reset_control *mgmt_sticky_rst;
struct reset_control *pipe_rst;
+ struct reset_control *pm_rst;
+ struct reset_control *aclk_rst;
+ struct reset_control *pclk_rst;
struct clk *aclk_pcie;
struct clk *aclk_perf_pcie;
struct clk *hclk_pcie;
@@ -408,6 +411,44 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
gpiod_set_value(rockchip->ep_gpio, 0);
+ err = reset_control_assert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "assert aclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "assert pclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "assert pm_rst err %d\n", err);
+ return err;
+ }
+
+ udelay(10);
+
+ err = reset_control_deassert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "deassert pm_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
err = phy_init(rockchip->phy);
if (err < 0) {
dev_err(dev, "fail to init phy, err %d\n", err);
@@ -781,6 +822,27 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
return PTR_ERR(rockchip->pipe_rst);
}
+ rockchip->pm_rst = devm_reset_control_get(dev, "pm");
+ if (IS_ERR(rockchip->pm_rst)) {
+ if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pm reset property in node\n");
+ return PTR_ERR(rockchip->pm_rst);
+ }
+
+ rockchip->pclk_rst = devm_reset_control_get(dev, "pclk");
+ if (IS_ERR(rockchip->pclk_rst)) {
+ if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pclk reset property in node\n");
+ return PTR_ERR(rockchip->pclk_rst);
+ }
+
+ rockchip->aclk_rst = devm_reset_control_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_rst)) {
+ if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing aclk reset property in node\n");
+ return PTR_ERR(rockchip->aclk_rst);
+ }
+
rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
if (IS_ERR(rockchip->ep_gpio)) {
dev_err(dev, "missing ep-gpios property in node\n");
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 55f453de562e..c7f3408e3148 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
return intel_mid_pci_set_power_state(pdev, state);
}
+static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
+{
+ return intel_mid_pci_get_power_state(pdev);
+}
+
static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
{
return PCI_D3hot;
@@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
static struct pci_platform_pm_ops mid_pci_platform_pm = {
.is_manageable = mid_pci_power_manageable,
.set_state = mid_pci_set_power_state,
+ .get_state = mid_pci_get_power_state,
.choose_state = mid_pci_choose_state,
.sleep_wake = mid_pci_sleep_wake,
.run_wake = mid_pci_run_wake,
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index db553dc22c8e..2b6a59266689 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -307,20 +307,6 @@ out:
return 0;
}
-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
-{
- while (1) {
- if (!pci_is_pcie(dev))
- break;
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- return dev;
- if (!dev->bus->self)
- break;
- dev = dev->bus->self;
- }
- return NULL;
-}
-
static int find_aer_device_iter(struct device *device, void *data)
{
struct pcie_device **result = data;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ab002671fa60..104c46d53121 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1439,6 +1439,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
dev_warn(&dev->dev, "PCI-X settings not supported\n");
}
+static bool pcie_root_rcb_set(struct pci_dev *dev)
+{
+ struct pci_dev *rp = pcie_find_root_port(dev);
+ u16 lnkctl;
+
+ if (!rp)
+ return false;
+
+ pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
+ if (lnkctl & PCI_EXP_LNKCTL_RCB)
+ return true;
+
+ return false;
+}
+
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
int pos;
@@ -1468,9 +1483,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
/* Initialize Link Control Register */
- if (pcie_cap_has_lnkctl(dev))
+ if (pcie_cap_has_lnkctl(dev)) {
+
+ /*
+ * If the Root Port supports Read Completion Boundary of
+ * 128, set RCB to 128. Otherwise, clear it.
+ */
+ hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
+ hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
+ if (pcie_root_rcb_set(dev))
+ hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
+
pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
+ }
/* Find Advanced Error Reporting Enhanced Capability */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 66c4d8f42233..9526e341988b 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -121,6 +121,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return -EINVAL;
}
+ /*
+ * If we have a shadow copy in RAM, the PCI device doesn't respond
+ * to the shadow range, so we don't need to claim it, and upstream
+ * bridges don't need to route the range to the device.
+ */
+ if (res->flags & IORESOURCE_ROM_SHADOW)
+ return 0;
+
root = pci_find_parent_resource(dev, res);
if (!root) {
dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 153f3122283d..b6b316de055c 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -107,7 +107,7 @@ int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt,
ret = regulator_enable(r->reg);
} else {
- regulator_disable(r->reg);
+ ret = regulator_disable(r->reg);
}
if (ret == 0)
r->on = on;
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
index 32ae78c8ca17..c85fb0b59729 100644
--- a/drivers/phy/phy-da8xx-usb.c
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -198,7 +198,8 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
} else {
int ret;
- ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy",
+ "ohci-da8xx");
if (ret)
dev_warn(dev, "Failed to create usb11 phy lookup\n");
ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy",
@@ -216,7 +217,7 @@ static int da8xx_usb_phy_remove(struct platform_device *pdev)
if (!pdev->dev.of_node) {
phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx");
- phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx");
}
return 0;
diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/phy-rockchip-pcie.c
index a2b4c6b58aea..6904633cad68 100644
--- a/drivers/phy/phy-rockchip-pcie.c
+++ b/drivers/phy/phy-rockchip-pcie.c
@@ -249,21 +249,10 @@ err_refclk:
static int rockchip_pcie_phy_exit(struct phy *phy)
{
struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
- int err = 0;
clk_disable_unprepare(rk_phy->clk_pciephy_ref);
- err = reset_control_deassert(rk_phy->phy_rst);
- if (err) {
- dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
- goto err_reset;
- }
-
- return err;
-
-err_reset:
- clk_prepare_enable(rk_phy->clk_pciephy_ref);
- return err;
+ return 0;
}
static const struct phy_ops ops = {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index b9342a2af7b3..fec34f5213c4 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -264,7 +264,7 @@ static int sun4i_usb_phy_init(struct phy *_phy)
return ret;
}
- if (data->cfg->enable_pmu_unk1) {
+ if (phy->pmu && data->cfg->enable_pmu_unk1) {
val = readl(phy->pmu + REG_PMU_UNK1);
writel(val & ~2, phy->pmu + REG_PMU_UNK1);
}
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 87e6334eab93..547ca7b3f098 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -459,8 +459,6 @@ static int twl4030_phy_power_off(struct phy *phy)
struct twl4030_usb *twl = phy_get_drvdata(phy);
dev_dbg(twl->dev, "%s\n", __func__);
- pm_runtime_mark_last_busy(twl->dev);
- pm_runtime_put_autosuspend(twl->dev);
return 0;
}
@@ -472,6 +470,8 @@ static int twl4030_phy_power_on(struct phy *phy)
dev_dbg(twl->dev, "%s\n", __func__);
pm_runtime_get_sync(twl->dev);
schedule_delayed_work(&twl->id_workaround_work, HZ);
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put_autosuspend(twl->dev);
return 0;
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index c8c72e8259d3..87b46390b695 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -26,7 +26,7 @@
#define ASPEED_G5_NR_PINS 228
-#define COND1 SIG_DESC_BIT(SCU90, 6, 0)
+#define COND1 { SCU90, BIT(6), 0, 0 }
#define COND2 { SCU94, GENMASK(1, 0), 0, 0 }
#define B14 0
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 7f7700716398..5d1e505c3c63 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = {
static int __init iproc_gpio_init(void)
{
- return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe);
+ return platform_driver_register(&iproc_gpio_driver);
}
arch_initcall_sync(iproc_gpio_init);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 35783db1c10b..c8deb8be1da7 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = {
static int __init nsp_gpio_init(void)
{
- return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe);
+ return platform_driver_register(&nsp_gpio_driver);
}
arch_initcall_sync(nsp_gpio_init);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 47613201269a..79c4e14a5a75 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -687,6 +687,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->functions)
return -ENOMEM;
+ info->group_index = 0;
if (flat_funcs) {
info->ngroups = of_get_child_count(np);
} else {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 30389f4ccab4..c43b1e9a06af 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1652,12 +1652,15 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int chv_pinctrl_suspend(struct device *dev)
+static int chv_pinctrl_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&chv_lock, flags);
+
pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK);
for (i = 0; i < pctrl->community->npins; i++) {
@@ -1678,15 +1681,20 @@ static int chv_pinctrl_suspend(struct device *dev)
ctx->padctrl1 = readl(reg);
}
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
return 0;
}
-static int chv_pinctrl_resume(struct device *dev)
+static int chv_pinctrl_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&chv_lock, flags);
+
/*
* Mask all interrupts before restoring per-pin configuration
* registers because we don't know in which state BIOS left them
@@ -1731,12 +1739,15 @@ static int chv_pinctrl_resume(struct device *dev)
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
return 0;
}
#endif
static const struct dev_pm_ops chv_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq,
+ chv_pinctrl_resume_noirq)
};
static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 99da4cf91031..b7bb37167969 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
if (info->irqmux_base || gpio_irq > 0) {
err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
0, handle_simple_irq,
- IRQ_TYPE_LEVEL_LOW);
+ IRQ_TYPE_NONE);
if (err) {
gpiochip_remove(&bank->gpio_chip);
dev_info(dev, "could not add irqchip\n");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 200667f08c37..efc43711ff5c 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1092,9 +1092,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
return -EINVAL;
}
- ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
- if (ret)
- return ret;
+ if (of_find_property(np, "interrupt-parent", NULL)) {
+ ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
+ if (ret)
+ return ret;
+ }
for_each_child_of_node(np, child)
if (of_property_read_bool(child, "gpio-controller"))
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a2323941e677..a7614fc542b5 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -934,6 +934,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Yoga 900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "VIUU4"),
+ },
+ },
+ {
.ident = "Lenovo YOGA 910-13IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index ed5874217ee7..12dbb5063376 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -264,7 +264,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev))
+ if (acpi_create_platform_device(dev, NULL))
dev_info(&dev->dev,
"intel-hid: created platform device\n");
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 146d02f8c9bc..78080763df51 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -164,7 +164,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev))
+ if (acpi_create_platform_device(dev, NULL))
dev_info(&dev->dev,
"intel-vbtn: created platform device\n");
diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
index feac4576b837..2df07ee8f3c3 100644
--- a/drivers/platform/x86/toshiba-wmi.c
+++ b/drivers/platform/x86/toshiba-wmi.c
@@ -24,14 +24,15 @@
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Azael Avalos");
MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver");
MODULE_LICENSE("GPL");
-#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
-MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID);
+MODULE_ALIAS("wmi:"WMI_EVENT_GUID);
static struct input_dev *toshiba_wmi_input_dev;
@@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context)
kfree(response.pointer);
}
+static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = {
+ {
+ .ident = "Toshiba laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ },
+ },
+ {}
+};
+
static int __init toshiba_wmi_input_setup(void)
{
acpi_status status;
@@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void)
if (err)
goto err_free_dev;
- status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID,
+ status = wmi_install_notify_handler(WMI_EVENT_GUID,
toshiba_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
@@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void)
return 0;
err_remove_notifier:
- wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ wmi_remove_notify_handler(WMI_EVENT_GUID);
err_free_keymap:
sparse_keymap_free(toshiba_wmi_input_dev);
err_free_dev:
@@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void)
static void toshiba_wmi_input_destroy(void)
{
- wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ wmi_remove_notify_handler(WMI_EVENT_GUID);
sparse_keymap_free(toshiba_wmi_input_dev);
input_unregister_device(toshiba_wmi_input_dev);
}
@@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void)
{
int ret;
- if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ if (!wmi_has_guid(WMI_EVENT_GUID) ||
+ !dmi_check_system(toshiba_wmi_dmi_table))
return -ENODEV;
ret = toshiba_wmi_input_setup();
@@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void)
static void __exit toshiba_wmi_exit(void)
{
- if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ if (wmi_has_guid(WMI_EVENT_GUID))
toshiba_wmi_input_destroy();
}
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 381871b2bb46..9d5bd7d5c610 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -474,6 +474,7 @@ static int meson_pwm_probe(struct platform_device *pdev)
if (IS_ERR(meson->base))
return PTR_ERR(meson->base);
+ spin_lock_init(&meson->lock);
meson->chip.dev = &pdev->dev;
meson->chip.ops = &meson_pwm_ops;
meson->chip.base = -1;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 0296d8178ae2..a813239300c3 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -425,6 +425,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
if (test_bit(PWMF_EXPORTED, &pwm->flags))
pwm_unexport_child(parent, pwm);
}
+
+ put_device(parent);
}
static int __init pwm_sysfs_init(void)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 67426c0477d3..5c1519b229e0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2754,7 +2754,7 @@ static int _regulator_set_voltage_time(struct regulator_dev *rdev,
ramp_delay = rdev->desc->ramp_delay;
if (ramp_delay == 0) {
- rdev_warn(rdev, "ramp_delay not set\n");
+ rdev_dbg(rdev, "ramp_delay not set\n");
return 0;
}
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 18a93d3e3f93..d36534965635 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -327,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = {
{ .compatible = "alphascale,asm9260-rtc", },
{}
};
+MODULE_DEVICE_TABLE(of, asm9260_dt_ids);
static struct platform_driver asm9260_rtc_driver = {
.probe = asm9260_rtc_probe,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index dd3d59806ffa..7030d7cd3861 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -776,7 +776,7 @@ static void cmos_do_shutdown(int rtc_irq)
spin_unlock_irq(&rtc_lock);
}
-static void __exit cmos_do_remove(struct device *dev)
+static void cmos_do_remove(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
struct resource *ports;
@@ -996,8 +996,9 @@ static u32 rtc_handler(void *context)
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char rtc_control = 0;
unsigned char rtc_intr;
+ unsigned long flags;
- spin_lock_irq(&rtc_lock);
+ spin_lock_irqsave(&rtc_lock, flags);
if (cmos_rtc.suspend_ctrl)
rtc_control = CMOS_READ(RTC_CONTROL);
if (rtc_control & RTC_AIE) {
@@ -1006,7 +1007,7 @@ static u32 rtc_handler(void *context)
rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
rtc_update_irq(cmos->rtc, 1, rtc_intr);
}
- spin_unlock_irq(&rtc_lock);
+ spin_unlock_irqrestore(&rtc_lock, flags);
pm_wakeup_event(dev, 0);
acpi_clear_event(ACPI_EVENT_RTC);
@@ -1129,7 +1130,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
pnp_irq(pnp, 0));
}
-static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
+static void cmos_pnp_remove(struct pnp_dev *pnp)
{
cmos_do_remove(&pnp->dev);
}
@@ -1161,7 +1162,7 @@ static struct pnp_driver cmos_pnp_driver = {
.name = (char *) driver_name,
.id_table = rtc_ids,
.probe = cmos_pnp_probe,
- .remove = __exit_p(cmos_pnp_remove),
+ .remove = cmos_pnp_remove,
.shutdown = cmos_pnp_shutdown,
/* flag ensures resume() gets called, and stops syslog spam */
@@ -1238,7 +1239,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
return cmos_do_probe(&pdev->dev, resource, irq);
}
-static int __exit cmos_platform_remove(struct platform_device *pdev)
+static int cmos_platform_remove(struct platform_device *pdev)
{
cmos_do_remove(&pdev->dev);
return 0;
@@ -1263,7 +1264,7 @@ static void cmos_platform_shutdown(struct platform_device *pdev)
MODULE_ALIAS("platform:rtc_cmos");
static struct platform_driver cmos_platform_driver = {
- .remove = __exit_p(cmos_platform_remove),
+ .remove = cmos_platform_remove,
.shutdown = cmos_platform_shutdown,
.driver = {
.name = driver_name,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index b04ea9b5ae67..51e52446eacb 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -113,6 +113,7 @@
/* OMAP_RTC_OSC_REG bit fields: */
#define OMAP_RTC_OSC_32KCLK_EN BIT(6)
#define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
+#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
/* OMAP_RTC_IRQWAKEEN bit fields: */
#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
@@ -146,6 +147,7 @@ struct omap_rtc {
u8 interrupts_reg;
bool is_pmic_controller;
bool has_ext_clk;
+ bool is_suspending;
const struct omap_rtc_device_type *type;
struct pinctrl_dev *pctldev;
};
@@ -786,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
*/
if (rtc->has_ext_clk) {
reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
- rtc_write(rtc, OMAP_RTC_OSC_REG,
- reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
+ reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
+ reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
+ rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
}
rtc->type->lock(rtc);
@@ -898,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev)
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
rtc->type->lock(rtc);
- /* Disable the clock/module */
- pm_runtime_put_sync(dev);
+ rtc->is_suspending = true;
return 0;
}
@@ -908,9 +910,6 @@ static int omap_rtc_resume(struct device *dev)
{
struct omap_rtc *rtc = dev_get_drvdata(dev);
- /* Enable the clock/module so that we can access the registers */
- pm_runtime_get_sync(dev);
-
rtc->type->unlock(rtc);
if (device_may_wakeup(dev))
disable_irq_wake(rtc->irq_alarm);
@@ -918,11 +917,34 @@ static int omap_rtc_resume(struct device *dev)
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg);
rtc->type->lock(rtc);
+ rtc->is_suspending = false;
+
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume);
+#ifdef CONFIG_PM
+static int omap_rtc_runtime_suspend(struct device *dev)
+{
+ struct omap_rtc *rtc = dev_get_drvdata(dev);
+
+ if (rtc->is_suspending && !rtc->has_ext_clk)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int omap_rtc_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_rtc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume)
+ SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend,
+ omap_rtc_runtime_resume, NULL)
+};
static void omap_rtc_shutdown(struct platform_device *pdev)
{
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 3d53d636b17b..f0cfb0451757 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
struct CommandControlBlock *ccb;
int target = cmd->device->id;
- int lun = cmd->device->lun;
- uint8_t scsicmd = cmd->cmnd[0];
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
- cmd->result = (DID_NO_CONNECT << 16);
- }
- cmd->scsi_done(cmd);
- return 0;
- }
if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index aebc4ddb3060..ac05317bba7f 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1083,7 +1083,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
nonemb_cmd = &phba->boot_struct.nonemb_cmd;
nonemb_cmd->size = sizeof(*resp);
nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
- sizeof(nonemb_cmd->size),
+ nonemb_cmd->size,
&nonemb_cmd->dma);
if (!nonemb_cmd->va) {
mutex_unlock(&ctrl->mbox_lock);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d1421139e6ea..2ffe029ff2b6 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2081,9 +2081,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
/* never reached the xmit task callout */
if (tdata->skb)
__kfree_skb(tdata->skb);
- memset(tdata, 0, sizeof(*tdata));
task_release_itt(task, task->hdr_itt);
+ memset(tdata, 0, sizeof(*tdata));
+
iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 241829e59668..7bb20684e9fa 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -793,6 +793,7 @@ static void alua_rtpg_work(struct work_struct *work)
WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
spin_unlock_irqrestore(&pg->lock, flags);
+ kref_put(&pg->kref, release_port_group);
return;
}
if (pg->flags & ALUA_SYNC_STPG)
@@ -890,6 +891,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
/* Do not queue if the worker is already running */
if (!(pg->flags & ALUA_PG_RUNNING)) {
kref_get(&pg->kref);
+ sdev = NULL;
start_queue = 1;
}
}
@@ -901,7 +903,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
if (start_queue &&
!queue_delayed_work(alua_wq, &pg->rtpg_work,
msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
- scsi_device_put(sdev);
+ if (sdev)
+ scsi_device_put(sdev);
kref_put(&pg->kref, release_port_group);
}
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index d007ec18179a..a1d6ab76a514 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2009,7 +2009,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
- struct hpsa_scsi_dev_t *sd;
+ struct hpsa_scsi_dev_t *sd = NULL;
unsigned long flags;
struct ctlr_info *h;
@@ -2026,7 +2026,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
sd->target = sdev_id(sdev);
sd->lun = sdev->lun;
}
- } else
+ }
+ if (!sd)
sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
sdev_id(sdev), sdev->lun);
@@ -3840,6 +3841,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
sizeof(this_device->vendor));
memcpy(this_device->model, &inq_buff[16],
sizeof(this_device->model));
+ this_device->rev = inq_buff[2];
memset(this_device->device_id, 0,
sizeof(this_device->device_id));
if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
@@ -3929,10 +3931,14 @@ static void figure_bus_target_lun(struct ctlr_info *h,
if (!is_logical_dev_addr_mode(lunaddrbytes)) {
/* physical device, target and lun filled in later */
- if (is_hba_lunid(lunaddrbytes))
+ if (is_hba_lunid(lunaddrbytes)) {
+ int bus = HPSA_HBA_BUS;
+
+ if (!device->rev)
+ bus = HPSA_LEGACY_HBA_BUS;
hpsa_set_bus_target_lun(device,
- HPSA_HBA_BUS, 0, lunid & 0x3fff);
- else
+ bus, 0, lunid & 0x3fff);
+ } else
/* defer target, lun assignment for physical devices */
hpsa_set_bus_target_lun(device,
HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 82cdfad874f3..9ea162de80dc 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -69,6 +69,7 @@ struct hpsa_scsi_dev_t {
u64 sas_address;
unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
unsigned char model[16]; /* bytes 16-31 of inquiry data */
+ unsigned char rev; /* byte 2 of inquiry data */
unsigned char raid_level; /* from inquiry page 0xC1 */
unsigned char volume_offline; /* discovered via TUR or VPD */
u16 queue_depth; /* max queue_depth for this device */
@@ -402,6 +403,7 @@ struct offline_device_entry {
#define HPSA_RAID_VOLUME_BUS 1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
#define HPSA_HBA_BUS 0
+#define HPSA_LEGACY_HBA_BUS 3
/*
Send the command to the hardware
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 04ce7cfb6d1b..50c71678a156 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -308,7 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
fc_stats = &lport->host_stats;
memset(fc_stats, 0, sizeof(struct fc_host_statistics));
- fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ;
+ fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
for_each_possible_cpu(cpu) {
struct fc_stats *stats;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ca86c885dfaa..3aaea713bf37 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2233,7 +2233,7 @@ struct megasas_instance_template {
};
#define MEGASAS_IS_LOGICAL(scp) \
- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9ff57dee72d7..d8b1fbd4c8aa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,16 +1700,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out_done;
}
- switch (scmd->cmnd[0]) {
- case SYNCHRONIZE_CACHE:
- /*
- * FW takes care of flush cache on its own
- * No need to send it down
- */
+ /*
+ * FW takes care of flush cache on its own for Virtual Disk.
+ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
+ */
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
scmd->result = DID_OK << 16;
goto out_done;
- default:
- break;
}
return instance->instancet->build_and_issue_cmd(instance, scmd);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 209a969a979d..1c4744e78173 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget)
sas_target_priv_data->handle = raid_device->handle;
sas_target_priv_data->sas_address = raid_device->wwid;
sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
- sas_target_priv_data->raid_device = raid_device;
if (ioc->is_warpdrive)
- raid_device->starget = starget;
+ sas_target_priv_data->raid_device = raid_device;
+ raid_device->starget = starget;
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
return 0;
@@ -3885,6 +3885,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
}
}
+static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+{
+ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+}
+
/**
* _scsih_flush_running_cmds - completing outstanding commands.
* @ioc: per adapter object
@@ -3906,6 +3911,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
if (!scmd)
continue;
count++;
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_unblock(scmd->device,
+ SDEV_RUNNING);
mpt3sas_base_free_smid(ioc, smid);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
@@ -4010,8 +4018,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
SAM_STAT_CHECK_CONDITION;
}
-
-
/**
* scsih_qcmd - main scsi request entry point
* @scmd: pointer to scsi command object
@@ -4038,6 +4044,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
+ /*
+ * Lock the device for any subsequent command until command is
+ * done.
+ */
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_block(scmd->device);
+
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4613,6 +4626,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
if (scmd == NULL)
return 1;
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
if (mpi_reply == NULL) {
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 86eb19902bac..c7cc8035eacb 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -791,8 +791,10 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
slot->slot_tag = tag;
slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf)
+ if (!slot->buf) {
+ rc = -ENOMEM;
goto err_out_tag;
+ }
memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
tei.task = task;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ace65db1d2a2..56d6142852a5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -707,6 +707,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
srb_t *sp;
int rval;
+ if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -1451,6 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
+ /* Don't abort commands in adapter during EEH
+ * recovery as it's not accessible/responding.
+ */
+ if (!ha->flags.eeh_busy) {
+ /* Get a reference to the sp and drop the lock.
+ * The reference ensures this sp->done() call
+ * - and not the call in qla2xxx_eh_abort() -
+ * ends the SCSI command (with result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qla2xxx_eh_abort(GET_CMD_SP(sp));
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
req->outstanding_cmds[cnt] = NULL;
sp->done(vha, sp, res);
}
@@ -2341,6 +2360,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
scsi_qla_host_t *vha = shost_priv(shost);
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return 1;
if (!vha->host)
return 1;
if (time > vha->hw->loop_reset_delay * HZ)
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index 4377e87ee79c..892a0b058b99 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -356,8 +356,8 @@ struct qlogicpti {
/* The rest of the elements are unimportant for performance. */
struct qlogicpti *next;
- __u32 res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/
- __u32 req_dvma; /* Ptr to REQUEST bufs (DVMA) */
+ dma_addr_t res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/
+ dma_addr_t req_dvma; /* Ptr to REQUEST bufs (DVMA) */
u_char fware_majrev, fware_minrev, fware_micrev;
struct Scsi_Host *qhost;
int qpti_id;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c905709707f0..cf04a364fd8b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void)
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
+ vfree(map_storep);
vfree(dif_storep);
vfree(fake_storep);
kfree(sdebug_q_arr);
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 4a0d3cdc607c..15ca09cd16f3 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -793,6 +793,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int result = SUCCESS;
DECLARE_COMPLETION_ONSTACK(abort_cmp);
+ int done;
scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
adapter->host->host_no, cmd);
@@ -824,10 +825,10 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
pvscsi_abort_cmd(adapter, ctx);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
/* Wait for 2 secs for the completion. */
- wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
+ done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
spin_lock_irqsave(&adapter->hw_lock, flags);
- if (!completion_done(&abort_cmp)) {
+ if (!done) {
/*
* Failed to abort the command, unmark the fact that it
* was requested to be aborted.
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index c097d2ccbde3..d41292ef85f2 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -26,7 +26,7 @@
#include <linux/types.h>
-#define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k"
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 35c0dd945668..a67b0ff6a362 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -70,6 +70,7 @@
#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
+#define SPI_SR_CLEAR 0xdaad0000
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
@@ -646,6 +647,11 @@ static const struct regmap_config dspi_regmap_config = {
.max_register = 0x88,
};
+static void dspi_init(struct fsl_dspi *dspi)
+{
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+}
+
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -709,6 +715,7 @@ static int dspi_probe(struct platform_device *pdev)
return PTR_ERR(dspi->regmap);
}
+ dspi_init(dspi);
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7451585a080e..2c175b9495f7 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -458,7 +458,7 @@ static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
mspi->len -= rx_nr_bytes;
- if (mspi->rx)
+ if (rx_nr_bytes && mspi->rx)
mspi->get_rx(rx_data, mspi);
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5787b723b593..838783c3fed0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1618,9 +1618,11 @@ static void of_register_spi_devices(struct spi_master *master)
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
spi = of_register_spi_device(master, nc);
- if (IS_ERR(spi))
+ if (IS_ERR(spi)) {
dev_warn(&master->dev, "Failed to create SPI device for %s\n",
nc->full_name);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
}
}
#else
@@ -3131,6 +3133,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(spi)) {
pr_err("%s: failed to create for '%s'\n",
__func__, rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(spi));
}
break;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 7043eb0543f6..5ab49a798164 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -207,7 +207,8 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
* clock period is specified by user with prescaling
* already taken into account.
*/
- return counter->clock_period_ps;
+ *period_ps = counter->clock_period_ps;
+ return 0;
}
switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 34307ac3f255..d33d6fe078ad 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -186,6 +186,7 @@ int arche_platform_change_state(enum arche_platform_state state,
exit:
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
mutex_unlock(&arche_pdata->platform_state_mutex);
+ put_device(&pdev->dev);
of_node_put(np);
return ret;
}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 5eecf1cb1028..3892a7470410 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -655,6 +655,7 @@ static void ad5933_work(struct work_struct *work)
__be16 buf[2];
int val[2];
unsigned char status;
+ int ret;
mutex_lock(&indio_dev->mlock);
if (st->state == AD5933_CTRL_INIT_START_FREQ) {
@@ -662,19 +663,22 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
st->state = AD5933_CTRL_START_SWEEP;
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}
- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ if (ret)
+ goto out;
if (status & AD5933_STAT_DATA_VALID) {
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
indio_dev->masklength);
- ad5933_i2c_read(st->client,
+ ret = ad5933_i2c_read(st->client,
test_bit(1, indio_dev->active_scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
scan_count * 2, (u8 *)buf);
+ if (ret)
+ goto out;
if (scan_count == 2) {
val[0] = be16_to_cpu(buf[0]);
@@ -686,8 +690,7 @@ static void ad5933_work(struct work_struct *work)
} else {
/* no data available - try again later */
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}
if (status & AD5933_STAT_SWEEP_DONE) {
@@ -700,7 +703,7 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
schedule_delayed_work(&st->work, st->poll_time_jiffies);
}
-
+out:
mutex_unlock(&indio_dev->mlock);
}
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index ea15cc638097..4d9bd02ede47 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -482,6 +482,8 @@ static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
flags);
memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
}
+ if (err)
+ return err;
return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
bdev->cache_fm_rds_system);
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index a324322ee0ad..499952c8ef39 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev;
- char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
- ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+ ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ser_dev)
return -ENOMEM;
- ser_dev->id.type = SERIO_PS_PSTHRU;
+ ser_dev->id.type = SERIO_8042;
ser_dev->write = ps2_sendcommand;
ser_dev->start = ps2_startstreaming;
ser_dev->stop = ps2_stopstreaming;
@@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev)
serio_register_port(ser_dev);
- /* mouse reset */
- nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
-
return 0;
}
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 955247979aaa..4ed6d8d7712a 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -601,13 +601,13 @@
#define PANEL_PLANE_TL 0x08001C
#define PANEL_PLANE_TL_TOP_SHIFT 16
-#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16)
-#define PANEL_PLANE_TL_LEFT_MASK 0xeff
+#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16)
+#define PANEL_PLANE_TL_LEFT_MASK 0x7ff
#define PANEL_PLANE_BR 0x080020
#define PANEL_PLANE_BR_BOTTOM_SHIFT 16
-#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16)
-#define PANEL_PLANE_BR_RIGHT_MASK 0xeff
+#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff
#define PANEL_HORIZONTAL_TOTAL 0x080024
#define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 7a223074df3d..afada655f861 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};
+static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
static int __init powerclamp_probe(void)
{
- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+
+ if (!x86_match_cpu(intel_powerclamp_ids)) {
pr_err("CPU does not support MWAIT");
return -ENODEV;
}
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 69426e644d17..3dbb4a21ab44 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ci)
return -ENOMEM;
+ spin_lock_init(&ci->lock);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 661f43fe0f9e..c9e80ad48fdc 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1889,8 +1889,6 @@ static int udc_start(struct ci_hdrc *ci)
struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
int retval = 0;
- spin_lock_init(&ci->lock);
-
ci->gadget.ops = &usb_gadget_ops;
ci->gadget.speed = USB_SPEED_UNKNOWN;
ci->gadget.max_speed = USB_SPEED_HIGH;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 78f0f85bebdc..fada988512a1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -932,8 +932,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
DECLARE_WAITQUEUE(wait, current);
struct async_icount old, new;
- if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
- return -EINVAL;
do {
spin_lock_irq(&acm->read_lock);
old = acm->oldcount;
@@ -1161,6 +1159,8 @@ static int acm_probe(struct usb_interface *intf,
if (quirks == IGNORE_DEVICE)
return -ENODEV;
+ memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header));
+
num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;
/* handle quirks deadly to normal probing*/
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7287a763cd0c..fea446900cad 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -769,15 +769,14 @@ static int dwc3_core_init(struct dwc3 *dwc)
return 0;
err4:
- phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
err3:
- phy_power_off(dwc->usb3_generic_phy);
+ phy_power_off(dwc->usb2_generic_phy);
err2:
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
- dwc3_core_exit(dwc);
err1:
usb_phy_shutdown(dwc->usb2_phy);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 89a2f712fdfe..aaaf256f71dd 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/of.h>
#include "core.h"
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e40d47d47d82..17989b72cdae 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3225,11 +3225,11 @@ static bool ffs_func_req_match(struct usb_function *f,
switch (creq->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
- return ffs_func_revmap_intf(func,
- le16_to_cpu(creq->wIndex) >= 0);
+ return (ffs_func_revmap_intf(func,
+ le16_to_cpu(creq->wIndex)) >= 0);
case USB_RECIP_ENDPOINT:
- return ffs_func_revmap_ep(func,
- le16_to_cpu(creq->wIndex) >= 0);
+ return (ffs_func_revmap_ep(func,
+ le16_to_cpu(creq->wIndex)) >= 0);
default:
return (bool) (func->ffs->user_flags &
FUNCTIONFS_ALL_CTRL_RECIP);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index fe1811650dbc..5d1bd13a56c1 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -588,14 +588,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
req->length = length;
- /* throttle high/super speed IRQ rate back slightly */
- if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)) &&
- !list_empty(&dev->tx_reqs))
- ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
- : 0;
-
retval = usb_ep_queue(in, req, GFP_ATOMIC);
switch (retval) {
default:
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index d793f548dfe2..a9a1e4c40480 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -995,6 +995,14 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
}
val = readl(base + ext_cap_offset);
+ /* Auto handoff never worked for these devices. Force it and continue */
+ if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
+ (pdev->vendor == PCI_VENDOR_ID_RENESAS
+ && pdev->device == 0x0014)) {
+ val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
+ writel(val, base + ext_cap_offset);
+ }
+
/* If the BIOS owns the HC, signal that the OS wants it, and wait */
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 210b7e43a6fd..2440f88e07a3 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -479,7 +479,8 @@ static int da8xx_probe(struct platform_device *pdev)
glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
if (IS_ERR(glue->phy)) {
- dev_err(&pdev->dev, "failed to get phy\n");
+ if (PTR_ERR(glue->phy) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get phy\n");
return PTR_ERR(glue->phy);
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 27dadc0d9114..c3e172e15ec3 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -986,7 +986,7 @@ b_host:
}
#endif
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
return handled;
}
@@ -1855,14 +1855,23 @@ static void musb_pm_runtime_check_session(struct musb *musb)
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_INVALID_VBUS_91:
- if (!musb->session && !musb->quirk_invalid_vbus) {
- musb->quirk_invalid_vbus = true;
+ if (musb->quirk_retries--) {
musb_dbg(musb,
- "First invalid vbus, assume no session");
+ "Poll devctl on invalid vbus, assume no session");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+
return;
}
- break;
case MUSB_QUIRK_A_DISCONNECT_19:
+ if (musb->quirk_retries--) {
+ musb_dbg(musb,
+ "Poll devctl on possible host mode disconnect");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+
+ return;
+ }
if (!musb->session)
break;
musb_dbg(musb, "Allow PM on possible host mode disconnect");
@@ -1886,9 +1895,9 @@ static void musb_pm_runtime_check_session(struct musb *musb)
if (error < 0)
dev_err(musb->controller, "Could not enable: %i\n",
error);
+ musb->quirk_retries = 3;
} else {
musb_dbg(musb, "Allow PM with no session: %02x", devctl);
- musb->quirk_invalid_vbus = false;
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
@@ -1899,7 +1908,7 @@ static void musb_pm_runtime_check_session(struct musb *musb)
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
- struct musb *musb = container_of(data, struct musb, irq_work);
+ struct musb *musb = container_of(data, struct musb, irq_work.work);
musb_pm_runtime_check_session(musb);
@@ -1969,6 +1978,7 @@ static struct musb *allocate_instance(struct device *dev,
INIT_LIST_HEAD(&musb->control);
INIT_LIST_HEAD(&musb->in_bulk);
INIT_LIST_HEAD(&musb->out_bulk);
+ INIT_LIST_HEAD(&musb->pending_list);
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -2018,6 +2028,84 @@ static void musb_free(struct musb *musb)
musb_host_free(musb);
}
+struct musb_pending_work {
+ int (*callback)(struct musb *musb, void *data);
+ void *data;
+ struct list_head node;
+};
+
+/*
+ * Called from musb_runtime_resume(), musb_resume(), and
+ * musb_queue_resume_work(). Callers must take musb->lock.
+ */
+static int musb_run_resume_work(struct musb *musb)
+{
+ struct musb_pending_work *w, *_w;
+ unsigned long flags;
+ int error = 0;
+
+ spin_lock_irqsave(&musb->list_lock, flags);
+ list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
+ if (w->callback) {
+ error = w->callback(musb, w->data);
+ if (error < 0) {
+ dev_err(musb->controller,
+ "resume callback %p failed: %i\n",
+ w->callback, error);
+ }
+ }
+ list_del(&w->node);
+ devm_kfree(musb->controller, w);
+ }
+ spin_unlock_irqrestore(&musb->list_lock, flags);
+
+ return error;
+}
+
+/*
+ * Called to run work if device is active or else queue the work to happen
+ * on resume. Caller must take musb->lock and must hold an RPM reference.
+ *
+ * Note that we cowardly refuse to queue work once musb PM runtime
+ * resume has finished calling musb_run_resume_work(), and return
+ * -EINPROGRESS instead.
+ */
+int musb_queue_resume_work(struct musb *musb,
+ int (*callback)(struct musb *musb, void *data),
+ void *data)
+{
+ struct musb_pending_work *w;
+ unsigned long flags;
+ int error;
+
+ if (WARN_ON(!callback))
+ return -EINVAL;
+
+ if (pm_runtime_active(musb->controller))
+ return callback(musb, data);
+
+ w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+ if (!w)
+ return -ENOMEM;
+
+ w->callback = callback;
+ w->data = data;
+ spin_lock_irqsave(&musb->list_lock, flags);
+ if (musb->is_runtime_suspended) {
+ list_add_tail(&w->node, &musb->pending_list);
+ error = 0;
+ } else {
+ dev_err(musb->controller, "could not add resume work %p\n",
+ callback);
+ devm_kfree(musb->controller, w);
+ error = -EINPROGRESS;
+ }
+ spin_unlock_irqrestore(&musb->list_lock, flags);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_queue_resume_work);
+
static void musb_deassert_reset(struct work_struct *work)
{
struct musb *musb;
@@ -2065,6 +2153,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
}
spin_lock_init(&musb->lock);
+ spin_lock_init(&musb->list_lock);
musb->board_set_power = plat->set_power;
musb->min_power = plat->min_power;
musb->ops = plat->platform_ops;
@@ -2114,11 +2203,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb->io.ep_offset = musb_flat_ep_offset;
musb->io.ep_select = musb_flat_ep_select;
}
- /* And override them with platform specific ops if specified. */
- if (musb->ops->ep_offset)
- musb->io.ep_offset = musb->ops->ep_offset;
- if (musb->ops->ep_select)
- musb->io.ep_select = musb->ops->ep_select;
/* At least tusb6010 has its own offsets */
if (musb->ops->ep_offset)
@@ -2213,7 +2297,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_generic_disable(musb);
/* Init IRQ workqueue before request_irq */
- INIT_WORK(&musb->irq_work, musb_irq_work);
+ INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
@@ -2296,6 +2380,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status)
goto fail5;
+ musb->is_initialized = 1;
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
@@ -2309,7 +2394,7 @@ fail4:
musb_host_cleanup(musb);
fail3:
- cancel_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
if (musb->dma_controller)
@@ -2376,7 +2461,7 @@ static int musb_remove(struct platform_device *pdev)
*/
musb_exit_debugfs(musb);
- cancel_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
pm_runtime_get_sync(musb->controller);
@@ -2562,6 +2647,7 @@ static int musb_suspend(struct device *dev)
musb_platform_disable(musb);
musb_generic_disable(musb);
+ WARN_ON(!list_empty(&musb->pending_list));
spin_lock_irqsave(&musb->lock, flags);
@@ -2583,9 +2669,11 @@ static int musb_suspend(struct device *dev)
static int musb_resume(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
- u8 devctl;
- u8 mask;
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int error;
+ u8 devctl;
+ u8 mask;
/*
* For static cmos like DaVinci, register values were preserved
@@ -2619,6 +2707,13 @@ static int musb_resume(struct device *dev)
musb_start(musb);
+ spin_lock_irqsave(&musb->lock, flags);
+ error = musb_run_resume_work(musb);
+ if (error)
+ dev_err(musb->controller, "resume work failed with %i\n",
+ error);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
return 0;
}
@@ -2627,14 +2722,16 @@ static int musb_runtime_suspend(struct device *dev)
struct musb *musb = dev_to_musb(dev);
musb_save_context(musb);
+ musb->is_runtime_suspended = 1;
return 0;
}
static int musb_runtime_resume(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
- static int first = 1;
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int error;
/*
* When pm_runtime_get_sync called for the first time in driver
@@ -2645,9 +2742,10 @@ static int musb_runtime_resume(struct device *dev)
* Also context restore without save does not make
* any sense
*/
- if (!first)
- musb_restore_context(musb);
- first = 0;
+ if (!musb->is_initialized)
+ return 0;
+
+ musb_restore_context(musb);
if (musb->need_finish_resume) {
musb->need_finish_resume = 0;
@@ -2655,6 +2753,14 @@ static int musb_runtime_resume(struct device *dev)
msecs_to_jiffies(USB_RESUME_TIMEOUT));
}
+ spin_lock_irqsave(&musb->lock, flags);
+ error = musb_run_resume_work(musb);
+ if (error)
+ dev_err(musb->controller, "resume work failed with %i\n",
+ error);
+ musb->is_runtime_suspended = 0;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
return 0;
}
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 2cb88a498f8a..91817d77d59c 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -303,13 +303,14 @@ struct musb_context_registers {
struct musb {
/* device lock */
spinlock_t lock;
+ spinlock_t list_lock; /* resume work list lock */
struct musb_io io;
const struct musb_platform_ops *ops;
struct musb_context_registers context;
irqreturn_t (*isr)(int, void *);
- struct work_struct irq_work;
+ struct delayed_work irq_work;
struct delayed_work deassert_reset_work;
struct delayed_work finish_resume_work;
struct delayed_work gadget_work;
@@ -337,6 +338,7 @@ struct musb {
struct list_head control; /* of musb_qh */
struct list_head in_bulk; /* of musb_qh */
struct list_head out_bulk; /* of musb_qh */
+ struct list_head pending_list; /* pending work list */
struct timer_list otg_timer;
struct notifier_block nb;
@@ -379,12 +381,15 @@ struct musb {
int port_mode; /* MUSB_PORT_MODE_* */
bool session;
- bool quirk_invalid_vbus;
+ unsigned long quirk_retries;
bool is_host;
int a_wait_bcon; /* VBUS timeout in msecs */
unsigned long idle_timeout; /* Next timeout in jiffies */
+ unsigned is_initialized:1;
+ unsigned is_runtime_suspended:1;
+
/* active means connected and not suspended */
unsigned is_active:1;
@@ -540,6 +545,10 @@ extern irqreturn_t musb_interrupt(struct musb *);
extern void musb_hnp_stop(struct musb *musb);
+int musb_queue_resume_work(struct musb *musb,
+ int (*callback)(struct musb *musb, void *data),
+ void *data);
+
static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
{
if (musb->ops->set_vbus)
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 0f17d2140db6..feae1561b9ab 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb)
musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
musb_writel(reg_base, wrp->epintr_clear,
wrp->txep_bitmap | wrp->rxep_bitmap);
+ del_timer_sync(&glue->timer);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
}
-static void otg_timer(unsigned long _musb)
+/* Caller must take musb->lock */
+static int dsps_check_status(struct musb *musb, void *unused)
{
- struct musb *musb = (void *)_musb;
void __iomem *mregs = musb->mregs;
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
u8 devctl;
- unsigned long flags;
int skip_session = 0;
- int err;
-
- err = pm_runtime_get_sync(dev);
- if (err < 0)
- dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
/*
* We poll because DSPS IP's won't expose several OTG-critical
@@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb)
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
usb_otg_state_string(musb->xceiv->otg->state));
- spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_VRISE:
mod_timer(&glue->timer, jiffies +
@@ -245,8 +239,30 @@ static void otg_timer(unsigned long _musb)
default:
break;
}
- spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static void otg_timer(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ struct device *dev = musb->controller;
+ unsigned long flags;
+ int err;
+
+ err = pm_runtime_get(dev);
+ if ((err != -EINPROGRESS) && err < 0) {
+ dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
+ pm_runtime_put_noidle(dev);
+
+ return;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+ err = musb_queue_resume_work(musb, dsps_check_status, NULL);
+ if (err < 0)
+ dev_err(dev, "%s resume work: %i\n", __func__, err);
+ spin_unlock_irqrestore(&musb->lock, flags);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -767,28 +783,13 @@ static int dsps_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, glue);
pm_runtime_enable(&pdev->dev);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
-
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
- goto err2;
- }
-
ret = dsps_create_musb_pdev(glue, pdev);
if (ret)
- goto err3;
-
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ goto err;
return 0;
-err3:
- pm_runtime_put_sync(&pdev->dev);
-err2:
- pm_runtime_dont_use_autosuspend(&pdev->dev);
+err:
pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -799,9 +800,6 @@ static int dsps_remove(struct platform_device *pdev)
platform_device_unregister(glue->musb);
- /* disable usbss clocks */
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 4042ea017985..a55173c9e564 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1114,7 +1114,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
musb_ep->dma ? "dma, " : "",
musb_ep->packet_sz);
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
fail:
spin_unlock_irqrestore(&musb->lock, flags);
@@ -1158,7 +1158,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
musb_ep->desc = NULL;
musb_ep->end_point.desc = NULL;
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
spin_unlock_irqrestore(&(musb->lock), flags);
@@ -1222,13 +1222,22 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req)
rxstate(musb, req);
}
+static int musb_ep_restart_resume_work(struct musb *musb, void *data)
+{
+ struct musb_request *req = data;
+
+ musb_ep_restart(musb, req);
+
+ return 0;
+}
+
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
struct musb_ep *musb_ep;
struct musb_request *request;
struct musb *musb;
- int status = 0;
+ int status;
unsigned long lockflags;
if (!ep || !req)
@@ -1245,6 +1254,17 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
if (request->ep != musb_ep)
return -EINVAL;
+ status = pm_runtime_get(musb->controller);
+ if ((status != -EINPROGRESS) && status < 0) {
+ dev_err(musb->controller,
+ "pm runtime get failed in %s\n",
+ __func__);
+ pm_runtime_put_noidle(musb->controller);
+
+ return status;
+ }
+ status = 0;
+
trace_musb_req_enq(request);
/* request is mine now... */
@@ -1255,7 +1275,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
map_dma_buffer(request, musb, musb_ep);
- pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, lockflags);
/* don't queue if the ep is down */
@@ -1271,8 +1290,14 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
list_add_tail(&request->list, &musb_ep->req_list);
/* if this is the head of the queue, start i/o ... */
- if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
- musb_ep_restart(musb, request);
+ if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
+ status = musb_queue_resume_work(musb,
+ musb_ep_restart_resume_work,
+ request);
+ if (status < 0)
+ dev_err(musb->controller, "%s resume work: %i\n",
+ __func__, status);
+ }
unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
@@ -1969,7 +1994,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
*/
/* Force check of devctl register for PM runtime */
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index cc1225485509..e8be8e39ab8f 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -513,17 +513,18 @@ static int omap2430_probe(struct platform_device *pdev)
}
pm_runtime_enable(glue->dev);
- pm_runtime_use_autosuspend(glue->dev);
- pm_runtime_set_autosuspend_delay(glue->dev, 100);
ret = platform_device_add(musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device\n");
- goto err2;
+ goto err3;
}
return 0;
+err3:
+ pm_runtime_disable(glue->dev);
+
err2:
platform_device_put(musb);
@@ -535,10 +536,7 @@ static int omap2430_remove(struct platform_device *pdev)
{
struct omap2430_glue *glue = platform_get_drvdata(pdev);
- pm_runtime_get_sync(glue->dev);
platform_device_unregister(glue->musb);
- pm_runtime_put_sync(glue->dev);
- pm_runtime_dont_use_autosuspend(glue->dev);
pm_runtime_disable(glue->dev);
return 0;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index df7c9f46be54..e85cc8e4e7a9 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -724,7 +724,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
idle_timeout = jiffies + (1 * HZ);
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
} else /* A-dev state machine */ {
dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
@@ -814,7 +814,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
break;
}
}
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
return idle_timeout;
}
@@ -864,7 +864,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
if (reg & ~TUSB_PRCM_WNORCS) {
musb->is_active = 1;
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
}
dev_dbg(musb->controller, "wake %sactive %02x\n",
musb->is_active ? "" : "in", reg);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f61477bed3a8..243ac5ebe46a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ff7f38d7800..6e9fc8bcc285 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21011c0a4c64..48ee04c94a75 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -596,6 +596,12 @@
#define STK541_PID 0x2109 /* Zigbee Controller */
/*
+ * Texas Instruments
+ */
+#define TI_VID 0x0451
+#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
+
+/*
* Blackfin gnICE JTAG
* http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
*/
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index ffd086733421..1a59f335b063 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
/* COMMAND STAGE */
/* let's send the command via the control pipe */
+ /*
+ * The command is sometimes (e.g. after scsi_eh_prep_cmnd) on the stack.
+ * The stack may be vmalloc'ed, so no DMA for us. Make a copy.
+ */
+ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
- us->ifnum, srb->cmnd, srb->cmd_len);
+ us->ifnum, us->iobuf, srb->cmd_len);
/* check the return code for the command */
usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
index d059ad4d0dbd..97ee1b46db69 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/uwb/lc-rc.c
@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
if (dev) {
rc = dev_get_drvdata(dev);
__uwb_rc_get(rc);
+ put_device(dev);
}
+
return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
find_rc_grandpa);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
return rc;
}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
index c1304b8d4985..678e93741ae1 100644
--- a/drivers/uwb/pal.c
+++ b/drivers/uwb/pal.c
@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
+ put_device(dev);
+
return (dev != NULL);
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index d624a527777f..031bc08d000d 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -829,8 +829,9 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
+ size_t size;
u8 *data = NULL;
- int ret = 0;
+ int max, ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
@@ -838,23 +839,31 @@ static long vfio_pci_ioctl(void *device_data,
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+ hdr.count >= (U32_MAX - hdr.start) ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
- size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (hdr.start >= max || hdr.start + hdr.count > max)
+ return -EINVAL;
- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
- size = sizeof(uint8_t);
- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
- size = sizeof(int32_t);
- else
- return -EINVAL;
+ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ size = 0;
+ break;
+ case VFIO_IRQ_SET_DATA_BOOL:
+ size = sizeof(uint8_t);
+ break;
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ size = sizeof(int32_t);
+ break;
+ default:
+ return -EINVAL;
+ }
- if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ if (size) {
+ if (hdr.argsz - minsz < hdr.count * size)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index c2e60893cd09..1c46045b0e7f 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -256,7 +256,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
if (!is_irq_none(vdev))
return -EINVAL;
- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
index 19ad8645d93c..e5d9bfc1703a 100644
--- a/drivers/video/fbdev/amba-clcd-versatile.c
+++ b/drivers/video/fbdev/amba-clcd-versatile.c
@@ -526,8 +526,8 @@ int versatile_clcd_init_panel(struct clcd_fb *fb,
np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
&clcd_id);
if (!np) {
- dev_err(dev, "no Versatile syscon node\n");
- return -ENODEV;
+ /* Vexpress does not have this */
+ return 0;
}
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 162689227a23..1cf907ecded4 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -533,6 +533,10 @@ hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect)
return "4:3";
case HDMI_PICTURE_ASPECT_16_9:
return "16:9";
+ case HDMI_PICTURE_ASPECT_64_27:
+ return "64:27";
+ case HDMI_PICTURE_ASPECT_256_135:
+ return "256:135";
case HDMI_PICTURE_ASPECT_RESERVED:
return "Reserved";
}
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 8a1076beecd3..32b0a7543433 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -88,6 +88,15 @@ static int of_parse_display_timing(const struct device_node *np,
dt->flags |= val ? DISPLAY_FLAGS_PIXDATA_POSEDGE :
DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ if (!of_property_read_u32(np, "syncclk-active", &val))
+ dt->flags |= val ? DISPLAY_FLAGS_SYNC_POSEDGE :
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
+ else if (dt->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))
+ dt->flags |= dt->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
+ DISPLAY_FLAGS_SYNC_POSEDGE :
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
+
if (of_property_read_bool(np, "interlaced"))
dt->flags |= DISPLAY_FLAGS_INTERLACED;
if (of_property_read_bool(np, "doublescan"))
@@ -110,7 +119,7 @@ static int of_parse_display_timing(const struct device_node *np,
* @name: name of the timing node
* @dt: display_timing struct to fill
**/
-int of_get_display_timing(struct device_node *np, const char *name,
+int of_get_display_timing(const struct device_node *np, const char *name,
struct display_timing *dt)
{
struct device_node *timing_np;
@@ -133,7 +142,7 @@ EXPORT_SYMBOL_GPL(of_get_display_timing);
* of_get_display_timings - parse all display_timing entries from a device_node
* @np: device_node with the subnodes
**/
-struct display_timings *of_get_display_timings(struct device_node *np)
+struct display_timings *of_get_display_timings(const struct device_node *np)
{
struct device_node *timings_np;
struct device_node *entry;
@@ -249,7 +258,7 @@ EXPORT_SYMBOL_GPL(of_get_display_timings);
* of_display_timings_exist - check if a display-timings node is provided
* @np: device_node with the timing
**/
-int of_display_timings_exist(struct device_node *np)
+int of_display_timings_exist(const struct device_node *np)
{
struct device_node *timings_np;
diff --git a/drivers/virtio/config.c b/drivers/virtio/config.c
deleted file mode 100644
index f70bcd2ff98f..000000000000
--- a/drivers/virtio/config.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Configuration space parsing helpers for virtio.
- *
- * The configuration is [type][len][... len bytes ...] fields.
- *
- * Copyright 2007 Rusty Russell, IBM Corporation.
- * GPL v2 or later.
- */
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/bug.h>
-
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e7003db12c4..181793f07852 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -577,6 +577,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ if (towards_target(vb))
+ virtballoon_changed(vdev);
return 0;
out_del_vqs:
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 8c4e61783441..6d9e5173d5fa 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
return -ENODEV;
}
- rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pci_dev->dev,
- DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ } else {
+ /*
+ * The virtio ring base address is expressed as a 32-bit PFN,
+ * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
+ */
+ dma_set_coherent_mask(&pci_dev->dev,
+ DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
+ }
+
if (rc)
dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ed9c9eeedfe5..489bfc61cf30 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
* making all of the arch DMA ops work on the vring device itself
* is a mess. For now, we use the parent device for DMA ops.
*/
-static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
return vq->vq.vdev->dev.parent;
}
@@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
}
@@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
* entry. Always do both to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
END_USE(vq);
@@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
* more to do. */
/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
- * entry. Always do both to keep code simple. */
+ * entry. Always update the event index to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
/* TODO: tune this threshold */
bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
}
/* Put everything in free lists. */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index fdd3228e0678..3eb58cb51e56 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -155,6 +155,7 @@ config TANGOX_WATCHDOG
config WDAT_WDT
tristate "ACPI Watchdog Action Table (WDAT)"
depends on ACPI
+ select WATCHDOG_CORE
select ACPI_WATCHDOG
help
This driver adds support for systems with ACPI Watchdog Action
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 2037e7a77a37..d764236072b1 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -91,11 +91,9 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
*/
bool afs_cm_incoming_call(struct afs_call *call)
{
- u32 operation_id = ntohl(call->operation_ID);
+ _enter("{CB.OP %u}", call->operation_ID);
- _enter("{CB.OP %u}", operation_id);
-
- switch (operation_id) {
+ switch (call->operation_ID) {
case CBCallBack:
call->type = &afs_SRXCBCallBack;
return true;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 96f4d764d1a6..31c616ab9b40 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -364,7 +364,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
buffer = kmap(page);
ret = afs_extract_data(call, buffer,
call->count, true);
- kunmap(buffer);
+ kunmap(page);
if (ret < 0)
return ret;
}
@@ -397,7 +397,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
page = call->reply3;
buffer = kmap(page);
memset(buffer + call->count, 0, PAGE_SIZE - call->count);
- kunmap(buffer);
+ kunmap(page);
}
_leave(" = 0 [done]");
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5497c8496055..535a38d2c1d0 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -112,7 +112,7 @@ struct afs_call {
bool need_attention; /* T if RxRPC poked us */
u16 service_id; /* RxRPC service ID to call */
__be16 port; /* target UDP port */
- __be32 operation_ID; /* operation ID for an incoming call */
+ u32 operation_ID; /* operation ID for an incoming call */
u32 count; /* count for use in unmarshalling */
__be32 tmp; /* place to extract temporary data */
afs_dataversion_t store_version; /* updated version expected from store */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 477928b25940..25f05a8d21b1 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -676,10 +676,11 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
ASSERTCMP(call->offset, <, 4);
/* the operation ID forms the first four bytes of the request data */
- ret = afs_extract_data(call, &call->operation_ID, 4, true);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
+ call->operation_ID = ntohl(call->tmp);
call->state = AFS_CALL_AWAIT_REQUEST;
call->offset = 0;
diff --git a/fs/aio.c b/fs/aio.c
index 1157e13a36d6..428484f2f841 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1078,6 +1078,17 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
unsigned tail, pos, head;
unsigned long flags;
+ if (kiocb->ki_flags & IOCB_WRITE) {
+ struct file *file = kiocb->ki_filp;
+
+ /*
+ * Tell lockdep we inherited freeze protection from the
+ * submission thread.
+ */
+ __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ file_end_write(file);
+ }
+
/*
* Special case handling for sync iocbs:
* - events go directly into the iocb for fast handling
@@ -1392,122 +1403,106 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
return -EINVAL;
}
-typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *);
-
-static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len,
- struct iovec **iovec,
- bool compat,
- struct iov_iter *iter)
+static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
+ bool vectored, bool compat, struct iov_iter *iter)
{
+ void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
+ size_t len = iocb->aio_nbytes;
+
+ if (!vectored) {
+ ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
+ *iovec = NULL;
+ return ret;
+ }
#ifdef CONFIG_COMPAT
if (compat)
- return compat_import_iovec(rw,
- (struct compat_iovec __user *)buf,
- len, UIO_FASTIOV, iovec, iter);
+ return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
+ iter);
#endif
- return import_iovec(rw, (struct iovec __user *)buf,
- len, UIO_FASTIOV, iovec, iter);
+ return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
}
-/*
- * aio_run_iocb:
- * Performs the initial checks and io submission.
- */
-static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
- char __user *buf, size_t len, bool compat)
+static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret)
+{
+ switch (ret) {
+ case -EIOCBQUEUED:
+ return ret;
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ case -ERESTARTNOHAND:
+ case -ERESTART_RESTARTBLOCK:
+ /*
+ * There's no easy way to restart the syscall since other AIOs
+ * may already be running. Just fail this IO with EINTR.
+ */
+ ret = -EINTR;
+ /*FALLTHRU*/
+ default:
+ aio_complete(req, ret, 0);
+ return 0;
+ }
+}
+
+static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
+ bool compat)
{
struct file *file = req->ki_filp;
- ssize_t ret;
- int rw;
- fmode_t mode;
- rw_iter_op *iter_op;
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter iter;
+ ssize_t ret;
- switch (opcode) {
- case IOCB_CMD_PREAD:
- case IOCB_CMD_PREADV:
- mode = FMODE_READ;
- rw = READ;
- iter_op = file->f_op->read_iter;
- goto rw_common;
-
- case IOCB_CMD_PWRITE:
- case IOCB_CMD_PWRITEV:
- mode = FMODE_WRITE;
- rw = WRITE;
- iter_op = file->f_op->write_iter;
- goto rw_common;
-rw_common:
- if (unlikely(!(file->f_mode & mode)))
- return -EBADF;
-
- if (!iter_op)
- return -EINVAL;
-
- if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV)
- ret = aio_setup_vectored_rw(rw, buf, len,
- &iovec, compat, &iter);
- else {
- ret = import_single_range(rw, buf, len, iovec, &iter);
- iovec = NULL;
- }
- if (!ret)
- ret = rw_verify_area(rw, file, &req->ki_pos,
- iov_iter_count(&iter));
- if (ret < 0) {
- kfree(iovec);
- return ret;
- }
-
- if (rw == WRITE)
- file_start_write(file);
-
- ret = iter_op(req, &iter);
-
- if (rw == WRITE)
- file_end_write(file);
- kfree(iovec);
- break;
-
- case IOCB_CMD_FDSYNC:
- if (!file->f_op->aio_fsync)
- return -EINVAL;
-
- ret = file->f_op->aio_fsync(req, 1);
- break;
+ if (unlikely(!(file->f_mode & FMODE_READ)))
+ return -EBADF;
+ if (unlikely(!file->f_op->read_iter))
+ return -EINVAL;
- case IOCB_CMD_FSYNC:
- if (!file->f_op->aio_fsync)
- return -EINVAL;
+ ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
+ if (ret)
+ return ret;
+ ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
+ if (!ret)
+ ret = aio_ret(req, file->f_op->read_iter(req, &iter));
+ kfree(iovec);
+ return ret;
+}
- ret = file->f_op->aio_fsync(req, 0);
- break;
+static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
+ bool compat)
+{
+ struct file *file = req->ki_filp;
+ struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+ struct iov_iter iter;
+ ssize_t ret;
- default:
- pr_debug("EINVAL: no operation provided\n");
+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
+ return -EBADF;
+ if (unlikely(!file->f_op->write_iter))
return -EINVAL;
- }
- if (ret != -EIOCBQUEUED) {
+ ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
+ if (ret)
+ return ret;
+ ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
+ if (!ret) {
+ req->ki_flags |= IOCB_WRITE;
+ file_start_write(file);
+ ret = aio_ret(req, file->f_op->write_iter(req, &iter));
/*
- * There's no easy way to restart the syscall since other AIO's
- * may be already running. Just fail this IO with EINTR.
+ * We release freeze protection in aio_complete(). Fool lockdep
+ * by telling it the lock got released so that it doesn't
+ * complain about held lock when we return to userspace.
*/
- if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
- ret == -ERESTARTNOHAND ||
- ret == -ERESTART_RESTARTBLOCK))
- ret = -EINTR;
- aio_complete(req, ret, 0);
+ __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
-
- return 0;
+ kfree(iovec);
+ return ret;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb, bool compat)
{
struct aio_kiocb *req;
+ struct file *file;
ssize_t ret;
/* enforce forwards compatibility on users */
@@ -1530,7 +1525,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
if (unlikely(!req))
return -EAGAIN;
- req->common.ki_filp = fget(iocb->aio_fildes);
+ req->common.ki_filp = file = fget(iocb->aio_fildes);
if (unlikely(!req->common.ki_filp)) {
ret = -EBADF;
goto out_put_req;
@@ -1565,13 +1560,29 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
req->ki_user_iocb = user_iocb;
req->ki_user_data = iocb->aio_data;
- ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode,
- (char __user *)(unsigned long)iocb->aio_buf,
- iocb->aio_nbytes,
- compat);
- if (ret)
- goto out_put_req;
+ get_file(file);
+ switch (iocb->aio_lio_opcode) {
+ case IOCB_CMD_PREAD:
+ ret = aio_read(&req->common, iocb, false, compat);
+ break;
+ case IOCB_CMD_PWRITE:
+ ret = aio_write(&req->common, iocb, false, compat);
+ break;
+ case IOCB_CMD_PREADV:
+ ret = aio_read(&req->common, iocb, true, compat);
+ break;
+ case IOCB_CMD_PWRITEV:
+ ret = aio_write(&req->common, iocb, true, compat);
+ break;
+ default:
+ pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
+ ret = -EINVAL;
+ break;
+ }
+ fput(file);
+ if (ret && ret != -EIOCBQUEUED)
+ goto out_put_req;
return 0;
out_put_req:
put_reqs_available(ctx, 1);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 210c94ac8818..4607af38c72e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2647,7 +2647,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
btrfs_free_delayed_extent_op(extent_op);
if (ret) {
+ spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
btrfs_delayed_ref_unlock(locked_ref);
btrfs_put_delayed_ref(ref);
btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 66a755150056..8ed05d95584a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5569,7 +5569,7 @@ void le_bitmap_set(u8 *map, unsigned int start, int len)
*p |= mask_to_set;
len -= bits_to_set;
bits_to_set = BITS_PER_BYTE;
- mask_to_set = ~(u8)0;
+ mask_to_set = ~0;
p++;
}
if (len) {
@@ -5589,7 +5589,7 @@ void le_bitmap_clear(u8 *map, unsigned int start, int len)
*p &= ~mask_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_BYTE;
- mask_to_clear = ~(u8)0;
+ mask_to_clear = ~0;
p++;
}
if (len) {
@@ -5679,7 +5679,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
kaddr[offset] |= mask_to_set;
len -= bits_to_set;
bits_to_set = BITS_PER_BYTE;
- mask_to_set = ~(u8)0;
+ mask_to_set = ~0;
if (++offset >= PAGE_SIZE && len > 0) {
offset = 0;
page = eb->pages[++i];
@@ -5721,7 +5721,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
kaddr[offset] &= ~mask_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_BYTE;
- mask_to_clear = ~(u8)0;
+ mask_to_clear = ~0;
if (++offset >= PAGE_SIZE && len > 0) {
offset = 0;
page = eb->pages[++i];
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2b790bda7998..8e3a5a266917 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4605,8 +4605,8 @@ delete:
BUG_ON(ret);
if (btrfs_should_throttle_delayed_refs(trans, root))
btrfs_async_run_delayed_refs(root,
- trans->transid,
- trans->delayed_ref_updates * 2, 0);
+ trans->delayed_ref_updates * 2,
+ trans->transid, 0);
if (be_nice) {
if (truncate_space_check(trans, root,
extent_num_bytes)) {
@@ -8931,9 +8931,14 @@ again:
* So even we call qgroup_free_data(), it won't decrease reserved
* space.
* 2) Not written to disk
- * This means the reserved space should be freed here.
+ * This means the reserved space should be freed here. However,
+ * if a truncate invalidates the page (by clearing PageDirty)
+ * and the page was accounted for while allocating the extent
+ * in btrfs_check_data_free_space(), we let the delayed ref
+ * free the entire extent.
*/
- btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
+ if (PageDirty(page))
+ btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 18e1aa0f85f5..7acbd2cf6192 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3814,6 +3814,11 @@ process_slot:
}
btrfs_release_path(path);
key.offset = next_key_min_offset;
+
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
}
ret = 0;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0ec8ffa37ab0..c4af0cdb783d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2728,7 +2728,14 @@ static int do_relocation(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(upper->eb, slot);
if (lowest) {
- BUG_ON(bytenr != node->bytenr);
+ if (bytenr != node->bytenr) {
+ btrfs_err(root->fs_info,
+ "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
+ bytenr, node->bytenr, slot,
+ upper->eb->start);
+ err = -EIO;
+ goto next;
+ }
} else {
if (node->eb->start == bytenr)
goto next;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 18630e800208..f995e3528a33 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1770,7 +1770,6 @@ const struct file_operations ceph_file_fops = {
.fsync = ceph_fsync,
.lock = ceph_lock,
.flock = ceph_flock,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = ceph_ioctl,
.compat_ioctl = ceph_ioctl,
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 8347c90cf483..5eb04129f938 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -808,7 +808,11 @@ calc_seckey(struct cifs_ses *ses)
struct crypto_skcipher *tfm_arc4;
struct scatterlist sgin, sgout;
struct skcipher_request *req;
- unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */
+ unsigned char *sec_key;
+
+ sec_key = kmalloc(CIFS_SESS_KEY_SIZE, GFP_KERNEL);
+ if (sec_key == NULL)
+ return -ENOMEM;
get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
@@ -816,7 +820,7 @@ calc_seckey(struct cifs_ses *ses)
if (IS_ERR(tfm_arc4)) {
rc = PTR_ERR(tfm_arc4);
cifs_dbg(VFS, "could not allocate crypto API arc4\n");
- return rc;
+ goto out;
}
rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response,
@@ -854,7 +858,8 @@ calc_seckey(struct cifs_ses *ses)
out_free_cipher:
crypto_free_skcipher(tfm_arc4);
-
+out:
+ kfree(sec_key);
return rc;
}
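The calc_seckey() change above moves the nonce off the stack because the buffer is fed to the crypto layer through a scatterlist, and with virtually mapped stacks a stack address cannot be translated to a page. A minimal sketch of that idiom (illustrative only, not part of the patch):

	/* Buffers passed to sg_init_one()/sg_set_buf() must come from the slab
	 * (or real pages), never from the stack, so virt_to_page() works. */
	static int example_random_nonce_sg(struct scatterlist *sg, u8 **bufp)
	{
		u8 *nonce = kmalloc(CIFS_SESS_KEY_SIZE, GFP_KERNEL);

		if (!nonce)
			return -ENOMEM;
		get_random_bytes(nonce, CIFS_SESS_KEY_SIZE);
		sg_init_one(sg, nonce, CIFS_SESS_KEY_SIZE);
		*bufp = nonce;	/* caller kfree()s once the request completes */
		return 0;
	}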
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 3f3185febc58..e3fed9249a04 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -3427,6 +3427,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
__u16 rc = 0;
struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
struct posix_acl_xattr_header *local_acl = (void *)pACL;
+ struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
int count;
int i;
@@ -3453,8 +3454,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
return 0;
}
for (i = 0; i < count; i++) {
- rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
- (struct posix_acl_xattr_entry *)(local_acl + 1));
+ rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &ace[i]);
if (rc != 0) {
/* ACE not converted */
break;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index aab5227979e2..4547aeddd12b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -412,6 +412,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
} while (server->tcpStatus == CifsNeedReconnect);
+ if (server->tcpStatus == CifsNeedNegotiate)
+ mod_delayed_work(cifsiod_wq, &server->echo, 0);
+
return rc;
}
@@ -421,17 +424,25 @@ cifs_echo_request(struct work_struct *work)
int rc;
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work);
- unsigned long echo_interval = server->echo_interval;
+ unsigned long echo_interval;
+
+ /*
+ * If we need to renegotiate, set echo interval to zero to
+ * immediately call echo service where we can renegotiate.
+ */
+ if (server->tcpStatus == CifsNeedNegotiate)
+ echo_interval = 0;
+ else
+ echo_interval = server->echo_interval;
/*
- * We cannot send an echo if it is disabled or until the
- * NEGOTIATE_PROTOCOL request is done, which is indicated by
- * server->ops->need_neg() == true. Also, no need to ping if
- * we got a response recently.
+ * We cannot send an echo if it is disabled.
+ * Also, no need to ping if we got a response recently.
*/
if (server->tcpStatus == CifsNeedReconnect ||
- server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
+ server->tcpStatus == CifsExiting ||
+ server->tcpStatus == CifsNew ||
(server->ops->can_echo && !server->ops->can_echo(server)) ||
time_before(jiffies, server->lstrp + echo_interval - HZ))
goto requeue_echo;
@@ -442,7 +453,7 @@ cifs_echo_request(struct work_struct *work)
server->hostname);
requeue_echo:
- queue_delayed_work(cifsiod_wq, &server->echo, echo_interval);
+ queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
}
static bool
diff --git a/fs/coredump.c b/fs/coredump.c
index 281b768000e6..eb9c92c9b20f 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -1,6 +1,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
@@ -423,7 +424,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
if (core_waiters > 0) {
struct core_thread *ptr;
+ freezer_do_not_count();
wait_for_completion(&core_state->startup);
+ freezer_count();
/*
* Wait for all the threads to become inactive, so that
* all the thread context (extended register state, like
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 9a28133ac3b8..9b774f4b50c8 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -39,65 +39,54 @@ static void fname_crypt_complete(struct crypto_async_request *req, int res)
static int fname_encrypt(struct inode *inode,
const struct qstr *iname, struct fscrypt_str *oname)
{
- u32 ciphertext_len;
struct skcipher_request *req = NULL;
DECLARE_FS_COMPLETION_RESULT(ecr);
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
char iv[FS_CRYPTO_BLOCK_SIZE];
- struct scatterlist src_sg, dst_sg;
+ struct scatterlist sg;
int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
- char *workbuf, buf[32], *alloc_buf = NULL;
- unsigned lim;
+ unsigned int lim;
+ unsigned int cryptlen;
lim = inode->i_sb->s_cop->max_namelen(inode);
if (iname->len <= 0 || iname->len > lim)
return -EIO;
- ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE);
- ciphertext_len = round_up(ciphertext_len, padding);
- ciphertext_len = min(ciphertext_len, lim);
+ /*
+ * Copy the filename to the output buffer for encrypting in-place and
+ * pad it with the needed number of NUL bytes.
+ */
+ cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
+ cryptlen = round_up(cryptlen, padding);
+ cryptlen = min(cryptlen, lim);
+ memcpy(oname->name, iname->name, iname->len);
+ memset(oname->name + iname->len, 0, cryptlen - iname->len);
- if (ciphertext_len <= sizeof(buf)) {
- workbuf = buf;
- } else {
- alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
- if (!alloc_buf)
- return -ENOMEM;
- workbuf = alloc_buf;
- }
+ /* Initialize the IV */
+ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
- /* Allocate request */
+ /* Set up the encryption request */
req = skcipher_request_alloc(tfm, GFP_NOFS);
if (!req) {
printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n", __func__);
- kfree(alloc_buf);
+ "%s: skcipher_request_alloc() failed\n", __func__);
return -ENOMEM;
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
fname_crypt_complete, &ecr);
+ sg_init_one(&sg, oname->name, cryptlen);
+ skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
- /* Copy the input */
- memcpy(workbuf, iname->name, iname->len);
- if (iname->len < ciphertext_len)
- memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
-
- /* Initialize IV */
- memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
-
- /* Create encryption request */
- sg_init_one(&src_sg, workbuf, ciphertext_len);
- sg_init_one(&dst_sg, oname->name, ciphertext_len);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
+ /* Do the encryption */
res = crypto_skcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
+ /* Request is being completed asynchronously; wait for it */
wait_for_completion(&ecr.completion);
res = ecr.res;
}
- kfree(alloc_buf);
skcipher_request_free(req);
if (res < 0) {
printk_ratelimited(KERN_ERR
@@ -105,7 +94,7 @@ static int fname_encrypt(struct inode *inode,
return res;
}
- oname->len = ciphertext_len;
+ oname->len = cryptlen;
return 0;
}
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 82f0285f5d08..67fb6d8876d0 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -185,7 +185,7 @@ int get_crypt_info(struct inode *inode)
struct crypto_skcipher *ctfm;
const char *cipher_str;
int keysize;
- u8 raw_key[FS_MAX_KEY_SIZE];
+ u8 *raw_key = NULL;
int res;
res = fscrypt_initialize();
@@ -238,6 +238,15 @@ retry:
if (res)
goto out;
+ /*
+ * This cannot be a stack buffer because it is passed to the scatterlist
+ * crypto API as part of key derivation.
+ */
+ res = -ENOMEM;
+ raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
+ if (!raw_key)
+ goto out;
+
if (fscrypt_dummy_context_enabled(inode)) {
memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
goto got_key;
@@ -276,7 +285,8 @@ got_key:
if (res)
goto out;
- memzero_explicit(raw_key, sizeof(raw_key));
+ kzfree(raw_key);
+ raw_key = NULL;
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
put_crypt_info(crypt_info);
goto retry;
@@ -287,7 +297,7 @@ out:
if (res == -ENOKEY)
res = 0;
put_crypt_info(crypt_info);
- memzero_explicit(raw_key, sizeof(raw_key));
+ kzfree(raw_key);
return res;
}
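Both fscrypt hunks above apply the same two fixes: the raw key is heap-allocated (again because it is handed to the scatterlist-based crypto API) and released with kzfree() so the key bytes are cleared on every exit path. A short sketch, with derive_and_program_key() as a hypothetical stand-in for the real derivation step:

	static int example_use_key(void)
	{
		u8 *raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
		int res;

		if (!raw_key)
			return -ENOMEM;
		res = derive_and_program_key(raw_key);	/* hypothetical */
		kzfree(raw_key);	/* zeroes the key material, then frees it */
		return res;
	}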
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 282a51b07c57..a8a750f59621 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -235,6 +235,7 @@ struct ext4_io_submit {
#define EXT4_MAX_BLOCK_SIZE 65536
#define EXT4_MIN_BLOCK_LOG_SIZE 10
#define EXT4_MAX_BLOCK_LOG_SIZE 16
+#define EXT4_MAX_CLUSTER_LOG_SIZE 30
#ifdef __KERNEL__
# define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
#else
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 20da99da0a34..52b0530c5d65 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3565,7 +3565,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
- "Unsupported filesystem blocksize %d", blocksize);
+ "Unsupported filesystem blocksize %d (%d log_block_size)",
+ blocksize, le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
+ if (le32_to_cpu(es->s_log_block_size) >
+ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log block size: %u",
+ le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
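The new check rejects the on-disk exponent before it can be used as a shift; the same shape is repeated for s_log_cluster_size in the next hunk. Roughly (constants as defined in ext4.h; sketch only):

	static int example_check_log_block_size(const struct ext4_super_block *es)
	{
		u32 log_bs = le32_to_cpu(es->s_log_block_size);

		/* reject before computing EXT4_MIN_BLOCK_SIZE << log_bs */
		if (log_bs > EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)
			return -EINVAL;
		return 0;
	}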
@@ -3697,6 +3705,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"block size (%d)", clustersize, blocksize);
goto failed_mount;
}
+ if (le32_to_cpu(es->s_log_cluster_size) >
+ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log cluster size: %u",
+ le32_to_cpu(es->s_log_cluster_size));
+ goto failed_mount;
+ }
sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
le32_to_cpu(es->s_log_block_size);
sbi->s_clusters_per_group =
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 6a4d0e5418a1..b3ebe512d64c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -286,6 +286,11 @@ const struct dentry_operations fuse_dentry_operations = {
.d_release = fuse_dentry_release,
};
+const struct dentry_operations fuse_root_dentry_operations = {
+ .d_init = fuse_dentry_init,
+ .d_release = fuse_dentry_release,
+};
+
int fuse_valid_type(int m)
{
return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index abc66a6237fd..2401c5dabb2a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
{
struct inode *inode = page->mapping->host;
+ /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
+ if (!copied)
+ goto unlock;
+
if (!PageUptodate(page)) {
/* Zero any unwritten bytes at the end of the page */
size_t endoff = (pos + copied) & ~PAGE_MASK;
@@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
fuse_write_update_size(inode, pos + copied);
set_page_dirty(page);
+
+unlock:
unlock_page(page);
put_page(page);
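The fuse_write_end() hunks above add an early-out for a zero-byte copy (for instance when the user buffer faulted): nothing was written, so there is nothing to zero-fill, no size to extend and no reason to dirty the page. A sketch of that shape, with extend_and_dirty() as a hypothetical helper:

	static int example_write_end(struct page *page, loff_t pos, unsigned copied)
	{
		if (copied)	/* zero-byte copy: only release the page */
			extend_and_dirty(page, pos + copied);
		unlock_page(page);
		put_page(page);
		return copied;
	}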
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0dfbb136e59a..91307940c8ac 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -692,6 +692,7 @@ static inline u64 get_node_id(struct inode *inode)
extern const struct file_operations fuse_dev_operations;
extern const struct dentry_operations fuse_dentry_operations;
+extern const struct dentry_operations fuse_root_dentry_operations;
/**
* Inode to nodeid comparison.
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 17141099f2e7..6fe6a88ecb4a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1131,10 +1131,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
err = -ENOMEM;
root = fuse_get_root_inode(sb, d.rootmode);
+ sb->s_d_op = &fuse_root_dentry_operations;
root_dentry = d_make_root(root);
if (!root_dentry)
goto err_dev_free;
- /* only now - we want root dentry with NULL ->d_op */
+ /* Root dentry doesn't have .d_revalidate */
sb->s_d_op = &fuse_dentry_operations;
init_req = fuse_request_alloc(0);
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 98b3eb7d8eaf..0ec137310320 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -377,9 +377,9 @@ repeat:
{
int p;
for (p = 0; p < rr->u.ER.len_id; p++)
- printk("%c", rr->u.ER.data[p]);
+ printk(KERN_CONT "%c", rr->u.ER.data[p]);
}
- printk("\n");
+ printk(KERN_CONT "\n");
break;
case SIG('P', 'X'):
inode->i_mode = isonum_733(rr->u.PX.mode);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 532d8e242d4d..484bebc20bca 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -197,7 +197,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
}
ret = -EPROTONOSUPPORT;
- if (minorversion == 0)
+ if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
ret = nfs4_callback_up_net(serv, net);
else if (xprt->ops->bc_up)
ret = xprt->ops->bc_up(serv, net);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 7555ba889d1f..ebecfb8fba06 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -314,7 +314,8 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
/* Match the full socket address */
if (!rpc_cmp_addr_port(sap, clap))
/* Match all xprt_switch full socket addresses */
- if (!rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient,
+ if (IS_ERR(clp->cl_rpcclient) ||
+ !rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient,
sap))
continue;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index c8162c660c44..5551e8ef67fd 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -98,7 +98,7 @@ rename_retry:
return end;
}
namelen = strlen(base);
- if (flags & NFS_PATH_CANONICAL) {
+ if (*end == '/') {
/* Strip off excess slashes in base string */
while (namelen > 0 && base[namelen - 1] == '/')
namelen--;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9b3a82abab07..1452177c822d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -542,6 +542,13 @@ static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
}
+static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state,
+ const nfs4_stateid *stateid)
+{
+ return test_bit(NFS_OPEN_STATE, &state->flags) &&
+ nfs4_stateid_match_other(&state->open_stateid, stateid);
+}
+
#else
#define nfs4_close_state(a, b) do { } while (0)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7897826d7c51..241da19b7da4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1451,7 +1451,6 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
}
static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
- nfs4_stateid *arg_stateid,
nfs4_stateid *stateid, fmode_t fmode)
{
clear_bit(NFS_O_RDWR_STATE, &state->flags);
@@ -1469,10 +1468,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
}
if (stateid == NULL)
return;
- /* Handle races with OPEN */
- if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
- (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
- !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
+ /* Handle OPEN+OPEN_DOWNGRADE races */
+ if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
+ !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
nfs_resync_open_stateid_locked(state);
return;
}
@@ -1486,7 +1484,9 @@ static void nfs_clear_open_stateid(struct nfs4_state *state,
nfs4_stateid *stateid, fmode_t fmode)
{
write_seqlock(&state->seqlock);
- nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
+ /* Ignore if the CLOSE argument doesn't match the current stateid */
+ if (nfs4_state_match_open_stateid_other(state, arg_stateid))
+ nfs_clear_open_stateid_locked(state, stateid, fmode);
write_sequnlock(&state->seqlock);
if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
@@ -2564,15 +2564,23 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
int status, ret = NFS_OK;
- struct nfs4_lock_state *lsp;
+ struct nfs4_lock_state *lsp, *prev = NULL;
struct nfs_server *server = NFS_SERVER(state->inode);
if (!test_bit(LK_STATE_IN_USE, &state->flags))
goto out;
+
+ spin_lock(&state->state_lock);
list_for_each_entry(lsp, &state->lock_states, ls_locks) {
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
+ atomic_inc(&lsp->ls_count);
+ spin_unlock(&state->state_lock);
+
+ nfs4_put_lock_state(prev);
+ prev = lsp;
+
status = nfs41_test_and_free_expired_stateid(server,
&lsp->ls_stateid,
cred);
@@ -2585,10 +2593,14 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
} else if (status != NFS_OK) {
ret = status;
- break;
+ nfs4_put_lock_state(prev);
+ goto out;
}
+ spin_lock(&state->state_lock);
}
- };
+ }
+ spin_unlock(&state->state_lock);
+ nfs4_put_lock_state(prev);
out:
return ret;
}
@@ -3122,7 +3134,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
} else if (is_rdwr)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
- if (!nfs4_valid_open_stateid(state))
+ if (!nfs4_valid_open_stateid(state) ||
+ test_bit(NFS_OPEN_STATE, &state->flags) == 0)
call_close = 0;
spin_unlock(&state->owner->so_lock);
@@ -5569,6 +5582,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
switch (task->tk_status) {
case 0:
renew_lease(data->res.server, data->timestamp);
+ break;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_EXPIRED:
@@ -5579,8 +5593,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
task->tk_status = 0;
- if (data->roc)
- pnfs_roc_set_barrier(data->inode, data->roc_barrier);
break;
default:
if (nfs4_async_handle_error(task, data->res.server,
@@ -5590,6 +5602,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
}
}
data->rpc_status = task->tk_status;
+ if (data->roc && data->rpc_status == 0)
+ pnfs_roc_set_barrier(data->inode, data->roc_barrier);
}
static void nfs4_delegreturn_release(void *calldata)
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index b62973045a3e..a61350f75c74 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -178,12 +178,14 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
__must_hold(&tbl->slot_tbl_lock)
{
struct nfs4_slot *slot;
+ int ret;
slot = nfs4_lookup_slot(tbl, slotid);
- if (IS_ERR(slot))
- return PTR_ERR(slot);
- *seq_nr = slot->seq_nr;
- return 0;
+ ret = PTR_ERR_OR_ZERO(slot);
+ if (!ret)
+ *seq_nr = slot->seq_nr;
+
+ return ret;
}
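The rewritten helper above is a textbook use of PTR_ERR_OR_ZERO(): the IS_ERR()/PTR_ERR() test collapses into one assignment and the success and error paths share a single return. The patched function itself already shows the pattern; the sketch below simply restates it in isolation:

	static int example_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
				     u32 *seq_nr)
	{
		struct nfs4_slot *slot = nfs4_lookup_slot(tbl, slotid);
		int ret = PTR_ERR_OR_ZERO(slot);

		if (!ret)
			*seq_nr = slot->seq_nr;
		return ret;
	}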
/*
@@ -196,7 +198,7 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
u32 slotid, u32 seq_nr)
{
- u32 cur_seq;
+ u32 cur_seq = 0;
bool ret = false;
spin_lock(&tbl->slot_tbl_lock);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5f4281ec5f72..0959c9661662 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1547,6 +1547,7 @@ restart:
ssleep(1);
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 56b2d96f9103..259ef85f435a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -146,6 +146,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
u32 id;
int i;
+ if (fsinfo->nlayouttypes == 0)
+ goto out_no_driver;
if (!(server->nfs_client->cl_exchange_flags &
(EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index b10d557f9c9e..ee36efd5aece 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -84,6 +84,8 @@ struct nfsd_net {
struct list_head client_lru;
struct list_head close_lru;
struct list_head del_recall_lru;
+
+ /* protected by blocked_locks_lock */
struct list_head blocked_locks_lru;
struct delayed_work laundromat_work;
@@ -91,6 +93,9 @@ struct nfsd_net {
/* client_lock protects the client lru list and session hash table */
spinlock_t client_lock;
+ /* protects blocked_locks_lru */
+ spinlock_t blocked_locks_lock;
+
struct file *rec_file;
bool in_grace;
const struct nfsd4_client_tracking_ops *client_tracking_ops;
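The two new netns fields introduce a dedicated lock for the blocked-locks LRU; the nfs4state.c hunks that follow convert every walk of that list from client_lock to blocked_locks_lock. The rule the comments document, in sketch form (illustrative only):

	/* All additions, removals and traversals of blocked_locks_lru nest
	 * under blocked_locks_lock, never under client_lock. */
	spin_lock(&nn->blocked_locks_lock);
	if (!list_empty(&nbl->nbl_list)) {
		list_del_init(&nbl->nbl_list);
		list_del_init(&nbl->nbl_lru);
	}
	spin_unlock(&nn->blocked_locks_lock);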
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9752beb78659..4b4beaaa4eaa 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -217,7 +217,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
{
struct nfsd4_blocked_lock *cur, *found = NULL;
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
if (fh_match(fh, &cur->nbl_fh)) {
list_del_init(&cur->nbl_list);
@@ -226,7 +226,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
break;
}
}
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
if (found)
posix_unblock_lock(&found->nbl_lock);
return found;
@@ -1227,9 +1227,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
-
- lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
@@ -1238,12 +1236,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+ struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
- spin_lock(&oo->oo_owner.so_client->cl_lock);
+ spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
- spin_unlock(&oo->oo_owner.so_client->cl_lock);
+ spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}
@@ -4665,7 +4663,7 @@ nfs4_laundromat(struct nfsd_net *nn)
* indefinitely once the lock does become free.
*/
BUG_ON(!list_empty(&reaplist));
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
while (!list_empty(&nn->blocked_locks_lru)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
struct nfsd4_blocked_lock, nbl_lru);
@@ -4678,7 +4676,7 @@ nfs4_laundromat(struct nfsd_net *nn)
list_move(&nbl->nbl_lru, &reaplist);
list_del_init(&nbl->nbl_list);
}
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
@@ -5439,13 +5437,13 @@ nfsd4_lm_notify(struct file_lock *fl)
bool queue = false;
/* An empty list means that something else is going to be using it */
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
if (!list_empty(&nbl->nbl_list)) {
list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru);
queue = true;
}
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
if (queue)
nfsd4_run_cb(&nbl->nbl_cb);
@@ -5868,10 +5866,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (fl_flags & FL_SLEEP) {
nbl->nbl_time = jiffies;
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
}
err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
@@ -5900,10 +5898,10 @@ out:
if (nbl) {
/* dequeue it if we queued it before */
if (fl_flags & FL_SLEEP) {
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru);
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
}
free_blocked_lock(nbl);
}
@@ -6943,9 +6941,11 @@ static int nfs4_state_create_net(struct net *net)
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
- INIT_LIST_HEAD(&nn->blocked_locks_lru);
spin_lock_init(&nn->client_lock);
+ spin_lock_init(&nn->blocked_locks_lock);
+ INIT_LIST_HEAD(&nn->blocked_locks_lru);
+
INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
get_net(net);
@@ -7063,14 +7063,14 @@ nfs4_state_shutdown_net(struct net *net)
}
BUG_ON(!list_empty(&reaplist));
- spin_lock(&nn->client_lock);
+ spin_lock(&nn->blocked_locks_lock);
while (!list_empty(&nn->blocked_locks_lru)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
struct nfsd4_blocked_lock, nbl_lru);
list_move(&nbl->nbl_lru, &reaplist);
list_del_init(&nbl->nbl_list);
}
- spin_unlock(&nn->client_lock);
+ spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index a18613579001..0ee19ecc982d 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1544,8 +1544,6 @@ const struct file_operations ntfs_dir_ops = {
.iterate = ntfs_readdir, /* Read directory contents. */
#ifdef NTFS_RW
.fsync = ntfs_dir_fsync, /* Sync a directory to disk. */
- /*.aio_fsync = ,*/ /* Sync all outstanding async
- i/o operations on a kiocb. */
#endif /* NTFS_RW */
/*.ioctl = ,*/ /* Perform function on the
mounted filesystem. */
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e7054e2ac922..3ecb9f337b7d 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3699,7 +3699,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
struct ocfs2_dx_root_block *dx_root)
{
- int credits = ocfs2_clusters_to_blocks(osb->sb, 2);
+ int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
credits += ocfs2_quota_trans_credits(osb->sb);
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index eb09aa026723..38887cc5577f 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -114,6 +114,7 @@ static const struct seq_operations help_debug_ops = {
};
const struct file_operations debug_help_fops = {
+ .owner = THIS_MODULE,
.open = orangefs_debug_help_open,
.read = seq_read,
.release = seq_release,
@@ -121,6 +122,7 @@ const struct file_operations debug_help_fops = {
};
static const struct file_operations kernel_debug_fops = {
+ .owner = THIS_MODULE,
.open = orangefs_debug_open,
.read = orangefs_debug_read,
.write = orangefs_debug_write,
@@ -141,6 +143,9 @@ static struct client_debug_mask client_debug_mask;
*/
static DEFINE_MUTEX(orangefs_debug_lock);
+/* Used to protect data in ORANGEFS_KMOD_DEBUG_HELP_FILE */
+static DEFINE_MUTEX(orangefs_help_file_lock);
+
/*
* initialize kmod debug operations, create orangefs debugfs dir and
* ORANGEFS_KMOD_DEBUG_HELP_FILE.
@@ -289,6 +294,8 @@ static void *help_start(struct seq_file *m, loff_t *pos)
gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n");
+ mutex_lock(&orangefs_help_file_lock);
+
if (*pos == 0)
payload = m->private;
@@ -305,6 +312,7 @@ static void *help_next(struct seq_file *m, void *v, loff_t *pos)
static void help_stop(struct seq_file *m, void *p)
{
gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n");
+ mutex_unlock(&orangefs_help_file_lock);
}
static int help_show(struct seq_file *m, void *v)
@@ -610,32 +618,54 @@ out:
* /sys/kernel/debug/orangefs/debug-help can be catted to
* see all the available kernel and client debug keywords.
*
- * When the kernel boots, we have no idea what keywords the
+ * When orangefs.ko initializes, we have no idea what keywords the
* client supports, nor their associated masks.
*
- * We pass through this function once at boot and stamp a
+ * We pass through this function once at module-load and stamp a
* boilerplate "we don't know" message for the client in the
* debug-help file. We pass through here again when the client
* starts and then we can fill out the debug-help file fully.
*
* The client might be restarted any number of times between
- * reboots, we only build the debug-help file the first time.
+ * module reloads, we only build the debug-help file the first time.
*/
int orangefs_prepare_debugfs_help_string(int at_boot)
{
- int rc = -EINVAL;
- int i;
- int byte_count = 0;
char *client_title = "Client Debug Keywords:\n";
char *kernel_title = "Kernel Debug Keywords:\n";
+ size_t string_size = DEBUG_HELP_STRING_SIZE;
+ size_t result_size;
+ size_t i;
+ char *new;
+ int rc = -EINVAL;
gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
- if (at_boot) {
- byte_count += strlen(HELP_STRING_UNINITIALIZED);
+ if (at_boot)
client_title = HELP_STRING_UNINITIALIZED;
- } else {
- /*
+
+ /* build a new debug_help_string. */
+ new = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
+ if (!new) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * strlcat(dst, src, size) will append at most
+ * "size - strlen(dst) - 1" bytes of src onto dst,
+ * null terminating the result, and return the total
+ * length of the string it tried to create.
+ *
+ * We'll just plow through here building our new debug
+ * help string and let strlcat take care of ensuring that
+ * dst doesn't overflow.
+ */
+ strlcat(new, client_title, string_size);
+
+ if (!at_boot) {
+
+ /*
* fill the client keyword/mask array and remember
* how many elements there were.
*/
@@ -644,64 +674,40 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
if (cdm_element_count <= 0)
goto out;
- /* Count the bytes destined for debug_help_string. */
- byte_count += strlen(client_title);
-
for (i = 0; i < cdm_element_count; i++) {
- byte_count += strlen(cdm_array[i].keyword + 2);
- if (byte_count >= DEBUG_HELP_STRING_SIZE) {
- pr_info("%s: overflow 1!\n", __func__);
- goto out;
- }
+ strlcat(new, "\t", string_size);
+ strlcat(new, cdm_array[i].keyword, string_size);
+ strlcat(new, "\n", string_size);
}
-
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "%s: cdm_element_count:%d:\n",
- __func__,
- cdm_element_count);
}
- byte_count += strlen(kernel_title);
+ strlcat(new, "\n", string_size);
+ strlcat(new, kernel_title, string_size);
+
for (i = 0; i < num_kmod_keyword_mask_map; i++) {
- byte_count +=
- strlen(s_kmod_keyword_mask_map[i].keyword + 2);
- if (byte_count >= DEBUG_HELP_STRING_SIZE) {
- pr_info("%s: overflow 2!\n", __func__);
- goto out;
- }
+ strlcat(new, "\t", string_size);
+ strlcat(new, s_kmod_keyword_mask_map[i].keyword, string_size);
+ result_size = strlcat(new, "\n", string_size);
}
- /* build debug_help_string. */
- debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
- if (!debug_help_string) {
- rc = -ENOMEM;
+ /* See if we tried to put too many bytes into "new"... */
+ if (result_size >= string_size) {
+ kfree(new);
goto out;
}
- strcat(debug_help_string, client_title);
-
- if (!at_boot) {
- for (i = 0; i < cdm_element_count; i++) {
- strcat(debug_help_string, "\t");
- strcat(debug_help_string, cdm_array[i].keyword);
- strcat(debug_help_string, "\n");
- }
- }
-
- strcat(debug_help_string, "\n");
- strcat(debug_help_string, kernel_title);
-
- for (i = 0; i < num_kmod_keyword_mask_map; i++) {
- strcat(debug_help_string, "\t");
- strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword);
- strcat(debug_help_string, "\n");
+ if (at_boot) {
+ debug_help_string = new;
+ } else {
+ mutex_lock(&orangefs_help_file_lock);
+ memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE);
+ strlcat(debug_help_string, new, string_size);
+ mutex_unlock(&orangefs_help_file_lock);
}
rc = 0;
-out:
-
- return rc;
+out: return rc;
}
@@ -959,8 +965,12 @@ int orangefs_debugfs_new_client_string(void __user *arg)
ret = copy_from_user(&client_debug_array_string,
(void __user *)arg,
ORANGEFS_MAX_DEBUG_STRING_LEN);
- if (ret != 0)
+
+ if (ret != 0) {
+ pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+ __func__);
return -EIO;
+ }
/*
* The real client-core makes an effort to ensure
@@ -975,45 +985,18 @@ int orangefs_debugfs_new_client_string(void __user *arg)
client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
'\0';
- if (ret != 0) {
- pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
- __func__);
- return -EIO;
- }
-
pr_info("%s: client debug array string has been received.\n",
__func__);
if (!help_string_initialized) {
- /* Free the "we don't know yet" default string... */
- kfree(debug_help_string);
-
- /* build a proper debug help string */
+ /* Build a proper debug help string. */
if (orangefs_prepare_debugfs_help_string(0)) {
gossip_err("%s: no debug help string \n",
__func__);
return -EIO;
}
- /* Replace the boilerplate boot-time debug-help file. */
- debugfs_remove(help_file_dentry);
-
- help_file_dentry =
- debugfs_create_file(
- ORANGEFS_KMOD_DEBUG_HELP_FILE,
- 0444,
- debug_dir,
- debug_help_string,
- &debug_help_fops);
-
- if (!help_file_dentry) {
- gossip_err("%s: debugfs_create_file failed for"
- " :%s:!\n",
- __func__,
- ORANGEFS_KMOD_DEBUG_HELP_FILE);
- return -EIO;
- }
}
debug_mask_to_string(&client_debug_mask, 1);
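The rework above leans on the strlcat() return value: it is the total length the function tried to create, so a result greater than or equal to the destination size means the string was truncated. A tiny sketch of that truncation check (illustrative only):

	char buf[64] = "";
	size_t n;

	n = strlcat(buf, "Kernel Debug Keywords:\n", sizeof(buf));
	n = strlcat(buf, "\tsuper\n", sizeof(buf));
	if (n >= sizeof(buf))
		pr_warn("debug help string truncated\n");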
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index 2e5b03065f34..4113eb0495bf 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -124,7 +124,7 @@ static int __init orangefs_init(void)
* unknown at boot time.
*
* orangefs_prepare_debugfs_help_string will be used again
- * later to rebuild the debug-help file after the client starts
+ * later to rebuild the debug-help-string after the client starts
* and passes along the needed info. The argument signifies
* which time orangefs_prepare_debugfs_help_string is being
* called.
@@ -152,7 +152,9 @@ static int __init orangefs_init(void)
ret = register_filesystem(&orangefs_fs_type);
if (ret == 0) {
- pr_info("orangefs: module version %s loaded\n", ORANGEFS_VERSION);
+ pr_info("%s: module version %s loaded\n",
+ __func__,
+ ORANGEFS_VERSION);
ret = 0;
goto out;
}
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index aeb60f791418..36795eed40b0 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -178,6 +178,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
len -= bytes;
}
+ if (!error)
+ error = vfs_fsync(new_file, 0);
fput(new_file);
out_fput:
fput(old_file);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c58f01babf30..7fb53d055537 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -270,9 +270,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)
if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
return NULL;
- if (!realinode->i_op->get_acl)
- return NULL;
-
old_cred = ovl_override_creds(inode->i_sb);
acl = get_acl(realinode, type);
revert_creds(old_cred);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index bcf3965be819..0e100856c7b8 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -328,11 +328,11 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (!real)
goto bug;
+ /* Handle recursion */
+ real = d_real(real, inode, open_flags);
+
if (!inode || inode == d_inode(real))
return real;
-
- /* Handle recursion */
- return d_real(real, inode, open_flags);
bug:
WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
@@ -1037,6 +1037,21 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
posix_acl_release(acl);
+ /*
+ * Check if sgid bit needs to be cleared (actual setacl operation will
+ * be done with mounter's capabilities and so that won't do it for us).
+ */
+ if (unlikely(inode->i_mode & S_ISGID) &&
+ handler->flags == ACL_TYPE_ACCESS &&
+ !in_group_p(inode->i_gid) &&
+ !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
+ struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
+
+ err = ovl_setattr(dentry, &iattr);
+ if (err)
+ return err;
+ }
+
err = ovl_xattr_set(dentry, handler->name, value, size, flags);
if (!err)
ovl_copyattr(ovl_inode_real(inode, NULL), inode);
diff --git a/fs/splice.c b/fs/splice.c
index 153d4f3bd441..5a7750bd2eea 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -299,13 +299,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
{
struct iov_iter to;
struct kiocb kiocb;
- loff_t isize;
int idx, ret;
- isize = i_size_read(in->f_mapping->host);
- if (unlikely(*ppos >= isize))
- return 0;
-
iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len);
idx = to.idx;
init_sync_kiocb(&kiocb, in);
@@ -413,7 +408,8 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
if (res <= 0)
return -ENOMEM;
- nr_pages = res / PAGE_SIZE;
+ BUG_ON(dummy);
+ nr_pages = DIV_ROUND_UP(res, PAGE_SIZE);
vec = __vec;
if (nr_pages > PIPE_DEF_BUFFERS) {
diff --git a/fs/xattr.c b/fs/xattr.c
index 3368659c471e..2d13b4e62fae 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -170,7 +170,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct inode *inode = dentry->d_inode;
- int error = -EOPNOTSUPP;
+ int error = -EAGAIN;
int issec = !strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN);
@@ -183,15 +183,21 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
security_inode_post_setxattr(dentry, name, value,
size, flags);
}
- } else if (issec) {
- const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
-
+ } else {
if (unlikely(is_bad_inode(inode)))
return -EIO;
- error = security_inode_setsecurity(inode, suffix, value,
- size, flags);
- if (!error)
- fsnotify_xattr(dentry);
+ }
+ if (error == -EAGAIN) {
+ error = -EOPNOTSUPP;
+
+ if (issec) {
+ const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
+
+ error = security_inode_setsecurity(inode, suffix, value,
+ size, flags);
+ if (!error)
+ fsnotify_xattr(dentry);
+ }
}
return error;
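The restructured __vfs_setxattr_noperm() uses -EAGAIN purely as an internal "not handled yet" marker: it is rewritten to -EOPNOTSUPP before the security-xattr fallback runs, so it can never escape to the caller. The control flow, sketched with have_handler and call_xattr_handler() as hypothetical stand-ins for the primary handler path:

	int error = -EAGAIN;	/* "no handler has dealt with it yet" */

	if (have_handler)	/* hypothetical condition */
		error = call_xattr_handler(dentry, name, value, size, flags);
	if (error == -EAGAIN) {
		error = -EOPNOTSUPP;
		if (issec)
			error = security_inode_setsecurity(inode,
					name + XATTR_SECURITY_PREFIX_LEN,
					value, size, flags);
	}
	return error;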
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 613c5cf19436..5c2929f94bd3 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -199,9 +199,9 @@ xfs_defer_intake_work(
struct xfs_defer_pending *dfp;
list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
- trace_xfs_defer_intake_work(tp->t_mountp, dfp);
dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
dfp->dfp_count);
+ trace_xfs_defer_intake_work(tp->t_mountp, dfp);
list_sort(tp->t_mountp, &dfp->dfp_work,
dfp->dfp_type->diff_items);
list_for_each(li, &dfp->dfp_work)
@@ -221,21 +221,14 @@ xfs_defer_trans_abort(
struct xfs_defer_pending *dfp;
trace_xfs_defer_trans_abort(tp->t_mountp, dop);
- /*
- * If the transaction was committed, drop the intent reference
- * since we're bailing out of here. The other reference is
- * dropped when the intent hits the AIL. If the transaction
- * was not committed, the intent is freed by the intent item
- * unlock handler on abort.
- */
- if (!dop->dop_committed)
- return;
- /* Abort intent items. */
+ /* Abort intent items that don't have a done item. */
list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
- if (!dfp->dfp_done)
+ if (dfp->dfp_intent && !dfp->dfp_done) {
dfp->dfp_type->abort_intent(dfp->dfp_intent);
+ dfp->dfp_intent = NULL;
+ }
}
/* Shut down FS. */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 1b949e08015c..c19700e2a2fe 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -230,72 +230,62 @@ struct acpi_table_facs {
/* Fields common to all versions of the FADT */
struct acpi_table_fadt {
- struct acpi_table_header header; /* [V1] Common ACPI table header */
- u32 facs; /* [V1] 32-bit physical address of FACS */
- u32 dsdt; /* [V1] 32-bit physical address of DSDT */
- u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
- u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. */
- u16 sci_interrupt; /* [V1] System vector of SCI interrupt */
- u32 smi_command; /* [V1] 32-bit Port address of SMI command port */
- u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */
- u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */
- u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */
- u8 pstate_control; /* [V1] Processor performance state control */
- u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */
- u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */
- u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */
- u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */
- u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */
- u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */
- u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */
- u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */
- u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */
- u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */
- u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */
- u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */
- u8 gpe0_block_length; /* [V1] Byte Length of ports at gpe0_block */
- u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */
- u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */
- u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */
- u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */
- u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */
- u16 flush_size; /* [V1] Processor memory cache line width, in bytes */
- u16 flush_stride; /* [V1] Number of flush strides that need to be read */
- u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */
- u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */
- u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */
- u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */
- u8 century; /* [V1] Index to century in RTC CMOS RAM */
- u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */
- u8 reserved; /* [V1] Reserved, must be zero */
- u32 flags; /* [V1] Miscellaneous flag bits (see below for individual flags) */
- /* End of Version 1 FADT fields (ACPI 1.0) */
-
- struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */
- u8 reset_value; /* [V3] Value to write to the reset_register port to reset the system */
- u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */
- u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */
- u64 Xfacs; /* [V3] 64-bit physical address of FACS */
- u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */
- struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */
- struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */
- struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */
- struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */
- struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */
- struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
- struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */
- struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */
- /* End of Version 3 FADT fields (ACPI 2.0) */
-
- struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */
- /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */
-
- struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */
- /* End of Version 5 FADT fields (ACPI 5.0) */
-
- u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */
- /* End of Version 6 FADT fields (ACPI 6.0) */
-
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 facs; /* 32-bit physical address of FACS */
+ u32 dsdt; /* 32-bit physical address of DSDT */
+ u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
+ u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */
+ u16 sci_interrupt; /* System vector of SCI interrupt */
+ u32 smi_command; /* 32-bit Port address of SMI command port */
+ u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */
+ u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */
+ u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */
+ u8 pstate_control; /* Processor performance state control */
+ u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */
+ u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */
+ u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */
+ u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */
+ u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */
+ u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */
+ u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */
+ u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */
+ u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */
+ u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */
+ u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */
+ u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */
+ u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */
+ u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */
+ u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */
+ u8 cst_control; /* Support for the _CST object and C-States change notification */
+ u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */
+ u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */
+ u16 flush_size; /* Processor memory cache line width, in bytes */
+ u16 flush_stride; /* Number of flush strides that need to be read */
+ u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */
+ u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */
+ u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
+ u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
+ u8 century; /* Index to century in RTC CMOS RAM */
+ u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */
+ u8 reserved; /* Reserved, must be zero */
+ u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
+ struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
+ u8 reset_value; /* Value to write to the reset_register port to reset the system */
+ u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */
+ u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */
+ u64 Xfacs; /* 64-bit physical address of FACS */
+ u64 Xdsdt; /* 64-bit physical address of DSDT */
+ struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */
+ struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */
+ struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */
+ struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */
+ struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */
+ struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
+ struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */
+ struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
+ struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */
+ struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */
+ u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */
};
/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */
@@ -311,8 +301,8 @@ struct acpi_table_fadt {
/* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */
-#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */
-#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */
+#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */
+#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */
/* Masks for FADT flags */
@@ -409,34 +399,20 @@ struct acpi_table_desc {
* match the expected length. In other words, the length of the
* FADT is the bottom line as to what the version really is.
*
- * NOTE: There is no officialy released V2 of the FADT. This
- * version was used only for prototyping and testing during the
- * 32-bit to 64-bit transition. V3 was the first official 64-bit
- * version of the FADT.
- *
- * Update this list of defines when a new version of the FADT is
- * added to the ACPI specification. Note that the FADT version is
- * only incremented when new fields are appended to the existing
- * version. Therefore, the FADT version is competely independent
- * from the version of the ACPI specification where it is
- * defined.
- *
- * For reference, the various FADT lengths are as follows:
- * FADT V1 size: 0x074 ACPI 1.0
- * FADT V3 size: 0x0F4 ACPI 2.0
- * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0
- * FADT V5 size: 0x10C ACPI 5.0
- * FADT V6 size: 0x114 ACPI 6.0
+ * For reference, the values below are as follows:
+ * FADT V1 size: 0x074
+ * FADT V2 size: 0x084
+ * FADT V3 size: 0x0F4
+ * FADT V4 size: 0x0F4
+ * FADT V5 size: 0x10C
+ * FADT V6 size: 0x114
*/
-#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */
-#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */
-#define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */
-#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */
-#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */
-
-/* Update these when new FADT versions are added */
+#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
+#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
+#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control))
+#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
+#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
-#define ACPI_FADT_MAX_VERSION 6
#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)"
#endif /* __ACTBL_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index a5d98d171866..e861a24f06f2 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -191,6 +191,9 @@
#ifndef __init
#define __init
#endif
+#ifndef __iomem
+#define __iomem
+#endif
/* Host-dependent types and defines for user-space ACPICA */
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 63554e9f6e0c..59a3b2f58c22 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -54,6 +54,7 @@ KSYM(__kstrtab_\name):
KSYM(__kcrctab_\name):
__put KSYM(__crc_\name)
.weak KSYM(__crc_\name)
+ .set KSYM(__crc_\name), 0
.previous
#endif
#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 40e887068da2..0504ef8f3aa3 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -118,9 +118,9 @@ do { \
#define this_cpu_generic_read(pcp) \
({ \
typeof(pcp) __ret; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
__ret = raw_cpu_generic_read(pcp); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
__ret; \
})
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index af0254c09424..4df64a1fc09e 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,6 +14,8 @@
* [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
* and/or .init.* sections.
* [__start_rodata, __end_rodata]: contains .rodata.* sections
+ * [__start_data_ro_after_init, __end_data_ro_after_init]:
+ * contains data.ro_after_init section
* [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
* may be out of this range on some architectures.
* [_sinittext, _einittext]: contains .init.text.* sections
@@ -31,6 +33,7 @@ extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
+extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
extern char _end[];
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
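Together with the linker-script change below, these externs bound the .data..ro_after_init region so architecture code can, for example, flip its page protections once init has finished. A hedged sketch (mark_range_ro() is hypothetical):

	static unsigned long boot_setting __ro_after_init;	/* writable only during init */

	static void example_seal_ro_after_init(void)
	{
		mark_range_ro((unsigned long)__start_data_ro_after_init,
			      (unsigned long)__end_data_ro_after_init);
	}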
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 30747960bc54..31e1d639abed 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -259,7 +259,10 @@
* own by defining an empty RO_AFTER_INIT_DATA.
*/
#ifndef RO_AFTER_INIT_DATA
-#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
+#define RO_AFTER_INIT_DATA \
+ __start_data_ro_after_init = .; \
+ *(.data..ro_after_init) \
+ __end_data_ro_after_init = .;
#endif
/*
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
new file mode 100644
index 000000000000..3629b2734db6
--- /dev/null
+++ b/include/drm/bridge/mhl.h
@@ -0,0 +1,291 @@
+/*
+ * Defines for Mobile High-Definition Link (MHL) interface
+ *
+ * Copyright (C) 2015, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MHL_H__
+#define __MHL_H__
+
+/* Device Capabilities Registers */
+enum {
+ MHL_DCAP_DEV_STATE,
+ MHL_DCAP_MHL_VERSION,
+ MHL_DCAP_CAT,
+ MHL_DCAP_ADOPTER_ID_H,
+ MHL_DCAP_ADOPTER_ID_L,
+ MHL_DCAP_VID_LINK_MODE,
+ MHL_DCAP_AUD_LINK_MODE,
+ MHL_DCAP_VIDEO_TYPE,
+ MHL_DCAP_LOG_DEV_MAP,
+ MHL_DCAP_BANDWIDTH,
+ MHL_DCAP_FEATURE_FLAG,
+ MHL_DCAP_DEVICE_ID_H,
+ MHL_DCAP_DEVICE_ID_L,
+ MHL_DCAP_SCRATCHPAD_SIZE,
+ MHL_DCAP_INT_STAT_SIZE,
+ MHL_DCAP_RESERVED,
+ MHL_DCAP_SIZE
+};
+
+#define MHL_DCAP_CAT_SINK 0x01
+#define MHL_DCAP_CAT_SOURCE 0x02
+#define MHL_DCAP_CAT_POWER 0x10
+#define MHL_DCAP_CAT_PLIM(x) ((x) << 5)
+
+#define MHL_DCAP_VID_LINK_RGB444 0x01
+#define MHL_DCAP_VID_LINK_YCBCR444 0x02
+#define MHL_DCAP_VID_LINK_YCBCR422 0x04
+#define MHL_DCAP_VID_LINK_PPIXEL 0x08
+#define MHL_DCAP_VID_LINK_ISLANDS 0x10
+#define MHL_DCAP_VID_LINK_VGA 0x20
+#define MHL_DCAP_VID_LINK_16BPP 0x40
+
+#define MHL_DCAP_AUD_LINK_2CH 0x01
+#define MHL_DCAP_AUD_LINK_8CH 0x02
+
+#define MHL_DCAP_VT_GRAPHICS 0x00
+#define MHL_DCAP_VT_PHOTO 0x02
+#define MHL_DCAP_VT_CINEMA 0x04
+#define MHL_DCAP_VT_GAMES 0x08
+#define MHL_DCAP_SUPP_VT 0x80
+
+#define MHL_DCAP_LD_DISPLAY 0x01
+#define MHL_DCAP_LD_VIDEO 0x02
+#define MHL_DCAP_LD_AUDIO 0x04
+#define MHL_DCAP_LD_MEDIA 0x08
+#define MHL_DCAP_LD_TUNER 0x10
+#define MHL_DCAP_LD_RECORD 0x20
+#define MHL_DCAP_LD_SPEAKER 0x40
+#define MHL_DCAP_LD_GUI 0x80
+#define MHL_DCAP_LD_ALL 0xFF
+
+#define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01
+#define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02
+#define MHL_DCAP_FEATURE_SP_SUPPORT 0x04
+#define MHL_DCAP_FEATURE_UCP_SEND_SUPPOR 0x08
+#define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10
+#define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40
+
+/* Extended Device Capabilities Registers */
+enum {
+ MHL_XDC_ECBUS_SPEEDS,
+ MHL_XDC_TMDS_SPEEDS,
+ MHL_XDC_ECBUS_ROLES,
+ MHL_XDC_LOG_DEV_MAPX,
+ MHL_XDC_SIZE
+};
+
+#define MHL_XDC_ECBUS_S_075 0x01
+#define MHL_XDC_ECBUS_S_8BIT 0x02
+#define MHL_XDC_ECBUS_S_12BIT 0x04
+#define MHL_XDC_ECBUS_D_150 0x10
+#define MHL_XDC_ECBUS_D_8BIT 0x20
+
+#define MHL_XDC_TMDS_000 0x00
+#define MHL_XDC_TMDS_150 0x01
+#define MHL_XDC_TMDS_300 0x02
+#define MHL_XDC_TMDS_600 0x04
+
+/* MHL_XDC_ECBUS_ROLES flags */
+#define MHL_XDC_DEV_HOST 0x01
+#define MHL_XDC_DEV_DEVICE 0x02
+#define MHL_XDC_DEV_CHARGER 0x04
+#define MHL_XDC_HID_HOST 0x08
+#define MHL_XDC_HID_DEVICE 0x10
+
+/* MHL_XDC_LOG_DEV_MAPX flags */
+#define MHL_XDC_LD_PHONE 0x01
+
+/* Device Status Registers */
+enum {
+ MHL_DST_CONNECTED_RDY,
+ MHL_DST_LINK_MODE,
+ MHL_DST_VERSION,
+ MHL_DST_SIZE
+};
+
+/* Offset of DEVSTAT registers */
+#define MHL_DST_OFFSET 0x30
+#define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name)
+
+#define MHL_DST_CONN_DCAP_RDY 0x01
+#define MHL_DST_CONN_XDEVCAPP_SUPP 0x02
+#define MHL_DST_CONN_POW_STAT 0x04
+#define MHL_DST_CONN_PLIM_STAT_MASK 0x38
+
+#define MHL_DST_LM_CLK_MODE_MASK 0x07
+#define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02
+#define MHL_DST_LM_CLK_MODE_NORMAL 0x03
+#define MHL_DST_LM_PATH_EN_MASK 0x08
+#define MHL_DST_LM_PATH_ENABLED 0x08
+#define MHL_DST_LM_PATH_DISABLED 0x00
+#define MHL_DST_LM_MUTED_MASK 0x10
+
+/* Extended Device Status Registers */
+enum {
+ MHL_XDS_CURR_ECBUS_MODE,
+ MHL_XDS_AVLINK_MODE_STATUS,
+ MHL_XDS_AVLINK_MODE_CONTROL,
+ MHL_XDS_MULTI_SINK_STATUS,
+ MHL_XDS_SIZE
+};
+
+/* Offset of XDEVSTAT registers */
+#define MHL_XDS_OFFSET 0x90
+#define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name)
+
+/* MHL_XDS_REG_CURR_ECBUS_MODE flags */
+#define MHL_XDS_SLOT_MODE_8BIT 0x00
+#define MHL_XDS_SLOT_MODE_6BIT 0x01
+#define MHL_XDS_ECBUS_S 0x04
+#define MHL_XDS_ECBUS_D 0x08
+
+#define MHL_XDS_LINK_CLOCK_75MHZ 0x00
+#define MHL_XDS_LINK_CLOCK_150MHZ 0x10
+#define MHL_XDS_LINK_CLOCK_300MHZ 0x20
+#define MHL_XDS_LINK_CLOCK_600MHZ 0x30
+
+#define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00
+#define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01
+#define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02
+#define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03
+
+#define MHL_XDS_LINK_RATE_1_5_GBPS 0x00
+#define MHL_XDS_LINK_RATE_3_0_GBPS 0x01
+#define MHL_XDS_LINK_RATE_6_0_GBPS 0x02
+#define MHL_XDS_ATT_CAPABLE 0x08
+
+#define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01
+#define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04
+#define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10
+#define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40
+
+/* Interrupt Registers */
+enum {
+ MHL_INT_RCHANGE,
+ MHL_INT_DCHANGE,
+ MHL_INT_SIZE
+};
+
+/* Offset of interrupt registers */
+#define MHL_INT_OFFSET 0x20
+#define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name)
+
+#define MHL_INT_RC_DCAP_CHG 0x01
+#define MHL_INT_RC_DSCR_CHG 0x02
+#define MHL_INT_RC_REQ_WRT 0x04
+#define MHL_INT_RC_GRT_WRT 0x08
+#define MHL_INT_RC_3D_REQ 0x10
+#define MHL_INT_RC_FEAT_REQ 0x20
+#define MHL_INT_RC_FEAT_COMPLETE 0x40
+
+#define MHL_INT_DC_EDID_CHG 0x02
+
+enum {
+ MHL_ACK = 0x33, /* Command or Data byte acknowledge */
+ MHL_NACK = 0x34, /* Command or Data byte not acknowledge */
+ MHL_ABORT = 0x35, /* Transaction abort */
+ MHL_WRITE_STAT = 0xe0, /* Write one status register */
+ MHL_SET_INT = 0x60, /* Write one interrupt register */
+ MHL_READ_DEVCAP_REG = 0x61, /* Read one register */
+ MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */
+ MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */
+ MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */
+ MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */
+ MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */
+ MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */
+ MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */
+ MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */
+ MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */
+ MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */
+ MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */
+ MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */
+ MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */
+ MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */
+ /* let the rest of these float, they are software specific */
+ MHL_READ_EDID_BLOCK,
+ MHL_SEND_3D_REQ_OR_FEAT_REQ,
+ MHL_READ_DEVCAP,
+ MHL_READ_XDEVCAP
+};
+
+/* MSC message types */
+enum {
+ MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */
+ MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */
+ MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */
+ MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */
+ MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */
+ MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */
+ MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */
+ MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */
+ MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */
+ MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */
+ MHL_MSC_MSG_UCPE = 0x32, /* UCP Error sub-command */
+ MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */
+ MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */
+ MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */
+ MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */
+ MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */
+ MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */
+ MHL_MSC_MSG_BIST_TRIGGER = 0x60,
+ MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61,
+ MHL_MSC_MSG_BIST_READY = 0x62,
+ MHL_MSC_MSG_BIST_STOP = 0x63,
+};
+
+/* RAP action codes */
+#define MHL_RAP_POLL 0x00 /* Just do an ack */
+#define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */
+#define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */
+#define MHL_RAP_CBUS_MODE_DOWN 0x20
+#define MHL_RAP_CBUS_MODE_UP 0x21
+
+/* RAPK status codes */
+#define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */
+#define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */
+#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */
+#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */
+
+/*
+ * Error status codes for RCPE messages
+ */
+/* No error. (Not allowed in RCPE messages) */
+#define MHL_RCPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized key code */
+#define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
+/* Responder busy. Initiator may retry message */
+#define MHL_RCPE_STATUS_BUSY 0x02
+
+/*
+ * Error status codes for RBPE messages
+ */
+/* No error. (Not allowed in RBPE messages) */
+#define MHL_RBPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized button code */
+#define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01
+/* Responder busy. Initiator may retry message */
+#define MHL_RBPE_STATUS_BUSY 0x02
+
+/*
+ * Error status codes for UCPE messages
+ */
+/* No error. (Not allowed in UCPE messages) */
+#define MHL_UCPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized key code */
+#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
+
+#endif /* __MHL_H__ */
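The DEVSTAT/XDEVSTAT/interrupt blocks above all follow the same pattern: an enum supplies the register index and the corresponding *_REG() macro adds the CBUS offset. A small illustrative sketch, not taken from this patch (struct mhl_dev and mhl_msc_write_stat() are hypothetical stand-ins for a driver's MSC command path):

	#include <drm/bridge/mhl.h>

	/* MHL_DST_REG(LINK_MODE) expands to 0x30 + MHL_DST_LINK_MODE, i.e. 0x31 */
	static int announce_path_enable(struct mhl_dev *mhl)
	{
		return mhl_msc_write_stat(mhl, MHL_DST_REG(LINK_MODE),
					  MHL_DST_LM_CLK_MODE_NORMAL |
					  MHL_DST_LM_PATH_ENABLED);
	}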
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 672644031bd5..a9cfd33c7b1a 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -57,7 +57,7 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
@@ -76,6 +76,7 @@
#include <drm/drm_os_linux.h>
#include <drm/drm_sarea.h>
#include <drm/drm_vma_manager.h>
+#include <drm/drm_drv.h>
struct module;
@@ -135,35 +136,12 @@ struct dma_buf_attachment;
#define DRM_UT_PRIME 0x08
#define DRM_UT_ATOMIC 0x10
#define DRM_UT_VBL 0x20
-
-extern __printf(6, 7)
-void drm_dev_printk(const struct device *dev, const char *level,
- unsigned int category, const char *function_name,
- const char *prefix, const char *format, ...);
-
-extern __printf(3, 4)
-void drm_printk(const char *level, unsigned int category,
- const char *format, ...);
+#define DRM_UT_STATE 0x40
/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/
-/* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP 0x1
-#define DRIVER_LEGACY 0x2
-#define DRIVER_PCI_DMA 0x8
-#define DRIVER_SG 0x10
-#define DRIVER_HAVE_DMA 0x20
-#define DRIVER_HAVE_IRQ 0x40
-#define DRIVER_IRQ_SHARED 0x80
-#define DRIVER_GEM 0x1000
-#define DRIVER_MODESET 0x2000
-#define DRIVER_PRIME 0x4000
-#define DRIVER_RENDER 0x8000
-#define DRIVER_ATOMIC 0x10000
-#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
-
/***********************************************************************/
/** \name Macros to make printk easier */
/*@{*/
@@ -306,6 +284,27 @@ void drm_printk(const char *level, unsigned int category,
#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
+/* Format strings and argument splitters to simplify printing
+ * various "complex" objects
+ */
+#define DRM_MODE_FMT "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x"
+#define DRM_MODE_ARG(m) \
+ (m)->base.id, (m)->name, (m)->vrefresh, (m)->clock, \
+ (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \
+ (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \
+ (m)->type, (m)->flags
+
+#define DRM_RECT_FMT "%dx%d%+d%+d"
+#define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1
+
+/* for rects in fixed-point format: */
+#define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u"
+#define DRM_RECT_FP_ARG(r) \
+ drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \
+ drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \
+ (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \
+ (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10
+
/*@}*/
/***********************************************************************/
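The format/argument macro pairs added above are meant to be dropped straight into the existing debug macros. A brief usage sketch (assuming mode is a struct drm_display_mode and clip is a struct drm_rect already computed by the caller, with drm_rect.h included for the width/height helpers):

	DRM_DEBUG_KMS("enabling " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
	DRM_DEBUG_ATOMIC("clipped to " DRM_RECT_FMT "\n", DRM_RECT_ARG(&clip));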
@@ -362,7 +361,7 @@ struct drm_ioctl_desc {
struct drm_pending_event {
struct completion *completion;
struct drm_event *event;
- struct fence *fence;
+ struct dma_fence *fence;
struct list_head link;
struct list_head pending_link;
struct drm_file *file_priv;
@@ -458,263 +457,6 @@ struct drm_lock_data {
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
-/**
- * DRM driver structure. This structure represent the common code for
- * a family of cards. There will one drm_device for each card present
- * in this family
- */
-struct drm_driver {
- int (*load) (struct drm_device *, unsigned long flags);
- int (*firstopen) (struct drm_device *);
- int (*open) (struct drm_device *, struct drm_file *);
- void (*preclose) (struct drm_device *, struct drm_file *file_priv);
- void (*postclose) (struct drm_device *, struct drm_file *);
- void (*lastclose) (struct drm_device *);
- int (*unload) (struct drm_device *);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
- int (*set_busid)(struct drm_device *dev, struct drm_master *master);
-
- /**
- * get_vblank_counter - get raw hardware vblank counter
- * @dev: DRM device
- * @pipe: counter to fetch
- *
- * Driver callback for fetching a raw hardware vblank counter for @crtc.
- * If a device doesn't have a hardware counter, the driver can simply
- * use drm_vblank_no_hw_counter() function. The DRM core will account for
- * missed vblank events while interrupts where disabled based on system
- * timestamps.
- *
- * Wraparound handling and loss of events due to modesetting is dealt
- * with in the DRM core code.
- *
- * RETURNS
- * Raw vblank counter value.
- */
- u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * enable_vblank - enable vblank interrupt events
- * @dev: DRM device
- * @pipe: which irq to enable
- *
- * Enable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, the driver should use the
- * drm_vblank_no_hw_counter() function that keeps a virtual counter.
- *
- * RETURNS
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
- int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * disable_vblank - disable vblank interrupt events
- * @dev: DRM device
- * @pipe: which irq to enable
- *
- * Disable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, the driver should use the
- * drm_vblank_no_hw_counter() function that keeps a virtual counter.
- */
- void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * Called by \c drm_device_is_agp. Typically used to determine if a
- * card is really attached to AGP or not.
- *
- * \param dev DRM device handle
- *
- * \returns
- * One of three values is returned depending on whether or not the
- * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
- * (return of 1), or may or may not be AGP (return of 2).
- */
- int (*device_is_agp) (struct drm_device *dev);
-
- /**
- * Called by vblank timestamping code.
- *
- * Return the current display scanout position from a crtc, and an
- * optional accurate ktime_get timestamp of when position was measured.
- *
- * \param dev DRM device.
- * \param pipe Id of the crtc to query.
- * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
- * \param *vpos Target location for current vertical scanout position.
- * \param *hpos Target location for current horizontal scanout position.
- * \param *stime Target location for timestamp taken immediately before
- * scanout position query. Can be NULL to skip timestamp.
- * \param *etime Target location for timestamp taken immediately after
- * scanout position query. Can be NULL to skip timestamp.
- * \param mode Current display timings.
- *
- * Returns vpos as a positive number while in active scanout area.
- * Returns vpos as a negative number inside vblank, counting the number
- * of scanlines to go until end of vblank, e.g., -1 means "one scanline
- * until start of active scanout / end of vblank."
- *
- * \return Flags, or'ed together as follows:
- *
- * DRM_SCANOUTPOS_VALID = Query successful.
- * DRM_SCANOUTPOS_INVBL = Inside vblank.
- * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
- * this flag means that returned position may be offset by a constant
- * but unknown small number of scanlines wrt. real scanout position.
- *
- */
- int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
- /**
- * Called by \c drm_get_last_vbltimestamp. Should return a precise
- * timestamp when the most recent VBLANK interval ended or will end.
- *
- * Specifically, the timestamp in @vblank_time should correspond as
- * closely as possible to the time when the first video scanline of
- * the video frame after the end of VBLANK will start scanning out,
- * the time immediately after end of the VBLANK interval. If the
- * @crtc is currently inside VBLANK, this will be a time in the future.
- * If the @crtc is currently scanning out a frame, this will be the
- * past start time of the current scanout. This is meant to adhere
- * to the OpenML OML_sync_control extension specification.
- *
- * \param dev dev DRM device handle.
- * \param pipe crtc for which timestamp should be returned.
- * \param *max_error Maximum allowable timestamp error in nanoseconds.
- * Implementation should strive to provide timestamp
- * with an error of at most *max_error nanoseconds.
- * Returns true upper bound on error for timestamp.
- * \param *vblank_time Target location for returned vblank timestamp.
- * \param flags 0 = Defaults, no special treatment needed.
- * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
- * irq handler. Some drivers need to apply some workarounds
- * for gpu-specific vblank irq quirks if flag is set.
- *
- * \returns
- * Zero if timestamping isn't supported in current display mode or a
- * negative number on failure. A positive status code on success,
- * which describes how the vblank_time timestamp was computed.
- */
- int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
- int *max_error,
- struct timeval *vblank_time,
- unsigned flags);
-
- /* these have to be filled in */
-
- irqreturn_t(*irq_handler) (int irq, void *arg);
- void (*irq_preinstall) (struct drm_device *dev);
- int (*irq_postinstall) (struct drm_device *dev);
- void (*irq_uninstall) (struct drm_device *dev);
-
- /* Master routines */
- int (*master_create)(struct drm_device *dev, struct drm_master *master);
- void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
- /**
- * master_set is called whenever the minor master is set.
- * master_drop is called whenever the minor master is dropped.
- */
-
- int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
- bool from_open);
- void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
-
- int (*debugfs_init)(struct drm_minor *minor);
- void (*debugfs_cleanup)(struct drm_minor *minor);
-
- /**
- * @gem_free_object: deconstructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * @gem_free_object_unlocked instead.
- */
- void (*gem_free_object) (struct drm_gem_object *obj);
-
- /**
- * @gem_free_object_unlocked: deconstructor for drm_gem_objects
- *
- * This is for drivers which are not encumbered with dev->struct_mutex
- * legacy locking schemes. Use this hook instead of @gem_free_object.
- */
- void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
-
- int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
- void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
- * Hook for allocating the GEM object struct, for use by core
- * helpers.
- */
- struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
- size_t size);
-
- /* prime: */
- /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
- int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t handle, uint32_t flags, int *prime_fd);
- /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
- int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
- int prime_fd, uint32_t *handle);
- /* export GEM -> dmabuf */
- struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
- /* import dmabuf -> GEM */
- struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
- struct dma_buf *dma_buf);
- /* low-level interface used by drm_gem_prime_{import,export} */
- int (*gem_prime_pin)(struct drm_gem_object *obj);
- void (*gem_prime_unpin)(struct drm_gem_object *obj);
- struct reservation_object * (*gem_prime_res_obj)(
- struct drm_gem_object *obj);
- struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
- struct drm_gem_object *(*gem_prime_import_sg_table)(
- struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
- void *(*gem_prime_vmap)(struct drm_gem_object *obj);
- void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
- int (*gem_prime_mmap)(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
- /* vga arb irq handler */
- void (*vgaarb_irq)(struct drm_device *dev, bool state);
-
- /* dumb alloc support */
- int (*dumb_create)(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
- int (*dumb_map_offset)(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
- int (*dumb_destroy)(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
-
- /* Driver private ops for this object */
- const struct vm_operations_struct *gem_vm_ops;
-
- int major;
- int minor;
- int patchlevel;
- char *name;
- char *desc;
- char *date;
-
- u32 driver_features;
- int dev_priv_size;
- const struct drm_ioctl_desc *ioctls;
- int num_ioctls;
- const struct file_operations *fops;
-
- /* List of devices hanging off this driver with stealth attach. */
- struct list_head legacy_dev_list;
-};
-
enum drm_minor_type {
DRM_MINOR_PRIMARY,
DRM_MINOR_CONTROL,
@@ -941,8 +683,13 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+#else
+/* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */
+#define drm_compat_ioctl NULL
+#endif
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
/* File Operations (drm_fops.c) */
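With drm_compat_ioctl defined to NULL when CONFIG_COMPAT is disabled, a driver's file_operations no longer needs an #ifdef around the assignment. A sketch of the resulting pattern (foo_drm_fops is hypothetical; the callbacks are the usual DRM file operations):

	static const struct file_operations foo_drm_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.compat_ioctl	= drm_compat_ioctl,	/* NULL when !CONFIG_COMPAT */
		.mmap		= drm_gem_mmap,
	};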
@@ -980,15 +727,6 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
*/
-/* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
-
-/* drm_drv.c */
-void drm_put_dev(struct drm_device *dev);
-void drm_unplug_dev(struct drm_device *dev);
-extern unsigned int drm_debug;
-
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
extern int drm_debugfs_create_files(const struct drm_info_list *files,
@@ -1041,19 +779,6 @@ extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
- struct device *parent);
-int drm_dev_init(struct drm_device *dev,
- struct drm_driver *driver,
- struct device *parent);
-void drm_dev_ref(struct drm_device *dev);
-void drm_dev_unref(struct drm_device *dev);
-int drm_dev_register(struct drm_device *dev, unsigned long flags);
-void drm_dev_unregister(struct drm_device *dev);
-
-struct drm_minor *drm_minor_acquire(unsigned int minor_id);
-void drm_minor_release(struct drm_minor *minor);
-
/*@}*/
/* PCI section */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9701f2dfb784..d6d241f63b9f 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,6 +144,7 @@ struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state;
struct drm_crtc_commit *commit;
+ s64 __user *out_fence_ptr;
};
struct __drm_connnectors_state {
@@ -153,6 +154,7 @@ struct __drm_connnectors_state {
/**
* struct drm_atomic_state - the global state object for atomic updates
+ * @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
@@ -164,6 +166,8 @@ struct __drm_connnectors_state {
* @acquire_ctx: acquire context for this atomic modeset state update
*/
struct drm_atomic_state {
+ struct kref ref;
+
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
@@ -193,7 +197,33 @@ static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
-void drm_atomic_state_free(struct drm_atomic_state *state);
+
+/**
+ * drm_atomic_state_get - acquire a reference to the atomic state
+ * @state: The atomic state
+ *
+ * Returns a new reference to the @state
+ */
+static inline struct drm_atomic_state *
+drm_atomic_state_get(struct drm_atomic_state *state)
+{
+ kref_get(&state->ref);
+ return state;
+}
+
+void __drm_atomic_state_free(struct kref *ref);
+
+/**
+ * drm_atomic_state_put - release a reference to the atomic state
+ * @state: The atomic state
+ *
+ * This releases a reference to @state which is freed after removing the
+ * final reference. No locking required and callable from any context.
+ */
+static inline void drm_atomic_state_put(struct drm_atomic_state *state)
+{
+ kref_put(&state->ref, __drm_atomic_state_free);
+}
int __must_check
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
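drm_atomic_state_free() is gone; lifetimes are now handled with the get/put pair documented above. A minimal caller-side sketch (error handling trimmed; dev and ret are assumed from the surrounding function):

	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);

	if (!state)
		return -ENOMEM;

	/* ... build up the state, run drm_atomic_check_only() as needed ... */
	ret = drm_atomic_commit(state);

	/* drop our reference; the state is freed once the last holder puts it */
	drm_atomic_state_put(state);
	return ret;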
@@ -316,6 +346,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
+void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence);
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
@@ -335,6 +367,14 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
+
+#ifdef CONFIG_DEBUG_FS
+struct drm_minor;
+int drm_atomic_debugfs_init(struct drm_minor *minor);
+int drm_atomic_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
#define for_each_connector_in_state(__state, connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_connector && \
@@ -365,11 +405,20 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
*
* To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
* whether the state CRTC changed enough to need a full modeset cycle:
- * connectors_changed, mode_changed and active_change. This helper simply
+ * connectors_changed, mode_changed and active_changed. This helper simply
* combines these three to compute the overall need for a modeset for @state.
+ *
+ * The atomic helper code sets these booleans, but drivers can and should
+ * change them appropriately to accurately represent whether a modeset is
+ * really needed. In general, drivers should avoid full modesets whenever
+ * possible.
+ *
+ * For example if the CRTC mode has changed, and the hardware is able to enact
+ * the requested mode change without going through a full modeset, the driver
+ * should clear mode_changed during its ->atomic_check.
*/
static inline bool
-drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
+drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
{
return state->mode_changed || state->active_changed ||
state->connectors_changed;
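A sketch of the pattern the new paragraph describes: a driver's ->atomic_check downgrading a mode change it can apply without a full modeset (foo_can_fastset() is a hypothetical hardware-capability check):

	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
					 struct drm_crtc_state *state)
	{
		if (state->mode_changed && foo_can_fastset(crtc, state))
			state->mode_changed = false;	/* avoid the full modeset path */

		return 0;
	}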
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 36baa175de99..13221cf9b3eb 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -47,8 +47,14 @@ struct drm_atomic_state;
#define DRM_REFLECT_Y BIT(5)
#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations);
+static inline bool drm_rotation_90_or_270(unsigned int rotation)
+{
+ return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
+}
+
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+ unsigned int rotation,
+ unsigned int supported_rotations);
unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations);
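The rotation property is now created per plane rather than per device. A usage sketch under assumed driver-local names (plane, width, height):

	drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
					   DRM_ROTATE_0 | DRM_ROTATE_90 |
					   DRM_ROTATE_180 | DRM_ROTATE_270);

	/* later, e.g. in ->atomic_check: 90/270 rotations swap the scanout size */
	if (drm_rotation_90_or_270(plane->state->rotation))
		swap(width, height);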
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ac9d7d8e0e43..34f9741ebb5b 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -37,6 +37,7 @@ struct drm_crtc;
struct drm_encoder;
struct drm_property;
struct drm_property_blob;
+struct drm_printer;
struct edid;
enum drm_connector_force {
@@ -481,6 +482,18 @@ struct drm_connector_funcs {
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If a driver subclasses struct &drm_connector_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_connector_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_connector_state *state);
};
/* mode specified on the command line */
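A possible implementation of the new @atomic_print_state hook for a driver that subclasses the connector state (struct foo_connector_state and its bpc field are made up for illustration; drm_printf() is the printer helper this interface is built around):

	static void foo_connector_print_state(struct drm_printer *p,
					      const struct drm_connector_state *state)
	{
		const struct foo_connector_state *fstate =
			container_of(state, struct foo_connector_state, base);

		drm_printf(p, "\tbpc=%d\n", fstate->bpc);
	}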
@@ -762,6 +775,30 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
/**
+ * struct drm_tile_group - Tile group metadata
+ * @refcount: reference count
+ * @dev: DRM device
+ * @id: tile group id exposed to userspace
+ * @group_data: Sink-private data identifying this group
+ *
+ * @group_data corresponds to displayid vend/prod/serial for external screens
+ * with an EDID.
+ */
+struct drm_tile_group {
+ struct kref refcount;
+ struct drm_device *dev;
+ int id;
+ u8 group_data[8];
+};
+
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8]);
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8]);
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg);
+
+/**
* drm_for_each_connector - iterate over all connectors
* @connector: the loop cursor
* @dev: the DRM device
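The tile-group helpers move here from drm_crtc.h. A sketch of how a connector probe path might use them (topology is assumed to be the 8-byte DisplayID tile identifier parsed from the sink):

	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology);
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology);
	if (tg) {
		connector->tile_group = tg;	/* connector keeps the reference */
		connector->has_tile = true;
	}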
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 0aa292526567..946672f97e1e 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -28,7 +28,6 @@
#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
#include <linux/media-bus-format.h>
@@ -47,13 +46,16 @@
#include <drm/drm_plane.h>
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
+#include <drm/drm_debugfs_crc.h>
+#include <drm/drm_mode_config.h>
struct drm_device;
struct drm_mode_set;
struct drm_file;
struct drm_clip_rect;
+struct drm_printer;
struct device_node;
-struct fence;
+struct dma_fence;
struct edid;
static inline int64_t U642I64(uint64_t val)
@@ -65,14 +67,6 @@ static inline uint64_t I642U64(int64_t val)
return (uint64_t)*((uint64_t *)&val);
}
-/* data corresponds to displayid vend/prod/serial */
-struct drm_tile_group {
- struct kref refcount;
- struct drm_device *dev;
- int id;
- u8 group_data[8];
-};
-
struct drm_crtc;
struct drm_encoder;
struct drm_pending_vblank_event;
@@ -116,6 +110,11 @@ struct drm_plane_helper_funcs;
* never return in a failure from the ->atomic_check callback. Userspace assumes
* that a DPMS On will always succeed. In other words: @enable controls resource
* assignment, @active controls the actual hardware state.
+ *
+ * The three booleans active_changed, connectors_changed and mode_changed are
+ * intended to indicate whether a full modeset is needed, rather than strictly
+ * describing what has changed in a commit.
+ * See also: drm_atomic_crtc_needs_modeset()
*/
struct drm_crtc_state {
struct drm_crtc *crtc;
@@ -564,6 +563,42 @@ struct drm_crtc_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_crtc *crtc);
+
+ /**
+ * @set_crc_source:
+ *
+ * Changes the source of CRC checksums of frames at the request of
+ * userspace, typically for testing purposes. The sources available are
+ * specific to each driver and a %NULL value indicates that CRC
+ * generation is to be switched off.
+ *
+ * When CRC generation is enabled, the driver should call
+ * drm_crtc_add_crc_entry() at each frame, providing any information
+ * that characterizes the frame contents in the crcN arguments, as
+ * provided from the configured source. Drivers must accept an "auto"
+ * source name that will select a default source for this CRTC.
+ *
+ * This callback is optional if the driver does not support any CRC
+ * generation functionality.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*set_crc_source)(struct drm_crtc *crtc, const char *source,
+ size_t *values_cnt);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If a driver subclasses struct &drm_crtc_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_crtc_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_crtc_state *state);
};
/**
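A rough sketch of a driver wiring up the new CRC hook described above (the foo_hw_* helpers are hypothetical; drm_crtc_add_crc_entry() is the helper named in the documentation, fed from the frame-done/vblank path once capture is enabled):

	static int foo_crtc_set_crc_source(struct drm_crtc *crtc, const char *source,
					   size_t *values_cnt)
	{
		if (!source) {
			foo_hw_disable_crc(crtc);		/* hypothetical */
			return 0;
		}
		if (strcmp(source, "auto"))
			return -EINVAL;

		*values_cnt = 1;
		foo_hw_enable_crc(crtc);			/* hypothetical */
		return 0;
	}

	static void foo_report_crc(struct drm_crtc *crtc, u32 frame, u32 crc)
	{
		drm_crtc_add_crc_entry(crtc, true, frame, &crc);
	}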
@@ -680,660 +715,90 @@ struct drm_crtc {
* context.
*/
struct drm_modeset_acquire_ctx *acquire_ctx;
-};
-/**
- * struct drm_mode_set - new values for a CRTC config change
- * @fb: framebuffer to use for new config
- * @crtc: CRTC whose configuration we're about to change
- * @mode: mode timings to use
- * @x: position of this CRTC relative to @fb
- * @y: position of this CRTC relative to @fb
- * @connectors: array of connectors to drive with this CRTC if possible
- * @num_connectors: size of @connectors array
- *
- * Represents a single crtc the connectors that it drives with what mode
- * and from which framebuffer it scans out from.
- *
- * This is used to set modes.
- */
-struct drm_mode_set {
- struct drm_framebuffer *fb;
- struct drm_crtc *crtc;
- struct drm_display_mode *mode;
-
- uint32_t x;
- uint32_t y;
-
- struct drm_connector **connectors;
- size_t num_connectors;
-};
-
-/**
- * struct drm_mode_config_funcs - basic driver provided mode setting functions
- *
- * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
- * involve drivers.
- */
-struct drm_mode_config_funcs {
+#ifdef CONFIG_DEBUG_FS
/**
- * @fb_create:
- *
- * Create a new framebuffer object. The core does basic checks on the
- * requested metadata, but most of that is left to the driver. See
- * struct &drm_mode_fb_cmd2 for details.
- *
- * If the parameters are deemed valid and the backing storage objects in
- * the underlying memory manager all exist, then the driver allocates
- * a new &drm_framebuffer structure, subclassed to contain
- * driver-specific information (like the internal native buffer object
- * references). It also needs to fill out all relevant metadata, which
- * should be done by calling drm_helper_mode_fill_fb_struct().
+ * @debugfs_entry:
*
- * The initialization is finalized by calling drm_framebuffer_init(),
- * which registers the framebuffer and makes it accessible to other
- * threads.
- *
- * RETURNS:
- *
- * A new framebuffer with an initial reference count of 1 or a negative
- * error code encoded with ERR_PTR().
+ * Debugfs directory for this CRTC.
*/
- struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
- struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd);
+ struct dentry *debugfs_entry;
/**
- * @output_poll_changed:
- *
- * Callback used by helpers to inform the driver of output configuration
- * changes.
+ * @crc:
*
- * Drivers implementing fbdev emulation with the helpers can call
- * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
- * helper of output changes.
- *
- * FIXME:
- *
- * Except that there's no vtable for device-level helper callbacks
- * there's no reason this is a core function.
+ * Configuration settings of CRC capture.
*/
- void (*output_poll_changed)(struct drm_device *dev);
+ struct drm_crtc_crc crc;
+#endif
/**
- * @atomic_check:
- *
- * This is the only hook to validate an atomic modeset update. This
- * function must reject any modeset and state changes which the hardware
- * or driver doesn't support. This includes but is of course not limited
- * to:
- *
- * - Checking that the modes, framebuffers, scaling and placement
- * requirements and so on are within the limits of the hardware.
- *
- * - Checking that any hidden shared resources are not oversubscribed.
- * This can be shared PLLs, shared lanes, overall memory bandwidth,
- * display fifo space (where shared between planes or maybe even
- * CRTCs).
- *
- * - Checking that virtualized resources exported to userspace are not
- * oversubscribed. For various reasons it can make sense to expose
- * more planes, crtcs or encoders than which are physically there. One
- * example is dual-pipe operations (which generally should be hidden
- * from userspace if when lockstepped in hardware, exposed otherwise),
- * where a plane might need 1 hardware plane (if it's just on one
- * pipe), 2 hardware planes (when it spans both pipes) or maybe even
- * shared a hardware plane with a 2nd plane (if there's a compatible
- * plane requested on the area handled by the other pipe).
- *
- * - Check that any transitional state is possible and that if
- * requested, the update can indeed be done in the vblank period
- * without temporarily disabling some functions.
- *
- * - Check any other constraints the driver or hardware might have.
- *
- * - This callback also needs to correctly fill out the &drm_crtc_state
- * in this update to make sure that drm_atomic_crtc_needs_modeset()
- * reflects the nature of the possible update and returns true if and
- * only if the update cannot be applied without tearing within one
- * vblank on that CRTC. The core uses that information to reject
- * updates which require a full modeset (i.e. blanking the screen, or
- * at least pausing updates for a substantial amount of time) if
- * userspace has disallowed that in its request.
- *
- * - The driver also does not need to repeat basic input validation
- * like done for the corresponding legacy entry points. The core does
- * that before calling this hook.
- *
- * See the documentation of @atomic_commit for an exhaustive list of
- * error conditions which don't have to be checked at the
- * ->atomic_check() stage?
- *
- * See the documentation for struct &drm_atomic_state for how exactly
- * an atomic modeset update is described.
- *
- * Drivers using the atomic helpers can implement this hook using
- * drm_atomic_helper_check(), or one of the exported sub-functions of
- * it.
+ * @fence_context:
*
- * RETURNS:
- *
- * 0 on success or one of the below negative error codes:
- *
- * - -EINVAL, if any of the above constraints are violated.
- *
- * - -EDEADLK, when returned from an attempt to acquire an additional
- * &drm_modeset_lock through drm_modeset_lock().
- *
- * - -ENOMEM, if allocating additional state sub-structures failed due
- * to lack of memory.
- *
- * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
- * This can either be due to a pending signal, or because the driver
- * needs to completely bail out to recover from an exceptional
- * situation like a GPU hang. From a userspace point all errors are
- * treated equally.
+ * timeline context used for fence operations.
*/
- int (*atomic_check)(struct drm_device *dev,
- struct drm_atomic_state *state);
+ unsigned int fence_context;
/**
- * @atomic_commit:
- *
- * This is the only hook to commit an atomic modeset update. The core
- * guarantees that @atomic_check has been called successfully before
- * calling this function, and that nothing has been changed in the
- * interim.
- *
- * See the documentation for struct &drm_atomic_state for how exactly
- * an atomic modeset update is described.
- *
- * Drivers using the atomic helpers can implement this hook using
- * drm_atomic_helper_commit(), or one of the exported sub-functions of
- * it.
- *
- * Nonblocking commits (as indicated with the nonblock parameter) must
- * do any preparatory work which might result in an unsuccessful commit
- * in the context of this callback. The only exceptions are hardware
- * errors resulting in -EIO. But even in that case the driver must
- * ensure that the display pipe is at least running, to avoid
- * compositors crashing when pageflips don't work. Anything else,
- * specifically committing the update to the hardware, should be done
- * without blocking the caller. For updates which do not require a
- * modeset this must be guaranteed.
- *
- * The driver must wait for any pending rendering to the new
- * framebuffers to complete before executing the flip. It should also
- * wait for any pending rendering from other drivers if the underlying
- * buffer is a shared dma-buf. Nonblocking commits must not wait for
- * rendering in the context of this callback.
- *
- * An application can request to be notified when the atomic commit has
- * completed. These events are per-CRTC and can be distinguished by the
- * CRTC index supplied in &drm_event to userspace.
- *
- * The drm core will supply a struct &drm_event in the event
- * member of each CRTC's &drm_crtc_state structure. See the
- * documentation for &drm_crtc_state for more details about the precise
- * semantics of this event.
- *
- * NOTE:
- *
- * Drivers are not allowed to shut down any display pipe successfully
- * enabled through an atomic commit on their own. Doing so can result in
- * compositors crashing if a page flip is suddenly rejected because the
- * pipe is off.
- *
- * RETURNS:
- *
- * 0 on success or one of the below negative error codes:
- *
- * - -EBUSY, if a nonblocking updated is requested and there is
- * an earlier updated pending. Drivers are allowed to support a queue
- * of outstanding updates, but currently no driver supports that.
- * Note that drivers must wait for preceding updates to complete if a
- * synchronous update is requested, they are not allowed to fail the
- * commit in that case.
- *
- * - -ENOMEM, if the driver failed to allocate memory. Specifically
- * this can happen when trying to pin framebuffers, which must only
- * be done when committing the state.
- *
- * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
- * that the driver has run out of vram, iommu space or similar GPU
- * address space needed for framebuffer.
- *
- * - -EIO, if the hardware completely died.
+ * @fence_lock:
*
- * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
- * This can either be due to a pending signal, or because the driver
- * needs to completely bail out to recover from an exceptional
- * situation like a GPU hang. From a userspace point of view all errors are
- * treated equally.
- *
- * This list is exhaustive. Specifically this hook is not allowed to
- * return -EINVAL (any invalid requests should be caught in
- * @atomic_check) or -EDEADLK (this function must not acquire
- * additional modeset locks).
- */
- int (*atomic_commit)(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock);
-
- /**
- * @atomic_state_alloc:
- *
- * This optional hook can be used by drivers that want to subclass struct
- * &drm_atomic_state to be able to track their own driver-private global
- * state easily. If this hook is implemented, drivers must also
- * implement @atomic_state_clear and @atomic_state_free.
- *
- * RETURNS:
- *
- * A new &drm_atomic_state on success or NULL on failure.
+ * spinlock to protect the fences in the fence_context.
*/
- struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
+ spinlock_t fence_lock;
/**
- * @atomic_state_clear:
- *
- * This hook must clear any driver private state duplicated into the
- * passed-in &drm_atomic_state. This hook is called when the caller
- * encountered a &drm_modeset_lock deadlock and needs to drop all
- * already acquired locks as part of the deadlock avoidance dance
- * implemented in drm_modeset_lock_backoff().
+ * @fence_seqno:
*
- * Any duplicated state must be invalidated since a concurrent atomic
- * update might change it, and the drm atomic interfaces always apply
- * updates as relative changes to the current state.
- *
- * Drivers that implement this must call drm_atomic_state_default_clear()
- * to clear common state.
+ * Seqno variable used as monotonic counter for the fences
+ * created on the CRTC's timeline.
*/
- void (*atomic_state_clear)(struct drm_atomic_state *state);
+ unsigned long fence_seqno;
/**
- * @atomic_state_free:
- *
- * This hook needs driver private resources and the &drm_atomic_state
- * itself. Note that the core first calls drm_atomic_state_clear() to
- * avoid code duplicate between the clear and free hooks.
+ * @timeline_name:
*
- * Drivers that implement this must call drm_atomic_state_default_free()
- * to release common resources.
+ * The name of the CRTC's fence timeline.
*/
- void (*atomic_state_free)(struct drm_atomic_state *state);
+ char timeline_name[32];
};
/**
- * struct drm_mode_config - Mode configuration control structure
- * @mutex: mutex protecting KMS related lists and structures
- * @connection_mutex: ww mutex protecting connector state and routing
- * @acquire_ctx: global implicit acquire context used by atomic drivers for
- * legacy IOCTLs
- * @fb_lock: mutex to protect fb state and lists
- * @num_fb: number of fbs available
- * @fb_list: list of framebuffers available
- * @num_encoder: number of encoders on this device
- * @encoder_list: list of encoder objects
- * @num_overlay_plane: number of overlay planes on this device
- * @num_total_plane: number of universal (i.e. with primary/curso) planes on this device
- * @plane_list: list of plane objects
- * @num_crtc: number of CRTCs on this device
- * @crtc_list: list of CRTC objects
- * @property_list: list of property objects
- * @min_width: minimum pixel width on this device
- * @min_height: minimum pixel height on this device
- * @max_width: maximum pixel width on this device
- * @max_height: maximum pixel height on this device
- * @funcs: core driver provided mode setting functions
- * @fb_base: base address of the framebuffer
- * @poll_enabled: track polling support for this device
- * @poll_running: track polling status for this device
- * @delayed_event: track delayed poll uevent deliver for this device
- * @output_poll_work: delayed work for polling in process context
- * @property_blob_list: list of all the blob property objects
- * @blob_lock: mutex for blob property allocation and management
- * @*_property: core property tracking
- * @preferred_depth: preferred RBG pixel depth, used by fb helpers
- * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
- * @cursor_width: hint to userspace for max cursor width
- * @cursor_height: hint to userspace for max cursor height
- * @helper_private: mid-layer private data
+ * struct drm_mode_set - new values for a CRTC config change
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
*
- * Core mode resource tracking structure. All CRTC, encoders, and connectors
- * enumerated by the driver are added here, as are global properties. Some
- * global restrictions are also here, e.g. dimension restrictions.
+ * Represents a single crtc and the connectors that it drives, with what mode
+ * and from which framebuffer it scans out.
+ *
+ * This is used to set modes.
*/
-struct drm_mode_config {
- struct mutex mutex; /* protects configuration (mode lists etc.) */
- struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
- struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
-
- /**
- * @idr_mutex:
- *
- * Mutex for KMS ID allocation and management. Protects both @crtc_idr
- * and @tile_idr.
- */
- struct mutex idr_mutex;
-
- /**
- * @crtc_idr:
- *
- * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
- * connector, modes - just makes life easier to have only one.
- */
- struct idr crtc_idr;
-
- /**
- * @tile_idr:
- *
- * Use this idr for allocating new IDs for tiled sinks like use in some
- * high-res DP MST screens.
- */
- struct idr tile_idr;
-
- struct mutex fb_lock; /* proctects global and per-file fb lists */
- int num_fb;
- struct list_head fb_list;
-
- /**
- * @num_connector: Number of connectors on this device.
- */
- int num_connector;
- /**
- * @connector_ida: ID allocator for connector indices.
- */
- struct ida connector_ida;
- /**
- * @connector_list: List of connector objects.
- */
- struct list_head connector_list;
- int num_encoder;
- struct list_head encoder_list;
-
- /*
- * Track # of overlay planes separately from # of total planes. By
- * default we only advertise overlay planes to userspace; if userspace
- * sets the "universal plane" capability bit, we'll go ahead and
- * expose all planes.
- */
- int num_overlay_plane;
- int num_total_plane;
- struct list_head plane_list;
-
- int num_crtc;
- struct list_head crtc_list;
-
- struct list_head property_list;
-
- int min_width, min_height;
- int max_width, max_height;
- const struct drm_mode_config_funcs *funcs;
- resource_size_t fb_base;
-
- /* output poll support */
- bool poll_enabled;
- bool poll_running;
- bool delayed_event;
- struct delayed_work output_poll_work;
-
- struct mutex blob_lock;
-
- /* pointers to standard properties */
- struct list_head property_blob_list;
- /**
- * @edid_property: Default connector property to hold the EDID of the
- * currently connected sink, if any.
- */
- struct drm_property *edid_property;
- /**
- * @dpms_property: Default connector property to control the
- * connector's DPMS state.
- */
- struct drm_property *dpms_property;
- /**
- * @path_property: Default connector property to hold the DP MST path
- * for the port.
- */
- struct drm_property *path_property;
- /**
- * @tile_property: Default connector property to store the tile
- * position of a tiled screen, for sinks which need to be driven with
- * multiple CRTCs.
- */
- struct drm_property *tile_property;
- /**
- * @plane_type_property: Default plane property to differentiate
- * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
- */
- struct drm_property *plane_type_property;
- /**
- * @rotation_property: Optional property for planes or CRTCs to specifiy
- * rotation.
- */
- struct drm_property *rotation_property;
- /**
- * @prop_src_x: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_x;
- /**
- * @prop_src_y: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_y;
- /**
- * @prop_src_w: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_w;
- /**
- * @prop_src_h: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_h;
- /**
- * @prop_crtc_x: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_x;
- /**
- * @prop_crtc_y: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_y;
- /**
- * @prop_crtc_w: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_w;
- /**
- * @prop_crtc_h: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_h;
- /**
- * @prop_fb_id: Default atomic plane property to specify the
- * &drm_framebuffer.
- */
- struct drm_property *prop_fb_id;
- /**
- * @prop_crtc_id: Default atomic plane property to specify the
- * &drm_crtc.
- */
- struct drm_property *prop_crtc_id;
- /**
- * @prop_active: Default atomic CRTC property to control the active
- * state, which is the simplified implementation for DPMS in atomic
- * drivers.
- */
- struct drm_property *prop_active;
- /**
- * @prop_mode_id: Default atomic CRTC property to set the mode for a
- * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
- * connectors must be of and active must be set to disabled, too.
- */
- struct drm_property *prop_mode_id;
-
- /**
- * @dvi_i_subconnector_property: Optional DVI-I property to
- * differentiate between analog or digital mode.
- */
- struct drm_property *dvi_i_subconnector_property;
- /**
- * @dvi_i_select_subconnector_property: Optional DVI-I property to
- * select between analog or digital mode.
- */
- struct drm_property *dvi_i_select_subconnector_property;
-
- /**
- * @tv_subconnector_property: Optional TV property to differentiate
- * between different TV connector types.
- */
- struct drm_property *tv_subconnector_property;
- /**
- * @tv_select_subconnector_property: Optional TV property to select
- * between different TV connector types.
- */
- struct drm_property *tv_select_subconnector_property;
- /**
- * @tv_mode_property: Optional TV property to select
- * the output TV mode.
- */
- struct drm_property *tv_mode_property;
- /**
- * @tv_left_margin_property: Optional TV property to set the left
- * margin.
- */
- struct drm_property *tv_left_margin_property;
- /**
- * @tv_right_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_right_margin_property;
- /**
- * @tv_top_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_top_margin_property;
- /**
- * @tv_bottom_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_bottom_margin_property;
- /**
- * @tv_brightness_property: Optional TV property to set the
- * brightness.
- */
- struct drm_property *tv_brightness_property;
- /**
- * @tv_contrast_property: Optional TV property to set the
- * contrast.
- */
- struct drm_property *tv_contrast_property;
- /**
- * @tv_flicker_reduction_property: Optional TV property to control the
- * flicker reduction mode.
- */
- struct drm_property *tv_flicker_reduction_property;
- /**
- * @tv_overscan_property: Optional TV property to control the overscan
- * setting.
- */
- struct drm_property *tv_overscan_property;
- /**
- * @tv_saturation_property: Optional TV property to set the
- * saturation.
- */
- struct drm_property *tv_saturation_property;
- /**
- * @tv_hue_property: Optional TV property to set the hue.
- */
- struct drm_property *tv_hue_property;
-
- /**
- * @scaling_mode_property: Optional connector property to control the
- * upscaling, mostly used for built-in panels.
- */
- struct drm_property *scaling_mode_property;
- /**
- * @aspect_ratio_property: Optional connector property to control the
- * HDMI infoframe aspect ratio setting.
- */
- struct drm_property *aspect_ratio_property;
- /**
- * @degamma_lut_property: Optional CRTC property to set the LUT used to
- * convert the framebuffer's colors to linear gamma.
- */
- struct drm_property *degamma_lut_property;
- /**
- * @degamma_lut_size_property: Optional CRTC property for the size of
- * the degamma LUT as supported by the driver (read-only).
- */
- struct drm_property *degamma_lut_size_property;
- /**
- * @ctm_property: Optional CRTC property to set the
- * matrix used to convert colors after the lookup in the
- * degamma LUT.
- */
- struct drm_property *ctm_property;
- /**
- * @gamma_lut_property: Optional CRTC property to set the LUT used to
- * convert the colors, after the CTM matrix, to the gamma space of the
- * connected screen.
- */
- struct drm_property *gamma_lut_property;
- /**
- * @gamma_lut_size_property: Optional CRTC property for the size of the
- * gamma LUT as supported by the driver (read-only).
- */
- struct drm_property *gamma_lut_size_property;
-
- /**
- * @suggested_x_property: Optional connector property with a hint for
- * the position of the output on the host's screen.
- */
- struct drm_property *suggested_x_property;
- /**
- * @suggested_y_property: Optional connector property with a hint for
- * the position of the output on the host's screen.
- */
- struct drm_property *suggested_y_property;
-
- /* dumb ioctl parameters */
- uint32_t preferred_depth, prefer_shadow;
-
- /**
- * @async_page_flip: Does this device support async flips on the primary
- * plane?
- */
- bool async_page_flip;
-
- /**
- * @allow_fb_modifiers:
- *
- * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
- */
- bool allow_fb_modifiers;
+struct drm_mode_set {
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ struct drm_display_mode *mode;
- /* cursor size */
- uint32_t cursor_width, cursor_height;
+ uint32_t x;
+ uint32_t y;
- struct drm_mode_config_helper_funcs *helper_private;
+ struct drm_connector **connectors;
+ size_t num_connectors;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
-extern __printf(6, 7)
+__printf(6, 7)
int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
-extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+void drm_crtc_cleanup(struct drm_crtc *crtc);
/**
* drm_crtc_index - find the index of a registered CRTC
@@ -1354,28 +819,17 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
* Given a registered CRTC, return the mask bit of that CRTC for an
* encoder's possible_crtcs field.
*/
-static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
+static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
{
return 1 << drm_crtc_index(crtc);
}
-extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
- int *hdisplay, int *vdisplay);
-extern int drm_crtc_force_disable(struct drm_crtc *crtc);
-extern int drm_crtc_force_disable_all(struct drm_device *dev);
-
-extern void drm_mode_config_init(struct drm_device *dev);
-extern void drm_mode_config_reset(struct drm_device *dev);
-extern void drm_mode_config_cleanup(struct drm_device *dev);
-
-extern int drm_mode_set_config_internal(struct drm_mode_set *set);
+void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay);
+int drm_crtc_force_disable(struct drm_crtc *crtc);
+int drm_crtc_force_disable_all(struct drm_device *dev);
-extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8]);
-extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8]);
-extern void drm_mode_put_tile_group(struct drm_device *dev,
- struct drm_tile_group *tg);
+int drm_mode_set_config_internal(struct drm_mode_set *set);
/* Helpers */
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
new file mode 100644
index 000000000000..7d63b1d4adb9
--- /dev/null
+++ b/include/drm/drm_debugfs_crc.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2016 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_DEBUGFS_CRC_H__
+#define __DRM_DEBUGFS_CRC_H__
+
+#define DRM_MAX_CRC_NR 10
+
+/**
+ * struct drm_crtc_crc_entry - entry describing a frame's content
+ * @has_frame_counter: whether the source was able to provide a frame number
+ * @frame: number of the frame this CRC is about, if @has_frame_counter is true
+ * @crc: array of values that characterize the frame
+ */
+struct drm_crtc_crc_entry {
+ bool has_frame_counter;
+ uint32_t frame;
+ uint32_t crcs[DRM_MAX_CRC_NR];
+};
+
+#define DRM_CRC_ENTRIES_NR 128
+
+/**
+ * struct drm_crtc_crc - data supporting CRC capture on a given CRTC
+ * @lock: protects the fields in this struct
+ * @source: name of the currently configured source of CRCs
+ * @opened: whether userspace has opened the data file for reading
+ * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
+ * @head: head of circular queue
+ * @tail: tail of circular queue
+ * @values_cnt: number of CRC values per entry, up to %DRM_MAX_CRC_NR
+ * @wq: workqueue used to synchronize reading and writing
+ */
+struct drm_crtc_crc {
+ spinlock_t lock;
+ const char *source;
+ bool opened;
+ struct drm_crtc_crc_entry *entries;
+ int head, tail;
+ size_t values_cnt;
+ wait_queue_head_t wq;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs);
+#else
+static inline int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs)
+{
+ return -EINVAL;
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* __DRM_DEBUGFS_CRC_H__ */
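A minimal sketch of how a driver could feed hardware CRC values into this new
debugfs interface; foo_crtc, foo_read_hw_crc() and foo_read_frame_counter()
are hypothetical driver internals, not part of this patch.

	#include <drm/drm_crtc.h>
	#include <drm/drm_debugfs_crc.h>

	static void foo_crtc_frame_done(struct foo_crtc *foo)
	{
		uint32_t crcs[DRM_MAX_CRC_NR] = { 0 };

		/* One CRC value from the hardware; the remaining slots stay zero. */
		crcs[0] = foo_read_hw_crc(foo);

		/* Pass the frame counter so userspace can match CRCs to frames. */
		drm_crtc_add_crc_entry(&foo->base, true,
				       foo_read_frame_counter(foo), crcs);
	}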
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
index e8a9dfd0e055..4c42db81fcb4 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -40,6 +40,8 @@
#define DP_DUAL_MODE_REV_TYPE2 0x00
#define DP_DUAL_MODE_TYPE_MASK 0xf0
#define DP_DUAL_MODE_TYPE_TYPE2 0xa0
+/* This field is marked reserved in dual mode spec, used in LSPCON */
+#define DP_DUAL_MODE_TYPE_HAS_DPCD 0x08
#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/
#define DP_DUAL_IEEE_OUI_LEN 3
#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
@@ -55,6 +57,11 @@
#define DP_DUAL_MODE_CEC_ENABLE 0x01
#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
+/* LSPCON specific registers, defined by MCA */
+#define DP_DUAL_MODE_LSPCON_MODE_CHANGE 0x40
+#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41
+#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1
+
struct i2c_adapter;
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
@@ -63,6 +70,20 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
u8 offset, const void *buffer, size_t size);
/**
+ * enum drm_lspcon_mode
+ * @DRM_LSPCON_MODE_INVALID: No LSPCON.
+ * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON
+ * which drives DP++ to HDMI 1.4 conversion.
+ * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON
+ * which drives DP++ to HDMI 2.0 active conversion.
+ */
+enum drm_lspcon_mode {
+ DRM_LSPCON_MODE_INVALID,
+ DRM_LSPCON_MODE_LS,
+ DRM_LSPCON_MODE_PCON,
+};
+
+/**
* enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
* @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
* @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
@@ -70,6 +91,7 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
* @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
+ * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter
*/
enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_NONE,
@@ -78,6 +100,7 @@ enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_TYPE1_HDMI,
DRM_DP_DUAL_MODE_TYPE2_DVI,
DRM_DP_DUAL_MODE_TYPE2_HDMI,
+ DRM_DP_DUAL_MODE_LSPCON,
};
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
@@ -89,4 +112,8 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode *current_mode);
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode reqd_mode);
#endif
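A sketch of how the new LSPCON helpers could be used to detect an adaptor and
switch it into protocol-converter mode; foo_enable_pcon() is hypothetical and
"adapter" is assumed to be the port's DDC i2c_adapter.

	#include <linux/errno.h>
	#include <drm/drm_dp_dual_mode_helper.h>

	static int foo_enable_pcon(struct i2c_adapter *adapter)
	{
		enum drm_lspcon_mode mode;
		int ret;

		if (drm_dp_dual_mode_detect(adapter) != DRM_DP_DUAL_MODE_LSPCON)
			return -ENODEV;

		ret = drm_lspcon_get_mode(adapter, &mode);
		if (ret)
			return ret;

		/* Request HDMI 2.0 active conversion if not already enabled. */
		if (mode != DRM_LSPCON_MODE_PCON)
			ret = drm_lspcon_set_mode(adapter, DRM_LSPCON_MODE_PCON);

		return ret;
	}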
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2a79882cb68e..55bbeb0ff594 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -690,6 +690,12 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
}
+static inline bool
+drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
+}
+
/*
* DisplayPort AUX channel
*/
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
new file mode 100644
index 000000000000..c4fc49583dc0
--- /dev/null
+++ b/include/drm/drm_drv.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * Copyright 2016 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_DRV_H_
+#define _DRM_DRV_H_
+
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_gem_object;
+struct drm_master;
+struct drm_minor;
+struct dma_buf_attachment;
+struct drm_display_mode;
+struct drm_mode_create_dumb;
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_LEGACY 0x2
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
+#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
+
+/**
+ * struct drm_driver - DRM driver structure
+ *
+ * This structure represents the common code for a family of cards. There will
+ * be one drm_device for each card present in this family. It contains lots of
+ * vfunc entries, and a pile of those probably should be moved to more
+ * appropriate places like &drm_mode_config_funcs or into a new operations
+ * structure for GEM drivers.
+ */
+struct drm_driver {
+ int (*load) (struct drm_device *, unsigned long flags);
+ int (*firstopen) (struct drm_device *);
+ int (*open) (struct drm_device *, struct drm_file *);
+ void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+ void (*postclose) (struct drm_device *, struct drm_file *);
+ void (*lastclose) (struct drm_device *);
+ int (*unload) (struct drm_device *);
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int (*dma_quiescent) (struct drm_device *);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @get_vblank_counter:
+ *
+ * Driver callback for fetching a raw hardware vblank counter for the
+ * CRTC specified with the pipe argument. If a device doesn't have a
+ * hardware counter, the driver can simply use the
+ * drm_vblank_no_hw_counter() function. The DRM core will account for
+ * missed vblank events while interrupts were disabled based on system
+ * timestamps.
+ *
+ * Wraparound handling and loss of events due to modesetting is dealt
+ * with in the DRM core code, as long as drivers call
+ * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
+ * enabling a CRTC.
+ *
+ * Returns:
+ *
+ * Raw vblank counter value.
+ */
+ u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @enable_vblank:
+ *
+ * Enable vblank interrupts for the CRTC specified with the pipe
+ * argument.
+ *
+ * Returns:
+ *
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+ int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @disable_vblank:
+ *
+ * Disable vblank interrupts for the CRTC specified with the pipe
+ * argument.
+ */
+ void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @device_is_agp:
+ *
+ * Called by drm_device_is_agp(). Typically used to determine if a card
+ * is really attached to AGP or not.
+ *
+ * Returns:
+ *
+ * One of three values is returned depending on whether or not the
+ * card is absolutely not AGP (return of 0), absolutely is AGP
+ * (return of 1), or may or may not be AGP (return of 2).
+ */
+ int (*device_is_agp) (struct drm_device *dev);
+
+ /**
+ * @get_scanout_position:
+ *
+ * Called by vblank timestamping code.
+ *
+ * Returns the current display scanout position from a crtc, and an
+ * optional accurate ktime_get() timestamp of when position was
+ * measured. Note that this is a helper callback which is only used if a
+ * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the
+ * @get_vblank_timestamp callback.
+ *
+ * Parameters:
+ *
+ * dev:
+ * DRM device.
+ * pipe:
+ * Id of the crtc to query.
+ * flags:
+ * Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * vpos:
+ * Target location for current vertical scanout position.
+ * hpos:
+ * Target location for current horizontal scanout position.
+ * stime:
+ * Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * etime:
+ * Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
+ * mode:
+ * Current display timings.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * Returns:
+ *
+ * Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID:
+ * Query successful.
+ * DRM_SCANOUTPOS_INVBL:
+ * Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE:
+ * Returned position is accurate. A lack of this flag means that the
+ * returned position may be offset by a constant but unknown small
+ * number of scanlines wrt. real scanout position.
+ *
+ */
+ int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+ /**
+ * @get_vblank_timestamp:
+ *
+ * Called by drm_get_last_vbltimestamp(). Should return a precise
+ * timestamp when the most recent VBLANK interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of VBLANK will start scanning out,
+ * the time immediately after end of the VBLANK interval. If the
+ * @crtc is currently inside VBLANK, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * Parameters:
+ *
+ * dev:
+ * DRM device handle.
+ * pipe:
+ * crtc for which timestamp should be returned.
+ * max_error:
+ * Maximum allowable timestamp error in nanoseconds.
+ * The implementation should strive to provide a timestamp
+ * with an error of at most max_error nanoseconds.
+ * Returns the true upper bound on the error of the timestamp.
+ * vblank_time:
+ * Target location for returned vblank timestamp.
+ * flags:
+ * 0 = Defaults, no special treatment needed.
+ * DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+ * irq handler. Some drivers need to apply some workarounds
+ * for gpu-specific vblank irq quirks if flag is set.
+ *
+ * Returns:
+ *
+ * Zero if timestamping isn't supported in the current display mode or a
+ * negative number on failure. A positive status code on success,
+ * which describes how the vblank_time timestamp was computed.
+ */
+ int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+ /* these have to be filled in */
+
+ irqreturn_t(*irq_handler) (int irq, void *arg);
+ void (*irq_preinstall) (struct drm_device *dev);
+ int (*irq_postinstall) (struct drm_device *dev);
+ void (*irq_uninstall) (struct drm_device *dev);
+
+ /**
+ * @master_create:
+ *
+ * Called whenever a new master is created. Only used by vmwgfx.
+ */
+ int (*master_create)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @master_destroy:
+ *
+ * Called whenever a master is destroyed. Only used by vmwgfx.
+ */
+ void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @master_set:
+ *
+ * Called whenever the minor master is set. Only used by vmwgfx.
+ */
+ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
+ /**
+ * @master_drop:
+ *
+ * Called whenever the minor master is dropped. Only used by vmwgfx.
+ */
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
+
+ int (*debugfs_init)(struct drm_minor *minor);
+ void (*debugfs_cleanup)(struct drm_minor *minor);
+
+ /**
+ * @gem_free_object: destructor for drm_gem_objects
+ *
+ * This is deprecated and should not be used by new drivers. Use
+ * @gem_free_object_unlocked instead.
+ */
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+ /**
+ * @gem_free_object_unlocked: destructor for drm_gem_objects
+ *
+ * This is for drivers which are not encumbered with dev->struct_mutex
+ * legacy locking schemes. Use this hook instead of @gem_free_object.
+ */
+ void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
+
+ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+
+ /**
+ * @gem_create_object: constructor for gem objects
+ *
+ * Hook for allocating the GEM object struct, for use by core
+ * helpers.
+ */
+ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
+ size_t size);
+
+ /* prime: */
+ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags, int *prime_fd);
+ /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+ int prime_fd, uint32_t *handle);
+ /* export GEM -> dmabuf */
+ struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+ /* import dmabuf -> GEM */
+ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+ /* low-level interface used by drm_gem_prime_{import,export} */
+ int (*gem_prime_pin)(struct drm_gem_object *obj);
+ void (*gem_prime_unpin)(struct drm_gem_object *obj);
+ struct reservation_object * (*gem_prime_res_obj)(
+ struct drm_gem_object *obj);
+ struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*gem_prime_import_sg_table)(
+ struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+ void *(*gem_prime_vmap)(struct drm_gem_object *obj);
+ void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
+ int (*gem_prime_mmap)(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+ /* vga arb irq handler */
+ void (*vgaarb_irq)(struct drm_device *dev, bool state);
+
+ /**
+ * @dumb_create:
+ *
+ * This creates a new dumb buffer in the driver's backing storage manager (GEM,
+ * TTM or something else entirely) and returns the resulting buffer handle. This
+ * handle can then be wrapped up into a framebuffer modeset object.
+ *
+ * Note that userspace is not allowed to use such objects for render
+ * acceleration - drivers must create their own private ioctls for such a use
+ * case.
+ *
+ * Width, height and depth are specified in the &drm_mode_create_dumb
+ * argument. The callback needs to fill the handle, pitch and size for
+ * the created buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ /**
+ * @dumb_map_offset:
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer. GEM-based drivers must use
+ * drm_gem_create_mmap_offset() to implement this.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ /**
+ * @dumb_destroy:
+ *
+ * This destroys the userspace handle for the given dumb backing storage buffer.
+ * Since buffer objects must be reference counted in the kernel a buffer object
+ * won't be immediately freed if a framebuffer modeset object still uses it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
+ /* Driver private ops for this object */
+ const struct vm_operations_struct *gem_vm_ops;
+
+ int major;
+ int minor;
+ int patchlevel;
+ char *name;
+ char *desc;
+ char *date;
+
+ u32 driver_features;
+ int dev_priv_size;
+ const struct drm_ioctl_desc *ioctls;
+ int num_ioctls;
+ const struct file_operations *fops;
+
+ /* List of devices hanging off this driver with stealth attach. */
+ struct list_head legacy_dev_list;
+};
+
+extern __printf(6, 7)
+void drm_dev_printk(const struct device *dev, const char *level,
+ unsigned int category, const char *function_name,
+ const char *prefix, const char *format, ...);
+extern __printf(3, 4)
+void drm_printk(const char *level, unsigned int category,
+ const char *format, ...);
+extern unsigned int drm_debug;
+
+int drm_dev_init(struct drm_device *dev,
+ struct drm_driver *driver,
+ struct device *parent);
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
+
+void drm_dev_ref(struct drm_device *dev);
+void drm_dev_unref(struct drm_device *dev);
+void drm_put_dev(struct drm_device *dev);
+void drm_unplug_dev(struct drm_device *dev);
+
+int drm_dev_set_unique(struct drm_device *dev, const char *name);
+
+
+#endif
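A minimal sketch of a modern KMS driver filling the relocated struct
drm_driver and registering a device; foo_fops, foo_dumb_create and foo_probe
are hypothetical placeholders.

	#include <linux/err.h>
	#include <drm/drm_drv.h>

	static struct drm_driver foo_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		.fops		 = &foo_fops,
		.dumb_create	 = foo_dumb_create,
		.name		 = "foo",
		.desc		 = "Example KMS driver",
		.date		 = "20161127",
		.major		 = 1,
	};

	static int foo_probe(struct device *parent)
	{
		struct drm_device *drm;
		int ret;

		drm = drm_dev_alloc(&foo_driver, parent);
		if (IS_ERR(drm))
			return PTR_ERR(drm);

		ret = drm_dev_register(drm, 0);
		if (ret)
			drm_dev_unref(drm);

		return ret;
	}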
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3a7d440bc11..38eabf65f19d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -330,7 +330,6 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
int drm_load_edid_firmware(struct drm_connector *connector);
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 387e33a4d6ee..c7438ff0d609 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -189,7 +189,7 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
}
/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
-static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
+static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc);
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index f313211f8ed5..3b00f6480b83 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -12,6 +12,8 @@ struct drm_fb_helper;
struct drm_device;
struct drm_file;
struct drm_mode_fb_cmd2;
+struct drm_plane;
+struct drm_plane_state;
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
@@ -41,6 +43,9 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
#ifdef CONFIG_DEBUG_FS
struct seq_file;
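The new drm_fb_cma_prepare_fb() helper is meant to be plugged straight into a
plane helper vtable as the .prepare_fb hook; a sketch, with the other hooks
left to hypothetical foo_* functions.

	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
		.prepare_fb	= drm_fb_cma_prepare_fb,
		.atomic_check	= foo_plane_atomic_check,
		.atomic_update	= foo_plane_atomic_update,
	};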
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index ed8edfef75b2..975deedd593e 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -228,7 +228,9 @@ struct drm_fb_helper {
.fb_set_par = drm_fb_helper_set_par, \
.fb_setcmap = drm_fb_helper_setcmap, \
.fb_blank = drm_fb_helper_blank, \
- .fb_pan_display = drm_fb_helper_pan_display
+ .fb_pan_display = drm_fb_helper_pan_display, \
+ .fb_debug_enter = drm_fb_helper_debug_enter, \
+ .fb_debug_leave = drm_fb_helper_debug_leave
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 30c30fa87ee8..fcc08da850c8 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,14 +25,43 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+/**
+ * struct drm_format_info - information about a DRM format
+ * @format: 4CC format identifier (DRM_FORMAT_*)
+ * @depth: Color depth (number of bits per pixel excluding padding bits),
+ * valid for a subset of RGB formats only. This is a legacy field, do not
+ * use in new code and set to 0 for new formats.
+ * @num_planes: Number of color planes (1 to 3)
+ * @cpp: Number of bytes per pixel (per plane)
+ * @hsub: Horizontal chroma subsampling factor
+ * @vsub: Vertical chroma subsampling factor
+ */
+struct drm_format_info {
+ u32 format;
+ u8 depth;
+ u8 num_planes;
+ u8 cpp[3];
+ u8 hsub;
+ u8 vsub;
+};
+
+/**
+ * struct drm_format_name_buf - name of a DRM format
+ * @str: string buffer containing the format name
+ */
+struct drm_format_name_buf {
+ char str[32];
+};
+
+const struct drm_format_info *__drm_format_info(u32 format);
+const struct drm_format_info *drm_format_info(u32 format);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
-char *drm_get_format_name(uint32_t format) __malloc;
+const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
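A sketch of the new format-info and format-name helpers, which replace the
removed drm_fb_get_bpp_depth() and the old kmalloc'ed name string;
foo_dump_format() is hypothetical.

	#include <linux/printk.h>
	#include <drm/drm_fourcc.h>

	static void foo_dump_format(u32 fourcc)
	{
		const struct drm_format_info *info = drm_format_info(fourcc);
		struct drm_format_name_buf name;

		if (!info)
			return;

		/* The name now lives in a caller-provided buffer; nothing to free. */
		pr_debug("%s: %u plane(s), %u bytes per pixel on plane 0\n",
			 drm_get_format_name(fourcc, &name),
			 info->num_planes, info->cpp[0]);
	}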
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index f5ae1f436a4b..1ddfa2928802 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -149,12 +149,12 @@ struct drm_framebuffer {
*/
unsigned int offsets[4];
/**
- * @modifier: Data layout modifier, per buffer. This is used to describe
+ * @modifier: Data layout modifier. This is used to describe
* tiling, or also special layouts (like compression) of auxiliary
* buffers. For userspace created object this is copied from
* drm_mode_fb_cmd2.
*/
- uint64_t modifier[4];
+ uint64_t modifier;
/**
* @width: Logical width of the visible area of the framebuffer, in
* pixels.
@@ -251,6 +251,24 @@ static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
}
/**
+ * drm_framebuffer_assign - store a reference to the fb
+ * @p: location to store framebuffer
+ * @fb: new framebuffer (may be NULL)
+ *
+ * This function sets the location to store a reference to the framebuffer,
+ * unreferencing the framebuffer that was previously stored in that location.
+ */
+static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
+ struct drm_framebuffer *fb)
+{
+ if (fb)
+ drm_framebuffer_reference(fb);
+ if (*p)
+ drm_framebuffer_unreference(*p);
+ *p = fb;
+}
+
+/*
* drm_for_each_fb - iterate over all framebuffers
* @fb: the loop cursor
* @dev: the DRM device
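A minimal sketch of the new drm_framebuffer_assign() helper keeping reference
counts balanced when a driver swaps a cached framebuffer pointer; struct
foo_pipe and its scanout_fb member are hypothetical.

	static void foo_set_scanout_fb(struct foo_pipe *pipe,
				       struct drm_framebuffer *fb)
	{
		/* References fb (if non-NULL) and unreferences the old framebuffer. */
		drm_framebuffer_assign(&pipe->scanout_fb, fb);
	}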
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
index 2401b14d301f..293d08caab60 100644
--- a/include/drm/drm_irq.h
+++ b/include/drm/drm_irq.h
@@ -130,42 +130,37 @@ struct drm_vblank_crtc {
bool enabled;
};
-extern int drm_irq_install(struct drm_device *dev, int irq);
-extern int drm_irq_uninstall(struct drm_device *dev);
+int drm_irq_install(struct drm_device *dev, int irq);
+int drm_irq_uninstall(struct drm_device *dev);
-extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
-extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime);
-extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
-extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
-extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
+u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ struct timeval *vblanktime);
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
+int drm_crtc_vblank_get(struct drm_crtc *crtc);
+void drm_crtc_vblank_put(struct drm_crtc *crtc);
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
+void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+void drm_crtc_vblank_off(struct drm_crtc *crtc);
+void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on(struct drm_crtc *crtc);
+void drm_vblank_cleanup(struct drm_device *dev);
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
+u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
-extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe, int *max_error,
- struct timeval *vblank_time,
- unsigned flags,
- const struct drm_display_mode *mode);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
- const struct drm_display_mode *mode);
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ const struct drm_display_mode *mode);
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
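A typical sketch of the CRTC-based vblank entry points kept here, in a
hypothetical irq handler; foo_device, foo_read_irq_status() and
FOO_IRQ_VBLANK are made up for illustration.

	#include <linux/interrupt.h>

	static irqreturn_t foo_irq_handler(int irq, void *arg)
	{
		struct foo_device *foo = arg;

		/* Report the vblank to the core so waiters and events fire. */
		if (foo_read_irq_status(foo) & FOO_IRQ_VBLANK)
			drm_crtc_handle_vblank(&foo->crtc);

		return IRQ_HANDLED;
	}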
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 205ddcf6d55d..0b8371795aeb 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -44,6 +44,9 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
+#ifdef CONFIG_DRM_DEBUG_MM
+#include <linux/stackdepot.h>
+#endif
enum drm_mm_search_flags {
DRM_MM_SEARCH_DEFAULT = 0,
@@ -74,6 +77,9 @@ struct drm_mm_node {
u64 size;
u64 __subtree_last;
struct drm_mm *mm;
+#ifdef CONFIG_DRM_DEBUG_MM
+ depot_stack_handle_t stack;
+#endif
};
struct drm_mm {
@@ -302,10 +308,26 @@ void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);
struct drm_mm_node *
-drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
-struct drm_mm_node *
-drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
+/**
+ * drm_mm_for_each_node_in_range - iterator to walk over a range of
+ * allocated nodes
+ * @node__: drm_mm_node structure to assign to in each iteration step
+ * @mm__: drm_mm allocator to walk
+ * @start__: starting offset, the first node will overlap this
+ * @end__: ending offset, the last node will start before this (but may overlap)
+ *
+ * This iterator walks over all nodes in the range allocator that lie
+ * between @start and @end. It is implemented similarly to list_for_each(),
+ * but using the internal interval tree to accelerate the search for the
+ * starting node, and so not safe against removal of elements. It assumes
+ * that @end is within (or is the upper limit of) the drm_mm allocator.
+ */
+#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \
+ for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
+ node__ && node__->start < (end__); \
+ node__ = list_next_entry(node__, node_list))
void drm_mm_init_scan(struct drm_mm *mm,
u64 size,
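A sketch of the new range iterator: it walks only the allocated nodes that
overlap [start, end), so a driver can cheaply test whether a range is really
free. foo_range_is_busy() is hypothetical.

	static bool foo_range_is_busy(struct drm_mm *mm, u64 start, u64 end)
	{
		struct drm_mm_node *node;

		drm_mm_for_each_node_in_range(node, mm, start, end)
			return true;	/* at least one allocation overlaps */

		return false;
	}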
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
new file mode 100644
index 000000000000..bf9991b20611
--- /dev/null
+++ b/include/drm/drm_mode_config.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_MODE_CONFIG_H__
+#define __DRM_MODE_CONFIG_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_modeset_lock.h>
+
+struct drm_file;
+struct drm_device;
+struct drm_atomic_state;
+struct drm_mode_fb_cmd2;
+
+/**
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
+ */
+struct drm_mode_config_funcs {
+ /**
+ * @fb_create:
+ *
+ * Create a new framebuffer object. The core does basic checks on the
+ * requested metadata, but most of that is left to the driver. See
+ * struct &drm_mode_fb_cmd2 for details.
+ *
+ * If the parameters are deemed valid and the backing storage objects in
+ * the underlying memory manager all exist, then the driver allocates
+ * a new &drm_framebuffer structure, subclassed to contain
+ * driver-specific information (like the internal native buffer object
+ * references). It also needs to fill out all relevant metadata, which
+ * should be done by calling drm_helper_mode_fill_fb_struct().
+ *
+ * The initialization is finalized by calling drm_framebuffer_init(),
+ * which registers the framebuffer and makes it accessible to other
+ * threads.
+ *
+ * RETURNS:
+ *
+ * A new framebuffer with an initial reference count of 1 or a negative
+ * error code encoded with ERR_PTR().
+ */
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+ /**
+ * @output_poll_changed:
+ *
+ * Callback used by helpers to inform the driver of output configuration
+ * changes.
+ *
+ * Drivers implementing fbdev emulation with the helpers can call
+ * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
+ * helper of output changes.
+ *
+ * FIXME:
+ *
+ * Except that there's no vtable for device-level helper callbacks
+ * there's no reason this is a core function.
+ */
+ void (*output_poll_changed)(struct drm_device *dev);
+
+ /**
+ * @atomic_check:
+ *
+ * This is the only hook to validate an atomic modeset update. This
+ * function must reject any modeset and state changes which the hardware
+ * or driver doesn't support. This includes but is of course not limited
+ * to:
+ *
+ * - Checking that the modes, framebuffers, scaling and placement
+ * requirements and so on are within the limits of the hardware.
+ *
+ * - Checking that any hidden shared resources are not oversubscribed.
+ * This can be shared PLLs, shared lanes, overall memory bandwidth,
+ * display fifo space (where shared between planes or maybe even
+ * CRTCs).
+ *
+ * - Checking that virtualized resources exported to userspace are not
+ * oversubscribed. For various reasons it can make sense to expose
+ * more planes, crtcs or encoders than are physically there. One
+ * example is dual-pipe operations (which generally should be hidden
+ * from userspace when lockstepped in hardware, exposed otherwise),
+ * where a plane might need 1 hardware plane (if it's just on one
+ * pipe), 2 hardware planes (when it spans both pipes) or maybe even
+ * shared a hardware plane with a 2nd plane (if there's a compatible
+ * plane requested on the area handled by the other pipe).
+ *
+ * - Check that any transitional state is possible and that if
+ * requested, the update can indeed be done in the vblank period
+ * without temporarily disabling some functions.
+ *
+ * - Check any other constraints the driver or hardware might have.
+ *
+ * - This callback also needs to correctly fill out the &drm_crtc_state
+ * in this update to make sure that drm_atomic_crtc_needs_modeset()
+ * reflects the nature of the possible update and returns true if and
+ * only if the update cannot be applied without tearing within one
+ * vblank on that CRTC. The core uses that information to reject
+ * updates which require a full modeset (i.e. blanking the screen, or
+ * at least pausing updates for a substantial amount of time) if
+ * userspace has disallowed that in its request.
+ *
+ * - The driver also does not need to repeat basic input validation
+ * like done for the corresponding legacy entry points. The core does
+ * that before calling this hook.
+ *
+ * See the documentation of @atomic_commit for an exhaustive list of
+ * error conditions which don't have to be checked at the
+ * ->atomic_check() stage.
+ *
+ * See the documentation for struct &drm_atomic_state for how exactly
+ * an atomic modeset update is described.
+ *
+ * Drivers using the atomic helpers can implement this hook using
+ * drm_atomic_helper_check(), or one of the exported sub-functions of
+ * it.
+ *
+ * RETURNS:
+ *
+ * 0 on success or one of the below negative error codes:
+ *
+ * - -EINVAL, if any of the above constraints are violated.
+ *
+ * - -EDEADLK, when returned from an attempt to acquire an additional
+ * &drm_modeset_lock through drm_modeset_lock().
+ *
+ * - -ENOMEM, if allocating additional state sub-structures failed due
+ * to lack of memory.
+ *
+ * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
+ * This can either be due to a pending signal, or because the driver
+ * needs to completely bail out to recover from an exceptional
+ * situation like a GPU hang. From a userspace point of view all errors are
+ * treated equally.
+ */
+ int (*atomic_check)(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_commit:
+ *
+ * This is the only hook to commit an atomic modeset update. The core
+ * guarantees that @atomic_check has been called successfully before
+ * calling this function, and that nothing has been changed in the
+ * interim.
+ *
+ * See the documentation for struct &drm_atomic_state for how exactly
+ * an atomic modeset update is described.
+ *
+ * Drivers using the atomic helpers can implement this hook using
+ * drm_atomic_helper_commit(), or one of the exported sub-functions of
+ * it.
+ *
+ * Nonblocking commits (as indicated with the nonblock parameter) must
+ * do any preparatory work which might result in an unsuccessful commit
+ * in the context of this callback. The only exceptions are hardware
+ * errors resulting in -EIO. But even in that case the driver must
+ * ensure that the display pipe is at least running, to avoid
+ * compositors crashing when pageflips don't work. Anything else,
+ * specifically committing the update to the hardware, should be done
+ * without blocking the caller. For updates which do not require a
+ * modeset this must be guaranteed.
+ *
+ * The driver must wait for any pending rendering to the new
+ * framebuffers to complete before executing the flip. It should also
+ * wait for any pending rendering from other drivers if the underlying
+ * buffer is a shared dma-buf. Nonblocking commits must not wait for
+ * rendering in the context of this callback.
+ *
+ * An application can request to be notified when the atomic commit has
+ * completed. These events are per-CRTC and can be distinguished by the
+ * CRTC index supplied in &drm_event to userspace.
+ *
+ * The drm core will supply a struct &drm_event in the event
+ * member of each CRTC's &drm_crtc_state structure. See the
+ * documentation for &drm_crtc_state for more details about the precise
+ * semantics of this event.
+ *
+ * NOTE:
+ *
+ * Drivers are not allowed to shut down any display pipe successfully
+ * enabled through an atomic commit on their own. Doing so can result in
+ * compositors crashing if a page flip is suddenly rejected because the
+ * pipe is off.
+ *
+ * RETURNS:
+ *
+ * 0 on success or one of the below negative error codes:
+ *
+ * - -EBUSY, if a nonblocking update is requested and there is
+ * an earlier update pending. Drivers are allowed to support a queue
+ * of outstanding updates, but currently no driver supports that.
+ * Note that drivers must wait for preceding updates to complete if a
+ * synchronous update is requested, they are not allowed to fail the
+ * commit in that case.
+ *
+ * - -ENOMEM, if the driver failed to allocate memory. Specifically
+ * this can happen when trying to pin framebuffers, which must only
+ * be done when committing the state.
+ *
+ * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
+ * that the driver has run out of vram, iommu space or similar GPU
+ * address space needed for framebuffer.
+ *
+ * - -EIO, if the hardware completely died.
+ *
+ * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
+ * This can either be due to a pending signal, or because the driver
+ * needs to completely bail out to recover from an exceptional
+ * situation like a GPU hang. From a userspace point of view all errors are
+ * treated equally.
+ *
+ * This list is exhaustive. Specifically this hook is not allowed to
+ * return -EINVAL (any invalid requests should be caught in
+ * @atomic_check) or -EDEADLK (this function must not acquire
+ * additional modeset locks).
+ */
+ int (*atomic_commit)(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock);
+
+ /**
+ * @atomic_state_alloc:
+ *
+ * This optional hook can be used by drivers that want to subclass struct
+ * &drm_atomic_state to be able to track their own driver-private global
+ * state easily. If this hook is implemented, drivers must also
+ * implement @atomic_state_clear and @atomic_state_free.
+ *
+ * RETURNS:
+ *
+ * A new &drm_atomic_state on success or NULL on failure.
+ */
+ struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
+
+ /**
+ * @atomic_state_clear:
+ *
+ * This hook must clear any driver private state duplicated into the
+ * passed-in &drm_atomic_state. This hook is called when the caller
+ * encountered a &drm_modeset_lock deadlock and needs to drop all
+ * already acquired locks as part of the deadlock avoidance dance
+ * implemented in drm_modeset_lock_backoff().
+ *
+ * Any duplicated state must be invalidated since a concurrent atomic
+ * update might change it, and the drm atomic interfaces always apply
+ * updates as relative changes to the current state.
+ *
+ * Drivers that implement this must call drm_atomic_state_default_clear()
+ * to clear common state.
+ */
+ void (*atomic_state_clear)(struct drm_atomic_state *state);
+
+ /**
+ * @atomic_state_free:
+ *
+ * This hook needs to free driver-private resources and the
+ * &drm_atomic_state itself. Note that the core first calls
+ * drm_atomic_state_clear() to avoid code duplication between the clear and
+ * free hooks.
+ *
+ * Drivers that implement this must call drm_atomic_state_default_free()
+ * to release common resources.
+ */
+ void (*atomic_state_free)(struct drm_atomic_state *state);
+};
+
+/**
+ * struct drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ * legacy IOCTLs
+ * @fb_lock: mutex to protect fb state and lists
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
+ * @delayed_event: track delayed poll uevent deliver for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
+ * @blob_lock: mutex for blob property allocation and management
+ * @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
+ * @helper_private: mid-layer private data
+ *
+ * Core mode resource tracking structure. All CRTC, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
+ */
+struct drm_mode_config {
+ struct mutex mutex; /* protects configuration (mode lists etc.) */
+ struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
+ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
+
+ /**
+ * @idr_mutex:
+ *
+ * Mutex for KMS ID allocation and management. Protects both @crtc_idr
+ * and @tile_idr.
+ */
+ struct mutex idr_mutex;
+
+ /**
+ * @crtc_idr:
+ *
+ * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
+ * connector, modes - just makes life easier to have only one.
+ */
+ struct idr crtc_idr;
+
+ /**
+ * @tile_idr:
+ *
+ * Use this idr for allocating new IDs for tiled sinks, like those used in
+ * high-res DP MST screens.
+ */
+ struct idr tile_idr;
+
+ struct mutex fb_lock; /* protects global and per-file fb lists */
+ int num_fb;
+ struct list_head fb_list;
+
+ /**
+ * @num_connector: Number of connectors on this device.
+ */
+ int num_connector;
+ /**
+ * @connector_ida: ID allocator for connector indices.
+ */
+ struct ida connector_ida;
+ /**
+ * @connector_list: List of connector objects.
+ */
+ struct list_head connector_list;
+ int num_encoder;
+ struct list_head encoder_list;
+
+ /*
+ * Track # of overlay planes separately from # of total planes. By
+ * default we only advertise overlay planes to userspace; if userspace
+ * sets the "universal plane" capability bit, we'll go ahead and
+ * expose all planes.
+ */
+ int num_overlay_plane;
+ int num_total_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+ struct list_head crtc_list;
+
+ struct list_head property_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ const struct drm_mode_config_funcs *funcs;
+ resource_size_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ bool poll_running;
+ bool delayed_event;
+ struct delayed_work output_poll_work;
+
+ struct mutex blob_lock;
+
+ /* pointers to standard properties */
+ struct list_head property_blob_list;
+ /**
+ * @edid_property: Default connector property to hold the EDID of the
+ * currently connected sink, if any.
+ */
+ struct drm_property *edid_property;
+ /**
+ * @dpms_property: Default connector property to control the
+ * connector's DPMS state.
+ */
+ struct drm_property *dpms_property;
+ /**
+ * @path_property: Default connector property to hold the DP MST path
+ * for the port.
+ */
+ struct drm_property *path_property;
+ /**
+ * @tile_property: Default connector property to store the tile
+ * position of a tiled screen, for sinks which need to be driven with
+ * multiple CRTCs.
+ */
+ struct drm_property *tile_property;
+ /**
+ * @plane_type_property: Default plane property to differentiate
+ * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
+ */
+ struct drm_property *plane_type_property;
+ /**
+ * @prop_src_x: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_x;
+ /**
+ * @prop_src_y: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_y;
+ /**
+ * @prop_src_w: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_w;
+ /**
+ * @prop_src_h: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_h;
+ /**
+ * @prop_crtc_x: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_x;
+ /**
+ * @prop_crtc_y: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_y;
+ /**
+ * @prop_crtc_w: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_w;
+ /**
+ * @prop_crtc_h: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_h;
+ /**
+ * @prop_fb_id: Default atomic plane property to specify the
+ * &drm_framebuffer.
+ */
+ struct drm_property *prop_fb_id;
+ /**
+ * @prop_in_fence_fd: Sync File fd representing the incoming fences
+ * for a Plane.
+ */
+ struct drm_property *prop_in_fence_fd;
+ /**
+ * @prop_out_fence_ptr: Sync File fd pointer representing the
+ * outgoing fences for a CRTC. Userspace should provide a pointer to a
+ * value of type s64, and then cast that pointer to u64.
+ */
+ struct drm_property *prop_out_fence_ptr;
+ /**
+ * @prop_crtc_id: Default atomic plane property to specify the
+ * &drm_crtc.
+ */
+ struct drm_property *prop_crtc_id;
+ /**
+ * @prop_active: Default atomic CRTC property to control the active
+ * state, which is the simplified implementation for DPMS in atomic
+ * drivers.
+ */
+ struct drm_property *prop_active;
+ /**
+ * @prop_mode_id: Default atomic CRTC property to set the mode for a
+ * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+ * connectors must be off and active must be set to false, too.
+ */
+ struct drm_property *prop_mode_id;
+
+ /**
+ * @dvi_i_subconnector_property: Optional DVI-I property to
+ * differentiate between analog or digital mode.
+ */
+ struct drm_property *dvi_i_subconnector_property;
+ /**
+ * @dvi_i_select_subconnector_property: Optional DVI-I property to
+ * select between analog or digital mode.
+ */
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /**
+ * @tv_subconnector_property: Optional TV property to differentiate
+ * between different TV connector types.
+ */
+ struct drm_property *tv_subconnector_property;
+ /**
+ * @tv_select_subconnector_property: Optional TV property to select
+ * between different TV connector types.
+ */
+ struct drm_property *tv_select_subconnector_property;
+ /**
+ * @tv_mode_property: Optional TV property to select
+ * the output TV mode.
+ */
+ struct drm_property *tv_mode_property;
+ /**
+ * @tv_left_margin_property: Optional TV property to set the left
+ * margin.
+ */
+ struct drm_property *tv_left_margin_property;
+ /**
+ * @tv_right_margin_property: Optional TV property to set the right
+ * margin.
+ */
+ struct drm_property *tv_right_margin_property;
+ /**
+ * @tv_top_margin_property: Optional TV property to set the top
+ * margin.
+ */
+ struct drm_property *tv_top_margin_property;
+ /**
+ * @tv_bottom_margin_property: Optional TV property to set the bottom
+ * margin.
+ */
+ struct drm_property *tv_bottom_margin_property;
+ /**
+ * @tv_brightness_property: Optional TV property to set the
+ * brightness.
+ */
+ struct drm_property *tv_brightness_property;
+ /**
+ * @tv_contrast_property: Optional TV property to set the
+ * contrast.
+ */
+ struct drm_property *tv_contrast_property;
+ /**
+ * @tv_flicker_reduction_property: Optional TV property to control the
+ * flicker reduction mode.
+ */
+ struct drm_property *tv_flicker_reduction_property;
+ /**
+ * @tv_overscan_property: Optional TV property to control the overscan
+ * setting.
+ */
+ struct drm_property *tv_overscan_property;
+ /**
+ * @tv_saturation_property: Optional TV property to set the
+ * saturation.
+ */
+ struct drm_property *tv_saturation_property;
+ /**
+ * @tv_hue_property: Optional TV property to set the hue.
+ */
+ struct drm_property *tv_hue_property;
+
+ /**
+ * @scaling_mode_property: Optional connector property to control the
+ * upscaling, mostly used for built-in panels.
+ */
+ struct drm_property *scaling_mode_property;
+ /**
+ * @aspect_ratio_property: Optional connector property to control the
+ * HDMI infoframe aspect ratio setting.
+ */
+ struct drm_property *aspect_ratio_property;
+ /**
+ * @degamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the framebuffer's colors to linear gamma.
+ */
+ struct drm_property *degamma_lut_property;
+ /**
+ * @degamma_lut_size_property: Optional CRTC property for the size of
+ * the degamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *degamma_lut_size_property;
+ /**
+ * @ctm_property: Optional CRTC property to set the
+ * matrix used to convert colors after the lookup in the
+ * degamma LUT.
+ */
+ struct drm_property *ctm_property;
+ /**
+ * @gamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the colors, after the CTM matrix, to the gamma space of the
+ * connected screen.
+ */
+ struct drm_property *gamma_lut_property;
+ /**
+ * @gamma_lut_size_property: Optional CRTC property for the size of the
+ * gamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *gamma_lut_size_property;
+
+ /**
+ * @suggested_x_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
+ struct drm_property *suggested_x_property;
+ /**
+ * @suggested_y_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
+ struct drm_property *suggested_y_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+
+ /**
+ * @async_page_flip: Does this device support async flips on the primary
+ * plane?
+ */
+ bool async_page_flip;
+
+ /**
+ * @allow_fb_modifiers:
+ *
+ * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ */
+ bool allow_fb_modifiers;
+
+ /* cursor size */
+ uint32_t cursor_width, cursor_height;
+
+ struct drm_mode_config_helper_funcs *helper_private;
+};
+
+void drm_mode_config_init(struct drm_device *dev);
+void drm_mode_config_reset(struct drm_device *dev);
+void drm_mode_config_cleanup(struct drm_device *dev);
+
+#endif
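
As a usage note for the init/reset/cleanup trio declared above, a minimal, hedged sketch of a driver's modeset setup; the size limits, the CMA-based fb_create helper and all mydrv_* names are illustrative assumptions, not part of this patch:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>

static const struct drm_mode_config_funcs mydrv_mode_funcs = {
	.fb_create     = drm_fb_cma_create,	/* assumes a CMA-backed driver */
	.atomic_check  = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static int mydrv_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);	/* creates the standard properties above */

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;
	dev->mode_config.funcs = &mydrv_mode_funcs;

	/* ... register CRTCs, planes, encoders and connectors here ... */

	drm_mode_config_reset(dev);	/* synthesize the initial software state */
	return 0;
}

static void mydrv_modeset_fini(struct drm_device *dev)
{
	drm_mode_config_cleanup(dev);	/* drops all objects and properties */
}
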
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 10e449c86dbd..69c3974bf133 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -361,8 +361,8 @@ struct drm_crtc_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -385,8 +385,8 @@ struct drm_crtc_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -940,8 +940,8 @@ struct drm_plane_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -963,8 +963,8 @@ struct drm_plane_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -999,10 +999,14 @@ struct drm_mode_config_helper_funcs {
* to implement blocking and nonblocking commits easily. It is not used
* by the atomic helpers
*
- * This hook should first commit the given atomic state to the hardware.
- * But drivers can add more waiting calls at the start of their
- * implementation, e.g. to wait for driver-internal request for implicit
- * syncing, before starting to commit the update to the hardware.
+ * This function is called when the new atomic state has already been
+ * swapped into the various state pointers. The passed in state
+ * therefore contains copies of the old/previous state. This hook should
+ * commit the new state into hardware. Note that the helpers have
+ * already waited for preceding atomic commits and fences, but drivers
+ * can add more waiting calls at the start of their implementation, e.g.
+ * to wait for driver-internal requests for implicit syncing, before
+ * starting to commit the update to the hardware.
*
* After the atomic update is committed to the hardware this hook needs
* to call drm_atomic_helper_commit_hw_done(). Then wait for the update
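
For readers wiring this up, a hedged sketch of an atomic_commit_tail implementation composed purely from the helpers the comment refers to; the ordering follows the contract described above, the mydrv_* names are placeholders, and the stock drm_atomic_helper_commit_tail() does essentially the same:

static void mydrv_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* Commit the new state to the hardware. */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Signal completion of the hardware commit ... */
	drm_atomic_helper_commit_hw_done(state);

	/* ... then wait for the update to actually finish ... */
	drm_atomic_helper_wait_for_vblanks(dev, state);

	/* ... and clean up the old framebuffers. */
	drm_atomic_helper_cleanup_planes(dev, state);
}

static const struct drm_mode_config_helper_funcs mydrv_mode_config_helpers = {
	.atomic_commit_tail = mydrv_atomic_commit_tail,
};
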
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index c5576fbcb909..d918ce45ec2c 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -82,8 +82,6 @@ struct drm_modeset_lock {
struct list_head head;
};
-extern struct ww_class crtc_ww_class;
-
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
@@ -91,15 +89,7 @@ void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
-/**
- * drm_modeset_lock_init - initialize lock
- * @lock: lock to init
- */
-static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
-{
- ww_mutex_init(&lock->mutex, &crtc_ww_class);
- INIT_LIST_HEAD(&lock->head);
-}
+void drm_modeset_lock_init(struct drm_modeset_lock *lock);
/**
* drm_modeset_lock_fini - cleanup lock
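
As a usage note for the acquire-context API kept above, a hedged sketch of the -EDEADLK backoff loop every caller of drm_modeset_lock() is expected to implement; mydrv_inspect_crtc() is a placeholder and crtc->mutex is the per-CRTC drm_modeset_lock:

static int mydrv_inspect_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (!ret) {
		/* ... state protected by crtc->mutex may be touched here ... */
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}
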
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 3fd87b386ed7..26a64805cc15 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -4,6 +4,7 @@
#include <linux/of_graph.h>
struct component_master_ops;
+struct component_match;
struct device;
struct drm_device;
struct drm_encoder;
@@ -12,6 +13,10 @@ struct device_node;
#ifdef CONFIG_OF
extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port);
+extern void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node);
extern int drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
const struct component_master_ops *m_ops);
@@ -25,6 +30,14 @@ static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
return 0;
}
+static inline void
+drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node)
+{
+}
+
static inline int
drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 43cf193e54d6..5b38eb94783b 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -28,15 +28,11 @@
#include <drm/drm_mode_object.h>
struct drm_crtc;
+struct drm_printer;
/**
* struct drm_plane_state - mutable plane state
* @plane: backpointer to the plane
- * @crtc: currently bound CRTC, NULL if disabled
- * @fb: currently bound framebuffer
- * @fence: optional fence to wait for before scanning out @fb
- * @crtc_x: left position of visible portion of plane on crtc
- * @crtc_y: upper position of visible portion of plane on crtc
* @crtc_w: width of visible portion of plane on crtc
* @crtc_h: height of visible portion of plane on crtc
* @src_x: left position of visible portion of plane within
@@ -47,22 +43,61 @@ struct drm_crtc;
* @src_h: height of visible portion of plane (in 16.16)
* @rotation: rotation of the plane
* @zpos: priority of the given plane on crtc (optional)
+ * Note that multiple active planes on the same crtc can have an identical
+ * zpos value. The rule for solving the conflict is to compare the plane
+ * object IDs; the plane with a higher ID must be stacked on top of a
+ * plane with a lower ID.
* @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- * where N is the number of active planes for given crtc
+ * where N is the number of active planes for given crtc. Note that
+ * the driver must call drm_atomic_normalize_zpos() to update this before
+ * it can be trusted.
* @src: clipped source coordinates of the plane (in 16.16)
* @dst: clipped destination coordinates of the plane
- * @visible: visibility of the plane
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
struct drm_plane *plane;
- struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
- struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
- struct fence *fence;
+ /**
+ * @crtc:
+ *
+ * Currently bound CRTC, NULL if disabled. Do not write this directly,
+ * use drm_atomic_set_crtc_for_plane()
+ */
+ struct drm_crtc *crtc;
+
+ /**
+ * @fb:
+ *
+ * Currently bound framebuffer. Do not write this directly, use
+ * drm_atomic_set_fb_for_plane()
+ */
+ struct drm_framebuffer *fb;
+
+ /**
+ * @fence:
+ *
+ * Optional fence to wait for before scanning out @fb. Do not write this
+ * directly, use drm_atomic_set_fence_for_plane()
+ */
+ struct dma_fence *fence;
+
+ /**
+ * @crtc_x:
+ *
+ * Left position of visible portion of plane on crtc, signed dest
+ * location allows it to be partially off screen.
+ */
+ int32_t crtc_x;
+ /**
+ * @crtc_y:
+ *
+ * Upper position of visible portion of plane on crtc, signed dest
+ * location allows it to be partially off screen.
+ */
+ int32_t crtc_y;
- /* Signed dest location allows it to be partially off screen */
- int32_t crtc_x, crtc_y;
uint32_t crtc_w, crtc_h;
/* Source values are 16.16 fixed point */
@@ -79,15 +114,40 @@ struct drm_plane_state {
/* Clipped coordinates */
struct drm_rect src, dst;
- /*
- * Is the plane actually visible? Can be false even
- * if fb!=NULL and crtc!=NULL, due to clipping.
+ /**
+ * @visible:
+ *
+ * Visibility of the plane. This can be false even if fb!=NULL and
+ * crtc!=NULL, due to clipping.
*/
bool visible;
struct drm_atomic_state *state;
};
+static inline struct drm_rect
+drm_plane_state_src(const struct drm_plane_state *state)
+{
+ struct drm_rect src = {
+ .x1 = state->src_x,
+ .y1 = state->src_y,
+ .x2 = state->src_x + state->src_w,
+ .y2 = state->src_y + state->src_h,
+ };
+ return src;
+}
+
+static inline struct drm_rect
+drm_plane_state_dest(const struct drm_plane_state *state)
+{
+ struct drm_rect dest = {
+ .x1 = state->crtc_x,
+ .y1 = state->crtc_y,
+ .x2 = state->crtc_x + state->crtc_w,
+ .y2 = state->crtc_y + state->crtc_h,
+ };
+ return dest;
+}
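
A small hedged example of what these two helpers are for, e.g. inside a plane's atomic_check; note that the source rectangle is 16.16 fixed point while the destination is in whole pixels, and mydrv_* is a placeholder:

static int mydrv_plane_atomic_check(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dst = drm_plane_state_dest(state);

	/* Hypothetical hardware that cannot scale: src and dst must match. */
	if ((drm_rect_width(&src) >> 16) != drm_rect_width(&dst) ||
	    (drm_rect_height(&src) >> 16) != drm_rect_height(&dst))
		return -EINVAL;

	return 0;
}
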
/**
* struct drm_plane_funcs - driver plane control functions
@@ -317,6 +377,18 @@ struct drm_plane_funcs {
* before data structures are torn down.
*/
void (*early_unregister)(struct drm_plane *plane);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses struct &drm_plane_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_plane_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_plane_state *state);
};
/**
@@ -386,6 +458,7 @@ enum drm_plane_type {
* @type: type of plane (overlay, primary, cursor)
* @state: current atomic state for this plane
* @zpos_property: zpos property for this plane
+ * @rotation_property: rotation property for this plane
* @helper_private: mid-layer private data
*/
struct drm_plane {
@@ -432,6 +505,7 @@ struct drm_plane {
struct drm_plane_state *state;
struct drm_property *zpos_property;
+ struct drm_property *rotation_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
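
To illustrate the new @atomic_print_state hook together with the drm_printer added below in drm_print.h, a hedged sketch for a driver that subclasses the plane state with one private field; all mydrv_* names are made up:

struct mydrv_plane_state {
	struct drm_plane_state base;
	u32 pixel_alpha;		/* hypothetical driver-private field */
};

static void mydrv_plane_print_state(struct drm_printer *p,
				    const struct drm_plane_state *state)
{
	const struct mydrv_plane_state *mystate =
		container_of(state, struct mydrv_plane_state, base);

	drm_printf(p, "\tpixel_alpha=%u\n", mystate->pixel_alpha);
}

static const struct drm_plane_funcs mydrv_plane_funcs = {
	/* ... the usual reset/duplicate/destroy/update hooks ... */
	.atomic_print_state = mydrv_plane_print_state,
};
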
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
new file mode 100644
index 000000000000..1adf84aea622
--- /dev/null
+++ b/include/drm/drm_print.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef DRM_PRINT_H_
+#define DRM_PRINT_H_
+
+#include <linux/seq_file.h>
+#include <linux/device.h>
+
+/**
+ * DOC: print
+ *
+ * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same
+ * debug code to be used for both debugfs and printk logging.
+ *
+ * For example::
+ *
+ * void log_some_info(struct drm_printer *p)
+ * {
+ * drm_printf(p, "foo=%d\n", foo);
+ * drm_printf(p, "bar=%d\n", bar);
+ * }
+ *
+ * #ifdef CONFIG_DEBUG_FS
+ * void debugfs_show(struct seq_file *f)
+ * {
+ * struct drm_printer p = drm_seq_file_printer(f);
+ * log_some_info(&p);
+ * }
+ * #endif
+ *
+ * void some_other_function(...)
+ * {
+ * struct drm_printer p = drm_info_printer(drm->dev);
+ * log_some_info(&p);
+ * }
+ */
+
+/**
+ * struct drm_printer - drm output "stream"
+ * @printfn: actual output fxn
+ * @arg: output fxn specific data
+ *
+ * Do not use struct members directly. Use drm_seq_file_printer(),
+ * drm_info_printer(), etc. to initialize, and drm_printf() for output.
+ */
+struct drm_printer {
+ void (*printfn)(struct drm_printer *p, struct va_format *vaf);
+ void *arg;
+};
+
+void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
+
+void drm_printf(struct drm_printer *p, const char *f, ...);
+
+
+/**
+ * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
+ * @f: the struct &seq_file to output to
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_seq_file,
+ .arg = f,
+ };
+ return p;
+}
+
+/**
+ * drm_info_printer - construct a &drm_printer that outputs to dev_printk()
+ * @dev: the struct &device pointer
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_info_printer(struct device *dev)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_info,
+ .arg = dev,
+ };
+ return p;
+}
+
+#endif /* DRM_PRINT_H_ */
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index b46fa0ef3005..545c6e0fea7d 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -64,7 +64,7 @@ struct i915_audio_component_ops {
* Called from audio driver. After audio driver sets the
* sample rate, it will call this function to set n/cts
*/
- int (*sync_audio_rate)(struct device *, int port, int rate);
+ int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
/**
* @get_eld: fill the audio state and ELD bytes for the given port
*
@@ -77,7 +77,7 @@ struct i915_audio_component_ops {
* Note that the returned size may be over @max_bytes. Then it
* implies that only a part of ELD has been copied to the buffer.
*/
- int (*get_eld)(struct device *, int port, bool *enabled,
+ int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
unsigned char *buf, int max_bytes);
};
@@ -97,7 +97,7 @@ struct i915_audio_component_audio_ops {
* status accordingly (even when the HDA controller is in power save
* mode).
*/
- void (*pin_eld_notify)(void *audio_ptr, int port);
+ void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
};
/**
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 9eb940d6755f..652e45be97c8 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -47,6 +47,8 @@ struct drm_mm_node;
struct ttm_placement;
+struct ttm_place;
+
/**
* struct ttm_bus_placement
*
@@ -209,7 +211,7 @@ struct ttm_buffer_object {
* Members protected by a bo reservation.
*/
- struct fence *moving;
+ struct dma_fence *moving;
struct drm_vma_offset_node vma_node;
@@ -396,6 +398,17 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
int resched);
/**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+
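
A hedged sketch of how a TTM driver might use this newly exported default together with the matching eviction_valuable hook added to struct ttm_bo_driver below; mydrv_bo_is_scanout() is a made-up helper:

static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
				    const struct ttm_place *place)
{
	/* Hypothetical policy: never evict active scan-out buffers. */
	if (mydrv_bo_is_scanout(bo))
		return false;

	/* Otherwise fall back to TTM's generic heuristic. */
	return ttm_bo_eviction_valuable(bo, place);
}

static struct ttm_bo_driver mydrv_bo_driver = {
	/* ... the other mandatory callbacks ... */
	.eviction_valuable = mydrv_eviction_valuable,
};
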
+/**
* ttm_bo_synccpu_write_grab
*
* @bo: The buffer object:
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4f0a92185995..cdbdb40eb5bd 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -303,7 +303,7 @@ struct ttm_mem_type_manager {
/*
* Protected by @move_lock.
*/
- struct fence *move;
+ struct dma_fence *move;
};
/**
@@ -371,9 +371,21 @@ struct ttm_bo_driver {
* submission as a consequence.
*/
- int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
- int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man);
+ int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
+ int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man);
+
+ /**
+ * struct ttm_bo_driver member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
/**
* struct ttm_bo_driver member evict_flags:
*
@@ -384,8 +396,9 @@ struct ttm_bo_driver {
* finished, they'll end up in bo->mem.flags
*/
- void(*evict_flags) (struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
/**
* struct ttm_bo_driver member move:
*
@@ -399,10 +412,9 @@ struct ttm_bo_driver {
*
* Move a buffer between two memory regions.
*/
- int (*move) (struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem);
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ bool interruptible, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* struct ttm_bo_driver_member verify_access
@@ -416,8 +428,8 @@ struct ttm_bo_driver {
* access for all buffer objects.
* This function should return 0 if access is granted, -EPERM otherwise.
*/
- int (*verify_access) (struct ttm_buffer_object *bo,
- struct file *filp);
+ int (*verify_access)(struct ttm_buffer_object *bo,
+ struct file *filp);
/* hook to notify driver about a driver move so it
* can do tiling things */
@@ -430,7 +442,7 @@ struct ttm_bo_driver {
/**
* notify the driver that we're about to swap out this bo
*/
- void (*swap_notify) (struct ttm_buffer_object *bo);
+ void (*swap_notify)(struct ttm_buffer_object *bo);
/**
* Driver callback on when mapping io memory (for bo_move_memcpy
@@ -438,8 +450,10 @@ struct ttm_bo_driver {
* the mapping is not used anymore. io_mem_reserve & io_mem_free
* are balanced.
*/
- int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
- void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+ int (*io_mem_reserve)(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+ void (*io_mem_free)(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/**
* Optional driver callback for when BO is removed from the LRU.
@@ -1025,7 +1039,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
/**
@@ -1040,7 +1054,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* immediately or hang it on a temporary buffer object.
*/
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
/**
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b620c317c772..47f35b8e6d09 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -114,6 +114,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct list_head *list,
- struct fence *fence);
+ struct dma_fence *fence);
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 689a8b9b9c8f..61a3d90f32b3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -555,7 +555,8 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
void acpi_walk_dep_device_list(acpi_handle handle);
-struct platform_device *acpi_create_platform_device(struct acpi_device *);
+struct platform_device *acpi_create_platform_device(struct acpi_device *,
+ struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7035b997aaa5..6aaf425cebc3 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -14,7 +14,7 @@
* are obviously wrong for any sort of memory access.
*/
#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -1
struct bpf_reg_state {
enum bpf_reg_type type;
@@ -22,7 +22,8 @@ struct bpf_reg_state {
* Used to determine if any memory access using this register will
* result in a bad access.
*/
- u64 min_value, max_value;
+ s64 min_value;
+ u64 max_value;
union {
/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
s64 imm;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 96337b15a60d..a8e66344bacc 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -258,6 +258,8 @@ struct ceph_watch_item {
struct ceph_entity_addr addr;
};
+#define CEPH_LINGER_ID_START 0xffff000000000000ULL
+
struct ceph_osd_client {
struct ceph_client *client;
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 432f5c97e18f..928e5ca0caee 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -263,7 +263,9 @@
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
-#if GCC_VERSION >= 50000
+#if GCC_VERSION >= 70000
+#define KASAN_ABI_VERSION 5
+#elif GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
#elif GCC_VERSION >= 40902
#define KASAN_ABI_VERSION 3
diff --git a/include/linux/console.h b/include/linux/console.h
index 3672809234a7..d530c4627e54 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void)
#endif
extern bool console_suspend_enabled;
-#ifdef CONFIG_OF
-extern void console_set_by_of(void);
-#else
-static inline void console_set_by_of(void) {}
-#endif
-
/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index e0b0741ae671..8daeb3ce0016 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -30,7 +30,7 @@
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/wait.h>
struct device;
@@ -143,7 +143,7 @@ struct dma_buf {
wait_queue_head_t poll;
struct dma_buf_poll_cb_t {
- struct fence_cb cb;
+ struct dma_fence_cb cb;
wait_queue_head_t *poll;
unsigned long active;
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
new file mode 100644
index 000000000000..5900945f962d
--- /dev/null
+++ b/include/linux/dma-fence-array.h
@@ -0,0 +1,86 @@
+/*
+ * fence-array: aggregates fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_ARRAY_H
+#define __LINUX_DMA_FENCE_ARRAY_H
+
+#include <linux/dma-fence.h>
+
+/**
+ * struct dma_fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct dma_fence_array_cb {
+ struct dma_fence_cb cb;
+ struct dma_fence_array *array;
+};
+
+/**
+ * struct dma_fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct dma_fence_array {
+ struct dma_fence base;
+
+ spinlock_t lock;
+ unsigned num_fences;
+ atomic_t num_pending;
+ struct dma_fence **fences;
+};
+
+extern const struct dma_fence_ops dma_fence_array_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * to_dma_fence_array - cast a fence to a dma_fence_array
+ * @fence: fence to cast to a dma_fence_array
+ *
+ * Returns NULL if the fence is not a dma_fence_array,
+ * or the dma_fence_array otherwise.
+ */
+static inline struct dma_fence_array *
+to_dma_fence_array(struct dma_fence *fence)
+{
+ if (fence->ops != &dma_fence_array_ops)
+ return NULL;
+
+ return container_of(fence, struct dma_fence_array, base);
+}
+
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
+#endif /* __LINUX_DMA_FENCE_ARRAY_H */
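
A hedged sketch of collapsing two fences into one object with this API; note that on success the array takes over both the kmalloc'ed pointer array and the references stored in it, and mydrv_* is a placeholder:

#include <linux/slab.h>
#include <linux/dma-fence-array.h>

static struct dma_fence *mydrv_merge_fences(struct dma_fence *a,
					    struct dma_fence *b)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	/* signal_on_any = false: the array signals once *all* fences have. */
	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1,
				       false);
	if (!array) {
		dma_fence_put(fences[0]);
		dma_fence_put(fences[1]);
		kfree(fences);
		return NULL;
	}

	return &array->base;
}
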
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
new file mode 100644
index 000000000000..d51a7d23c358
--- /dev/null
+++ b/include/linux/dma-fence.h
@@ -0,0 +1,438 @@
+/*
+ * Fence mechanism for dma-buf to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_H
+#define __LINUX_DMA_FENCE_H
+
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+
+struct dma_fence;
+struct dma_fence_ops;
+struct dma_fence_cb;
+
+/**
+ * struct dma_fence - software synchronization primitive
+ * @refcount: refcount for this fence
+ * @ops: dma_fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
+ * @cb_list: list of all callbacks to call
+ * @lock: spin_lock_irqsave used for locking
+ * @context: execution context this fence belongs to, returned by
+ * dma_fence_context_alloc()
+ * @seqno: the sequence number of this fence inside the execution context,
+ * can be compared to decide which fence would be signaled later.
+ * @flags: A mask of DMA_FENCE_FLAG_* defined below
+ * @timestamp: Timestamp when the fence was signaled.
+ * @status: Optional, only valid if < 0, must be set before calling
+ * dma_fence_signal, indicates that the fence has completed with an error.
+ *
+ * the flags member must be manipulated and read using the appropriate
+ * atomic ops (bit_*), so taking the spinlock will not be needed most
+ * of the time.
+ *
+ * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
+ * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
+ * implementer of the fence for its own purposes. Can be used in different
+ * ways by different fence implementers, so do not rely on this.
+ *
+ * Since atomic bitops are used, this is not guaranteed to be the case.
+ * Particularly, if the bit was set, but dma_fence_signal was called right
+ * before this bit was set, it would have been able to set the
+ * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
+ * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
+ * after dma_fence_signal was called, any enable_signaling call will have either
+ * been completed, or never called at all.
+ */
+struct dma_fence {
+ struct kref refcount;
+ const struct dma_fence_ops *ops;
+ struct rcu_head rcu;
+ struct list_head cb_list;
+ spinlock_t *lock;
+ u64 context;
+ unsigned seqno;
+ unsigned long flags;
+ ktime_t timestamp;
+ int status;
+};
+
+enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SIGNALED_BIT,
+ DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
+};
+
+typedef void (*dma_fence_func_t)(struct dma_fence *fence,
+ struct dma_fence_cb *cb);
+
+/**
+ * struct dma_fence_cb - callback for dma_fence_add_callback
+ * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * @func: dma_fence_func_t to call
+ *
+ * This struct will be initialized by dma_fence_add_callback, additional
+ * data can be passed along by embedding dma_fence_cb in another struct.
+ */
+struct dma_fence_cb {
+ struct list_head node;
+ dma_fence_func_t func;
+};
+
+/**
+ * struct dma_fence_ops - operations implemented for fence
+ * @get_driver_name: returns the driver name.
+ * @get_timeline_name: return the name of the context this fence belongs to.
+ * @enable_signaling: enable software signaling of fence.
+ * @signaled: [optional] peek whether the fence is signaled, can be null.
+ * @wait: custom wait implementation, or dma_fence_default_wait.
+ * @release: [optional] called on destruction of fence, can be null
+ * @fill_driver_data: [optional] callback to fill in free-form debug info
+ * Returns amount of bytes filled, or -errno.
+ * @fence_value_str: [optional] fills in the value of the fence as a string
+ * @timeline_value_str: [optional] fills in the current value of the timeline
+ * as a string
+ *
+ * Notes on enable_signaling:
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * irqs, or insert commands into cmdstream, etc. This is called
+ * in the first wait() or add_callback() path to let the fence
+ * implementation know that there is another driver waiting on
+ * the signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * fence->status may be set in enable_signaling, but only when false is
+ * returned.
+ *
+ * Calling dma_fence_signal before enable_signaling is called allows
+ * for a tiny race window in which enable_signaling is called during,
+ * before, or after dma_fence_signal. To fight this, it is recommended
+ * that before enable_signaling returns true an extra reference is
+ * taken on the fence, to be released when the fence is signaled.
+ * This will mean dma_fence_signal will still be called twice, but
+ * the second time will be a noop since it was already signaled.
+ *
+ * Notes on signaled:
+ * May set fence->status if returning true.
+ *
+ * Notes on wait:
+ * Must not be NULL, set to dma_fence_default_wait for default implementation.
+ * the dma_fence_default_wait implementation should work for any fence, as long
+ * as enable_signaling works correctly.
+ *
+ * Must return -ERESTARTSYS if the wait is intr = true and the wait was
+ * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
+ * timed out. Can also return other error values on custom implementations,
+ * which should be treated as if the fence is signaled. For example a hardware
+ * lockup could be reported like that.
+ *
+ * Notes on release:
+ * Can be NULL, this function allows additional commands to run on
+ * destruction of the fence. Can be called from irq context.
+ * If pointer is set to NULL, kfree will get called instead.
+ */
+
+struct dma_fence_ops {
+ const char * (*get_driver_name)(struct dma_fence *fence);
+ const char * (*get_timeline_name)(struct dma_fence *fence);
+ bool (*enable_signaling)(struct dma_fence *fence);
+ bool (*signaled)(struct dma_fence *fence);
+ signed long (*wait)(struct dma_fence *fence,
+ bool intr, signed long timeout);
+ void (*release)(struct dma_fence *fence);
+
+ int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+ void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
+ void (*timeline_value_str)(struct dma_fence *fence,
+ char *str, int size);
+};
+
+void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, unsigned seqno);
+
+void dma_fence_release(struct kref *kref);
+void dma_fence_free(struct dma_fence *fence);
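
A hedged, minimal software-only fence built on the API above, mainly to show which ops are mandatory and how dma_fence_init() is fed a shared spinlock, a context and a seqno; all mydrv_* names are illustrative:

static DEFINE_SPINLOCK(mydrv_fence_lock);

static const char *mydrv_get_driver_name(struct dma_fence *fence)
{
	return "mydrv";
}

static const char *mydrv_get_timeline_name(struct dma_fence *fence)
{
	return "mydrv-timeline";
}

static bool mydrv_enable_signaling(struct dma_fence *fence)
{
	return true;	/* nothing to arm for a software-only fence */
}

static const struct dma_fence_ops mydrv_fence_ops = {
	.get_driver_name   = mydrv_get_driver_name,
	.get_timeline_name = mydrv_get_timeline_name,
	.enable_signaling  = mydrv_enable_signaling,
	.wait              = dma_fence_default_wait,
};

static struct dma_fence *mydrv_fence_create(u64 context, unsigned seqno)
{
	struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	dma_fence_init(fence, &mydrv_fence_ops, &mydrv_fence_lock,
		       context, seqno);
	/* Later: dma_fence_signal(fence) when done, dma_fence_put() to drop. */
	return fence;
}
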
+
+/**
+ * dma_fence_put - decreases refcount of the fence
+ * @fence: [in] fence to reduce refcount of
+ */
+static inline void dma_fence_put(struct dma_fence *fence)
+{
+ if (fence)
+ kref_put(&fence->refcount, dma_fence_release);
+}
+
+/**
+ * dma_fence_get - increases refcount of the fence
+ * @fence: [in] fence to increase refcount of
+ *
+ * Returns the same fence, with refcount increased by 1.
+ */
+static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
+{
+ if (fence)
+ kref_get(&fence->refcount);
+ return fence;
+}
+
+/**
+ * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * rcu read lock
+ * @fence: [in] fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ */
+static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
+{
+ if (kref_get_unless_zero(&fence->refcount))
+ return fence;
+ else
+ return NULL;
+}
+
+/**
+ * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
+ * @fencep: [in] pointer to fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ * This function handles acquiring a reference to a fence that may be
+ * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * so long as the caller is using RCU on the pointer to the fence.
+ *
+ * An alternative mechanism is to employ a seqlock to protect a bunch of
+ * fences, such as used by struct reservation_object. When using a seqlock,
+ * the seqlock must be taken before and checked after a reference to the
+ * fence is acquired (as shown here).
+ *
+ * The caller is required to hold the RCU read lock.
+ */
+static inline struct dma_fence *
+dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+{
+ do {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference(*fencep);
+ if (!fence || !dma_fence_get_rcu(fence))
+ return NULL;
+
+ /* The atomic_inc_not_zero() inside dma_fence_get_rcu()
+ * provides a full memory barrier upon success (such as now).
+ * This is paired with the write barrier from assigning
+ * to the __rcu protected fence pointer so that if that
+ * pointer still matches the current fence, we know we
+ * have successfully acquired a reference to it. If it no
+ * longer matches, we are holding a reference to some other
+ * reallocated pointer. This is possible if the allocator
+ * is using a freelist like SLAB_DESTROY_BY_RCU where the
+ * fence remains valid for the RCU grace period, but it
+ * may be reallocated. When using such allocators, we are
+ * responsible for ensuring the reference we get is to
+ * the right fence, as below.
+ */
+ if (fence == rcu_access_pointer(*fencep))
+ return rcu_pointer_handoff(fence);
+
+ dma_fence_put(fence);
+ } while (1);
+}
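
A hedged usage sketch of the RCU-safe getter, e.g. for testing an object's exclusive fence that may be replaced concurrently; struct mydrv_obj and its __rcu fence pointer are made up for the example:

struct mydrv_obj {
	struct dma_fence __rcu *excl_fence;
	/* ... */
};

static bool mydrv_obj_is_idle(struct mydrv_obj *obj)
{
	struct dma_fence *fence;
	bool idle = true;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&obj->excl_fence);
	rcu_read_unlock();

	if (fence) {
		idle = dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}

	return idle;
}
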
+
+int dma_fence_signal(struct dma_fence *fence);
+int dma_fence_signal_locked(struct dma_fence *fence);
+signed long dma_fence_default_wait(struct dma_fence *fence,
+ bool intr, signed long timeout);
+int dma_fence_add_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb,
+ dma_fence_func_t func);
+bool dma_fence_remove_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb);
+void dma_fence_enable_sw_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_is_signaled_locked - Return an indication if the fence
+ * is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * This function requires fence->lock to be held.
+ */
+static inline bool
+dma_fence_is_signaled_locked(struct dma_fence *fence)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ dma_fence_signal_locked(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * It's recommended for seqno fences to call dma_fence_signal when the
+ * operation is complete; this makes it possible to prevent issues from
+ * wraparound between time of issue and time of use by checking the return
+ * value of this function before calling hardware-specific wait instructions.
+ */
+static inline bool
+dma_fence_is_signaled(struct dma_fence *fence)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ dma_fence_signal(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dma_fence_is_later - return if f1 is chronologically later than f2
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not re-used across contexts.
+ */
+static inline bool dma_fence_is_later(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return false;
+
+ return (int)(f1->seqno - f2->seqno) > 0;
+}
+
+/**
+ * dma_fence_later - return the chronologically later fence
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns NULL if both fences are signaled, otherwise the fence that would be
+ * signaled last. Both fences must be from the same context, since a seqno is
+ * not re-used across contexts.
+ */
+static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return NULL;
+
+ /*
+ * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
+ * have been set if enable_signaling wasn't called, and enabling that
+ * here is overkill.
+ */
+ if (dma_fence_is_later(f1, f2))
+ return dma_fence_is_signaled(f1) ? NULL : f1;
+ else
+ return dma_fence_is_signaled(f2) ? NULL : f2;
+}
+
+signed long dma_fence_wait_timeout(struct dma_fence *,
+ bool intr, signed long timeout);
+signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
+ uint32_t count,
+ bool intr, signed long timeout,
+ uint32_t *idx);
+
+/**
+ * dma_fence_wait - sleep until the fence gets signaled
+ * @fence: [in] the fence to wait on
+ * @intr: [in] if true, do an interruptible wait
+ *
+ * This function will return -ERESTARTSYS if interrupted by a signal,
+ * or 0 if the fence was signaled. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly holds a reference to the fence, otherwise the
+ * fence might be freed before return, resulting in undefined behavior.
+ */
+static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
+{
+ signed long ret;
+
+ /* Since dma_fence_wait_timeout cannot timeout with
+ * MAX_SCHEDULE_TIMEOUT, only valid return values are
+ * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
+ */
+ ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+
+ return ret < 0 ? ret : 0;
+}
+
+u64 dma_fence_context_alloc(unsigned num);
+
+#define DMA_FENCE_TRACE(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
+ pr_info("f %llu#%u: " fmt, \
+ __ff->context, __ff->seqno, ##args); \
+ } while (0)
+
+#define DMA_FENCE_WARN(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#define DMA_FENCE_ERR(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
deleted file mode 100644
index a44794e508df..000000000000
--- a/include/linux/fence-array.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * fence-array: aggregates fence to be waited together
- *
- * Copyright (C) 2016 Collabora Ltd
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
- * Authors:
- * Gustavo Padovan <gustavo@padovan.org>
- * Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_ARRAY_H
-#define __LINUX_FENCE_ARRAY_H
-
-#include <linux/fence.h>
-
-/**
- * struct fence_array_cb - callback helper for fence array
- * @cb: fence callback structure for signaling
- * @array: reference to the parent fence array object
- */
-struct fence_array_cb {
- struct fence_cb cb;
- struct fence_array *array;
-};
-
-/**
- * struct fence_array - fence to represent an array of fences
- * @base: fence base class
- * @lock: spinlock for fence handling
- * @num_fences: number of fences in the array
- * @num_pending: fences in the array still pending
- * @fences: array of the fences
- */
-struct fence_array {
- struct fence base;
-
- spinlock_t lock;
- unsigned num_fences;
- atomic_t num_pending;
- struct fence **fences;
-};
-
-extern const struct fence_ops fence_array_ops;
-
-/**
- * fence_is_array - check if a fence is from the array subsclass
- *
- * Return true if it is a fence_array and false otherwise.
- */
-static inline bool fence_is_array(struct fence *fence)
-{
- return fence->ops == &fence_array_ops;
-}
-
-/**
- * to_fence_array - cast a fence to a fence_array
- * @fence: fence to cast to a fence_array
- *
- * Returns NULL if the fence is not a fence_array,
- * or the fence_array otherwise.
- */
-static inline struct fence_array *to_fence_array(struct fence *fence)
-{
- if (fence->ops != &fence_array_ops)
- return NULL;
-
- return container_of(fence, struct fence_array, base);
-}
-
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any);
-
-#endif /* __LINUX_FENCE_ARRAY_H */
diff --git a/include/linux/fence.h b/include/linux/fence.h
deleted file mode 100644
index 0d763053f97a..000000000000
--- a/include/linux/fence.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Fence mechanism for dma-buf to allow for asynchronous dma access
- *
- * Copyright (C) 2012 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_H
-#define __LINUX_FENCE_H
-
-#include <linux/err.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/printk.h>
-#include <linux/rcupdate.h>
-
-struct fence;
-struct fence_ops;
-struct fence_cb;
-
-/**
- * struct fence - software synchronization primitive
- * @refcount: refcount for this fence
- * @ops: fence_ops associated with this fence
- * @rcu: used for releasing fence with kfree_rcu
- * @cb_list: list of all callbacks to call
- * @lock: spin_lock_irqsave used for locking
- * @context: execution context this fence belongs to, returned by
- * fence_context_alloc()
- * @seqno: the sequence number of this fence inside the execution context,
- * can be compared to decide which fence would be signaled later.
- * @flags: A mask of FENCE_FLAG_* defined below
- * @timestamp: Timestamp when the fence was signaled.
- * @status: Optional, only valid if < 0, must be set before calling
- * fence_signal, indicates that the fence has completed with an error.
- *
- * the flags member must be manipulated and read using the appropriate
- * atomic ops (bit_*), so taking the spinlock will not be needed most
- * of the time.
- *
- * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
- * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
- * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
- * implementer of the fence for its own purposes. Can be used in different
- * ways by different fence implementers, so do not rely on this.
- *
- * Since atomic bitops are used, this is not guaranteed to be the case.
- * Particularly, if the bit was set, but fence_signal was called right
- * before this bit was set, it would have been able to set the
- * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
- * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
- * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
- * after fence_signal was called, any enable_signaling call will have either
- * been completed, or never called at all.
- */
-struct fence {
- struct kref refcount;
- const struct fence_ops *ops;
- struct rcu_head rcu;
- struct list_head cb_list;
- spinlock_t *lock;
- u64 context;
- unsigned seqno;
- unsigned long flags;
- ktime_t timestamp;
- int status;
-};
-
-enum fence_flag_bits {
- FENCE_FLAG_SIGNALED_BIT,
- FENCE_FLAG_ENABLE_SIGNAL_BIT,
- FENCE_FLAG_USER_BITS, /* must always be last member */
-};
-
-typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
-
-/**
- * struct fence_cb - callback for fence_add_callback
- * @node: used by fence_add_callback to append this struct to fence::cb_list
- * @func: fence_func_t to call
- *
- * This struct will be initialized by fence_add_callback, additional
- * data can be passed along by embedding fence_cb in another struct.
- */
-struct fence_cb {
- struct list_head node;
- fence_func_t func;
-};
-
-/**
- * struct fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
- *
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc. This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
- *
- * This function can be called called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->status may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->status if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to fence_default_wait for default implementation.
- * the fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
- */
-
-struct fence_ops {
- const char * (*get_driver_name)(struct fence *fence);
- const char * (*get_timeline_name)(struct fence *fence);
- bool (*enable_signaling)(struct fence *fence);
- bool (*signaled)(struct fence *fence);
- signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
- void (*release)(struct fence *fence);
-
- int (*fill_driver_data)(struct fence *fence, void *data, int size);
- void (*fence_value_str)(struct fence *fence, char *str, int size);
- void (*timeline_value_str)(struct fence *fence, char *str, int size);
-};
-
-void fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno);
-
-void fence_release(struct kref *kref);
-void fence_free(struct fence *fence);
-
-/**
- * fence_get - increases refcount of the fence
- * @fence: [in] fence to increase refcount of
- *
- * Returns the same fence, with refcount increased by 1.
- */
-static inline struct fence *fence_get(struct fence *fence)
-{
- if (fence)
- kref_get(&fence->refcount);
- return fence;
-}
-
-/**
- * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
- * @fence: [in] fence to increase refcount of
- *
- * Function returns NULL if no refcount could be obtained, or the fence.
- */
-static inline struct fence *fence_get_rcu(struct fence *fence)
-{
- if (kref_get_unless_zero(&fence->refcount))
- return fence;
- else
- return NULL;
-}
-
-/**
- * fence_put - decreases refcount of the fence
- * @fence: [in] fence to reduce refcount of
- */
-static inline void fence_put(struct fence *fence)
-{
- if (fence)
- kref_put(&fence->refcount, fence_release);
-}
-
-int fence_signal(struct fence *fence);
-int fence_signal_locked(struct fence *fence);
-signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func);
-bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
-void fence_enable_sw_signaling(struct fence *fence);
-
-/**
- * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * This function requires fence->lock to be held.
- */
-static inline bool
-fence_is_signaled_locked(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal_locked(fence);
- return true;
- }
-
- return false;
-}
-
-/**
- * fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * It's recommended for seqno fences to call fence_signal when the
- * operation is complete, it makes it possible to prevent issues from
- * wraparound between time of issue and time of use by checking the return
- * value of this function before calling hardware-specific wait instructions.
- */
-static inline bool
-fence_is_signaled(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal(fence);
- return true;
- }
-
- return false;
-}
-
-/**
- * fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns true if f1 is chronologically later than f2. Both fences must be
- * from the same context, since a seqno is not re-used across contexts.
- */
-static inline bool fence_is_later(struct fence *f1, struct fence *f2)
-{
- if (WARN_ON(f1->context != f2->context))
- return false;
-
- return (int)(f1->seqno - f2->seqno) > 0;
-}
-
-/**
- * fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns NULL if both fences are signaled, otherwise the fence that would be
- * signaled last. Both fences must be from the same context, since a seqno is
- * not re-used across contexts.
- */
-static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
-{
- if (WARN_ON(f1->context != f2->context))
- return NULL;
-
- /*
- * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
- * set if enable_signaling wasn't called, and enabling that here is
- * overkill.
- */
- if (fence_is_later(f1, f2))
- return fence_is_signaled(f1) ? NULL : f1;
- else
- return fence_is_signaled(f2) ? NULL : f2;
-}
-
-signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
- bool intr, signed long timeout);
-
-/**
- * fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- *
- * This function will return -ERESTARTSYS if interrupted by a signal,
- * or 0 if the fence was signaled. Other error values may be
- * returned on custom implementations.
- *
- * Performs a synchronous wait on this fence. It is assumed the caller
- * directly or indirectly holds a reference to the fence, otherwise the
- * fence might be freed before return, resulting in undefined behavior.
- */
-static inline signed long fence_wait(struct fence *fence, bool intr)
-{
- signed long ret;
-
- /* Since fence_wait_timeout cannot timeout with
- * MAX_SCHEDULE_TIMEOUT, only valid return values are
- * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
- */
- ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-
- return ret < 0 ? ret : 0;
-}
-
-u64 fence_context_alloc(unsigned num);
-
-#define FENCE_TRACE(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
- pr_info("f %llu#%u: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define FENCE_WARN(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#define FENCE_ERR(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#endif /* __LINUX_FENCE_H */
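The removal above drops the last of the old struct fence API; each entry point has a dma_fence_* counterpart with the same semantics. A minimal caller-side sketch of what a wait looks like after the rename; the mydrv_device structure and its last_fence field are hypothetical, used only to illustrate the renamed calls:

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical driver structure, only for illustration. */
struct mydrv_device {
	struct dma_fence *last_fence;
};

/* Wait up to 100ms, interruptibly, for the last submitted fence. */
static int mydrv_wait_idle(struct mydrv_device *mydev)
{
	struct dma_fence *fence;
	signed long ret;

	fence = dma_fence_get(mydev->last_fence);	/* was fence_get() */
	if (!fence)
		return 0;

	ret = dma_fence_wait_timeout(fence, true,	/* was fence_wait_timeout() */
				     msecs_to_jiffies(100));
	dma_fence_put(fence);				/* was fence_put() */

	if (ret == 0)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;
}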
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index c46d2aa16d81..1d18af034554 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type)
static inline void frontswap_init(unsigned type, unsigned long *map)
{
- if (frontswap_enabled())
- __frontswap_init(type, map);
+#ifdef CONFIG_FRONTSWAP
+ __frontswap_init(type, map);
+#endif
}
#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 16d2b6e874d6..dc0478c07b2a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -321,6 +321,7 @@ struct writeback_control;
#define IOCB_HIPRI (1 << 3)
#define IOCB_DSYNC (1 << 4)
#define IOCB_SYNC (1 << 5)
+#define IOCB_WRITE (1 << 6)
struct kiocb {
struct file *ki_filp;
@@ -1709,7 +1710,6 @@ struct file_operations {
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index e9744202fa29..edbb4fc674ed 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -78,6 +78,8 @@ enum hdmi_picture_aspect {
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
+ HDMI_PICTURE_ASPECT_64_27,
+ HDMI_PICTURE_ASPECT_256_135,
HDMI_PICTURE_ASPECT_RESERVED,
};
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9b9f65d99873..e35e6de633b9 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd);
+ pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6824556d37ed..cd184bdca58f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
- const struct kobject *kobj = &device_obj->device.kobj;
-
- return kobj->name;
-}
-
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2d9b650047a5..d49e26c6cdc7 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -429,6 +429,7 @@ struct intel_iommu {
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
struct idr pasid_idr;
+ u32 pasid_max;
#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7e9a789be5e0..a0649973ee5b 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return flags & IP6SKB_L3SLAVE;
}
#else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return false;
}
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
static inline int inet6_iif(const struct sk_buff *skb)
{
- bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+ bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+ skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return true;
+#endif
+ return false;
+}
+
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6a164297358..c9f379689dd0 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,7 +476,6 @@ enum {
enum {
MLX4_INTERFACE_STATE_UP = 1 << 0,
MLX4_INTERFACE_STATE_DELETION = 1 << 1,
- MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
};
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
@@ -1399,7 +1398,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
const u32 offset[], u32 value[],
size_t array_len, u8 port);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786427e4..ecc451d89ccd 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -418,8 +418,12 @@ struct mlx5_core_health {
u32 prev;
int miss_counter;
bool sick;
+ /* wq spinlock to synchronize draining */
+ spinlock_t wq_lock;
struct workqueue_struct *wq;
+ unsigned long flags;
struct work_struct work;
+ struct delayed_work recover_work;
};
struct mlx5_cq_table {
@@ -626,10 +630,6 @@ struct mlx5_db {
};
enum {
- MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
-enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -638,13 +638,6 @@ enum {
MLX5_PTYS_EN = 1 << 2,
};
-struct mlx5_db_pgdir {
- struct list_head list;
- DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
- __be32 *db_page;
- dma_addr_t db_dma;
-};
-
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5d3d5024fc8..d8905a229f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
/* Reset and initialize a NAND device */
-int nand_reset(struct nand_chip *chip);
+int nand_reset(struct nand_chip *chip, int chipnr);
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 136ae6bbe81e..e16a2a980ea8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1619,7 +1619,7 @@ enum netdev_priv_flags {
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
* @tc_to_txq: XXX: need comments on this one
- * @prio_tc_map XXX: need comments on this one
+ * @prio_tc_map: XXX: need comments on this one
*
* @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
*
@@ -2169,7 +2169,10 @@ struct napi_gro_cb {
/* Used to determine if flush_id can be ignored */
u8 is_atomic:1;
- /* 5 bit hole */
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* 1 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2180,6 +2183,40 @@ struct napi_gro_cb {
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+ struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
@@ -3317,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);
+static __always_inline int ____dev_forward_skb(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+ unlikely(!is_skb_forwardable(dev, skb))) {
+ atomic_long_inc(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+
+ skb_scrub_packet(skb, true);
+ skb->priority = 0;
+ return 0;
+}
+
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
@@ -3877,7 +3929,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
ldev = netdev_all_lower_get_next(dev, &(iter)))
#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
- for (iter = (dev)->all_adj_list.lower.next, \
+ for (iter = &(dev)->all_adj_list.lower, \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
ldev; \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
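The new recursion counter bounds how many nested gro_receive handlers a single skb may traverse; encapsulation protocols are expected to dispatch to the inner handler through call_gro_receive() (or call_gro_receive_sk()) rather than calling it directly. A hedged sketch of the intended call pattern; the foo_* functions are placeholders, not real offload code:

#include <linux/netdevice.h>

/* Placeholder for the inner-protocol handler a tunnel would look up. */
static struct sk_buff **foo_inner_gro_receive(struct sk_buff **head,
					      struct sk_buff *skb)
{
	return NULL;
}

static struct sk_buff **foo_tunnel_gro_receive(struct sk_buff **head,
					       struct sk_buff *skb)
{
	/* call_gro_receive() increments NAPI_GRO_CB(skb)->recursion_counter
	 * and sets the flush bit instead of recursing further once
	 * GRO_RECURSION_LIMIT (15) nested handlers have been traversed. */
	return call_gro_receive(foo_inner_gro_receive, head, skb);
}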
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 2ab233661ae5..a58cca8bcb29 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -29,6 +29,7 @@ struct phy_device *of_phy_attach(struct net_device *dev,
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
extern int of_phy_register_fixed_link(struct device_node *np);
+extern void of_phy_deregister_fixed_link(struct device_node *np);
extern bool of_phy_is_fixed_link(struct device_node *np);
#else /* CONFIG_OF */
@@ -83,6 +84,9 @@ static inline int of_phy_register_fixed_link(struct device_node *np)
{
return -ENOSYS;
}
+static inline void of_phy_deregister_fixed_link(struct device_node *np)
+{
+}
static inline bool of_phy_is_fixed_link(struct device_node *np)
{
return false;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index dd15d39e1985..7dbe9148b2f8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -374,16 +374,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
}
/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the index of the page within the radix tree
+ * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)

*/
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_to_index(struct page *page)
{
pgoff_t pgoff;
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
-
if (likely(!PageTransTail(page)))
return page->index;
@@ -397,6 +394,18 @@ static inline pgoff_t page_to_pgoff(struct page *page)
}
/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+ if (unlikely(PageHeadHuge(page)))
+ return page->index << compound_order(page);
+
+ return page_to_index(page);
+}
+
+/*
* Return byte-offset into filesystem object for page.
*/
static inline loff_t page_offset(struct page *page)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0e49f70dbd9b..a38772a85588 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1928,6 +1928,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
+static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+ while (1) {
+ if (!pci_is_pcie(dev))
+ break;
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ if (!dev->bus->self)
+ break;
+ dev = dev->bus->self;
+ }
+ return NULL;
+}
+
void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
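The new pcie_find_root_port() helper walks dev->bus->self upward until it reaches a PCI Express Root Port, or returns NULL if there is none. An illustrative caller, useful for anything configured at the Root Port such as AER; the function below is a sketch, not existing kernel code:

#include <linux/pci.h>

/* Log which Root Port a given PCIe device sits under, if any. */
static void foo_show_root_port(struct pci_dev *pdev)
{
	struct pci_dev *root = pcie_find_root_port(pdev);

	if (root)
		dev_info(&pdev->dev, "root port: %s\n", pci_name(root));
	else
		dev_info(&pdev->dev, "no PCIe root port found\n");
}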
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index ee1bed7dbfc6..78bb0d7f6b11 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -253,6 +253,13 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
return -ENOSYS;
}
+static inline int phy_reset(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_get_bus_width(struct phy *phy)
{
return -ENOSYS;
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f9ae903bbb84..8978a60371f4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
/* forward */
struct qed_dev;
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index 99fbe6d55acb..f48d64b0e2fb 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
bool qede_roce_supported(struct qede_dev *dev);
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
int qede_roce_dev_add(struct qede_dev *dev);
void qede_roce_dev_event_open(struct qede_dev *dev);
void qede_roce_dev_event_close(struct qede_dev *dev);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 9adc7b21903d..f6673132431d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/lockdep.h>
@@ -116,22 +117,22 @@ struct reg_sequence {
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
- int ret; \
+ int pollret; \
might_sleep_if(sleep_us); \
for (;;) { \
- ret = regmap_read((map), (addr), &(val)); \
- if (ret) \
+ pollret = regmap_read((map), (addr), &(val)); \
+ if (pollret) \
break; \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- ret = regmap_read((map), (addr), &(val)); \
+ pollret = regmap_read((map), (addr), &(val)); \
break; \
} \
if (sleep_us) \
usleep_range((sleep_us >> 2) + 1, sleep_us); \
} \
- ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+ pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
})
#ifdef CONFIG_REGMAP
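Renaming the macro-local variable to pollret stops it from shadowing the ret that callers typically use to capture the macro's result. Typical usage, sketched against a hypothetical device with a STATUS register and a READY bit:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define FOO_STATUS	0x04		/* hypothetical register address */
#define FOO_READY	BIT(0)		/* hypothetical ready flag */

static int foo_wait_ready(struct regmap *map)
{
	unsigned int val;
	int ret;

	/* Poll roughly every 100us, give up after 10ms. ret ends up as 0,
	 * -ETIMEDOUT, or a regmap_read() error, and is no longer shadowed
	 * inside the macro. */
	ret = regmap_read_poll_timeout(map, FOO_STATUS, val,
				       val & FOO_READY, 100, 10000);
	return ret;
}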
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b0f305e77b7f..d9706a6f5ae2 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -40,7 +40,7 @@
#define _LINUX_RESERVATION_H
#include <linux/ww_mutex.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>
@@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[];
struct reservation_object_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
- struct fence __rcu *shared[];
+ struct dma_fence __rcu *shared[];
};
/**
@@ -74,7 +74,7 @@ struct reservation_object {
struct ww_mutex lock;
seqcount_t seq;
- struct fence __rcu *fence_excl;
+ struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
struct reservation_object_list *staged;
};
@@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj)
{
int i;
struct reservation_object_list *fobj;
- struct fence *excl;
+ struct dma_fence *excl;
/*
* This object should be dead and all references must have
@@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj)
*/
excl = rcu_dereference_protected(obj->fence_excl, 1);
if (excl)
- fence_put(excl);
+ dma_fence_put(excl);
fobj = rcu_dereference_protected(obj->fence, 1);
if (fobj) {
for (i = 0; i < fobj->shared_count; ++i)
- fence_put(rcu_dereference_protected(fobj->shared[i], 1));
+ dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
kfree(fobj);
}
@@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj)
* RETURNS
* The exclusive fence or NULL
*/
-static inline struct fence *
+static inline struct dma_fence *
reservation_object_get_excl(struct reservation_object *obj)
{
return rcu_dereference_protected(obj->fence_excl,
@@ -173,35 +173,32 @@ reservation_object_get_excl(struct reservation_object *obj)
* RETURNS
* The exclusive fence or NULL if none
*/
-static inline struct fence *
+static inline struct dma_fence *
reservation_object_get_excl_rcu(struct reservation_object *obj)
{
- struct fence *fence;
- unsigned seq;
-retry:
- seq = read_seqcount_begin(&obj->seq);
+ struct dma_fence *fence;
+
+ if (!rcu_access_pointer(obj->fence_excl))
+ return NULL;
+
rcu_read_lock();
- fence = rcu_dereference(obj->fence_excl);
- if (read_seqcount_retry(&obj->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
- fence = fence_get(fence);
+ fence = dma_fence_get_rcu_safe(&obj->fence_excl);
rcu_read_unlock();
+
return fence;
}
int reservation_object_reserve_shared(struct reservation_object *obj);
void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct fence *fence);
+ struct dma_fence *fence);
void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct fence *fence);
+ struct dma_fence *fence);
int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct fence **pfence_excl,
+ struct dma_fence **pfence_excl,
unsigned *pshared_count,
- struct fence ***pshared);
+ struct dma_fence ***pshared);
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
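With this rework, reservation_object_get_excl_rcu() bails out early when no exclusive fence is installed and otherwise takes a reference via dma_fence_get_rcu_safe() instead of looping on the seqcount. A sketch of a lockless reader built on top of it; foo_wait_excl() is illustrative only:

#include <linux/reservation.h>

/* Wait interruptibly for the exclusive fence on obj, if one is set. */
static long foo_wait_excl(struct reservation_object *obj)
{
	struct dma_fence *excl;
	long ret;

	excl = reservation_object_get_excl_rcu(obj);	/* NULL or a referenced fence */
	if (!excl)
		return 0;

	ret = dma_fence_wait(excl, true);
	dma_fence_put(excl);
	return ret;
}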
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 348f51b0ec92..e9c009dc3a4a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2567,6 +2567,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
+extern void sched_autogroup_exit_task(struct task_struct *p);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
@@ -2576,6 +2577,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit_task(struct task_struct *p) { }
#endif
extern int yield_to(struct task_struct *p, bool preempt);
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
index a1ba6a5ccdd6..c58c535d12a8 100644
--- a/include/linux/seqno-fence.h
+++ b/include/linux/seqno-fence.h
@@ -20,7 +20,7 @@
#ifndef __LINUX_SEQNO_FENCE_H
#define __LINUX_SEQNO_FENCE_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/dma-buf.h>
enum seqno_fence_condition {
@@ -29,15 +29,15 @@ enum seqno_fence_condition {
};
struct seqno_fence {
- struct fence base;
+ struct dma_fence base;
- const struct fence_ops *ops;
+ const struct dma_fence_ops *ops;
struct dma_buf *sync_buf;
uint32_t seqno_ofs;
enum seqno_fence_condition condition;
};
-extern const struct fence_ops seqno_fence_ops;
+extern const struct dma_fence_ops seqno_fence_ops;
/**
* to_seqno_fence - cast a fence to a seqno_fence
@@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops;
* or the seqno_fence otherwise.
*/
static inline struct seqno_fence *
-to_seqno_fence(struct fence *fence)
+to_seqno_fence(struct dma_fence *fence)
{
if (fence->ops != &seqno_fence_ops)
return NULL;
@@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence)
* dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
* device's vm can be expensive.
*
- * It is recommended for creators of seqno_fence to call fence_signal
+ * It is recommended for creators of seqno_fence to call dma_fence_signal()
* before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check fence_is_signaled
+ * time of issue vs time of check, since users can check dma_fence_is_signaled()
* before submitting instructions for the hardware to wait on the fence.
* However, when ops.enable_signaling is not called, it doesn't have to be
* done as soon as possible, just before there's any real danger of seqno
@@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
struct dma_buf *sync_buf, uint32_t context,
uint32_t seqno_ofs, uint32_t seqno,
enum seqno_fence_condition cond,
- const struct fence_ops *ops)
+ const struct dma_fence_ops *ops)
{
BUG_ON(!fence || !sync_buf || !ops);
BUG_ON(!ops->wait || !ops->enable_signaling ||
!ops->get_driver_name || !ops->get_timeline_name);
/*
- * ops is used in fence_init for get_driver_name, so needs to be
+ * ops is used in dma_fence_init for get_driver_name, so needs to be
* initialized first
*/
fence->ops = ops;
- fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
+ dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
get_dma_buf(sync_buf);
fence->sync_buf = sync_buf;
fence->seqno_ofs = seqno_ofs;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 601258f6e621..32810f279f8e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -936,6 +936,7 @@ struct sk_buff_fclones {
/**
* skb_fclone_busy - check if fclone is busy
+ * @sk: socket
* @skb: buffer
*
* Returns true if skb is a fast clone, and its clone is not freed.
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ab02a457da1f..e5d193440374 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -25,6 +25,7 @@ struct svc_xprt_ops {
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
int (*xpo_secure_port)(struct svc_rqst *);
+ void (*xpo_kill_temp_xprt)(struct svc_xprt *);
};
struct svc_xprt_class {
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index aa17ccfc2f57..3e3ab84fc4cd 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -18,8 +18,8 @@
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/fence.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
/**
* struct sync_file - sync file to export to the userspace
@@ -41,13 +41,13 @@ struct sync_file {
wait_queue_head_t wq;
- struct fence *fence;
- struct fence_cb cb;
+ struct dma_fence *fence;
+ struct dma_fence_cb cb;
};
-#define POLL_ENABLED FENCE_FLAG_USER_BITS
+#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
-struct sync_file *sync_file_create(struct fence *fence);
-struct fence *sync_file_get_fence(int fd);
+struct sync_file *sync_file_create(struct dma_fence *fence);
+struct dma_fence *sync_file_get_fence(int fd);
#endif /* _LINUX_SYNC_H */
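sync_file_create() and sync_file_get_fence() now take and return dma_fence as well. A rough sketch of the usual export path, turning a driver-owned fence into a file descriptor for userspace; the error codes and the O_CLOEXEC choice are illustrative:

#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

static int foo_fence_to_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);
	return fd;
}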
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f2d072787947..8f998afc1384 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -174,6 +174,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+void __ipv6_sock_mc_close(struct sock *sk);
void ipv6_sock_mc_close(struct sock *sk);
bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
const struct in6_addr *src_addr);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index f00bf667ec33..554671c81f4a 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1018,7 +1018,7 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
}
struct hci_dev *hci_dev_get(int index);
-struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);
+struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);
struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bd19faad0d96..14b51d739c3b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4047,14 +4047,29 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
*/
/**
+ * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3
+ * @skb: the 802.11 data frame
+ * @ehdr: pointer to a &struct ethhdr that will get the header, instead
+ * of it being pushed into the SKB
+ * @addr: the device MAC address
+ * @iftype: the virtual interface type
+ * Return: 0 on success. Non-zero on error.
+ */
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ const u8 *addr, enum nl80211_iftype iftype);
+
+/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
* @skb: the 802.11 data frame
* @addr: the device MAC address
* @iftype: the virtual interface type
* Return: 0 on success. Non-zero on error.
*/
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype);
+static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ enum nl80211_iftype iftype)
+{
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype);
+}
/**
* ieee80211_data_from_8023 - convert an 802.3 frame to 802.11
@@ -4072,22 +4087,23 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
*
- * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of
- * 802.3 frames. The @list will be empty if the decode fails. The
- * @skb is consumed after the function returns.
+ * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
+ * The @list will be empty if the decode fails. The @skb must be fully
+ * header-less before being passed in here; it is freed in this function.
*
- * @skb: The input IEEE 802.11n A-MSDU frame.
+ * @skb: The input A-MSDU frame without any headers.
* @list: The output list of 802.3 frames. It must be allocated and
* initialized by the caller.
* @addr: The device MAC address.
* @iftype: The device interface type.
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
- * @has_80211_header: Set it true if SKB is with IEEE 802.11 header.
+ * @check_da: DA to check in the inner ethernet header, or NULL
+ * @check_sa: SA to check in the inner ethernet header, or NULL
*/
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- bool has_80211_header);
+ const u8 *check_da, const u8 *check_sa);
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index d15214d673b2..2a1abbf8da74 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
__skb_queue_head_init(&cell->napi_skbs);
+
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
+
netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
napi_enable(&cell->napi);
}
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 515352c6280a..b0576cb2ab25 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -190,8 +190,8 @@ struct inet6_dev {
__u32 if_flags;
int dead;
+ u32 desync_factor;
u8 rndid[8];
- struct timer_list regen_timer;
struct list_head tempaddr_list;
struct in6_addr token;
diff --git a/include/net/ip.h b/include/net/ip.h
index bc43c0fcae12..d3a107850a41 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -38,7 +38,7 @@ struct sock;
struct inet_skb_parm {
int iif;
struct ip_options opt; /* Compiled IP options */
- unsigned char flags;
+ u16 flags;
#define IPSKB_FORWARDED BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
@@ -47,11 +47,16 @@ struct inet_skb_parm {
#define IPSKB_REROUTED BIT(4)
#define IPSKB_DOREDIRECT BIT(5)
#define IPSKB_FRAG_PMTU BIT(6)
-#define IPSKB_FRAG_SEGS BIT(7)
+#define IPSKB_L3SLAVE BIT(7)
u16 frag_max_size;
};
+static inline bool ipv4_l3mdev_skb(u16 flags)
+{
+ return !!(flags & IPSKB_L3SLAVE);
+}
+
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
return ip_hdr(skb)->ihl * 4;
@@ -572,7 +577,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -594,7 +599,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
- ip_cmsg_recv_offset(msg, skb, 0);
+ ip_cmsg_recv_offset(msg, skb, 0, 0);
}
bool icmp_global_allow(void);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fb961a576abe..a74e2aa40ef4 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -230,6 +230,8 @@ struct fib6_table {
rwlock_t tb6_lock;
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
+ unsigned int flags;
+#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index e0cd318d5103..f83e78d071a3 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -32,6 +32,7 @@ struct route_info {
#define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008
#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040
/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
* Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 20ed9699fcd4..1b1cf33cbfb0 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
{
int pkt_len, err;
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
pkt_len = skb->len - skb_inner_network_offset(skb);
err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
if (unlikely(net_xmit_eval(err)))
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index b9314b48e39f..f390c3bb05c5 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -243,6 +243,7 @@ int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
int fib_table_flush(struct net *net, struct fib_table *table);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
#ifndef CONFIG_IP_MULTIPLE_TABLES
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 8fed1cd78658..f11ca837361b 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -970,6 +970,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
+ int addr_len);
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
int addr_len);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index a810dfcb83c2..e2dba93e374f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -811,14 +811,18 @@ enum mac80211_rate_control_flags {
* in the control information, and it will be filled by the rate
* control algorithm according to what should be sent. For example,
* if this array contains, in the format { <idx>, <count> } the
- * information
+ * information::
+ *
* { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 }
+ *
* then this means that the frame should be transmitted
* up to twice at rate 3, up to twice at rate 2, and up to four
* times at rate 1 if it doesn't get acknowledged. Say it gets
* acknowledged by the peer after the fifth attempt, the status
- * information should then contain
+ * information should then contain::
+ *
* { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ...
+ *
* since it was transmitted twice at rate 3, twice at rate 2
* and once at rate 1 after which we received an acknowledgement.
*/
@@ -1168,8 +1172,8 @@ enum mac80211_rx_vht_flags {
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
* @vht_nss: number of streams (VHT only)
- * @flag: %RX_FLAG_*
- * @vht_flag: %RX_VHT_FLAG_*
+ * @flag: %RX_FLAG_\*
+ * @vht_flag: %RX_VHT_FLAG_\*
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1432,7 +1436,7 @@ enum ieee80211_vif_flags {
* @probe_req_reg: probe requests should be reported to mac80211 for this
* interface.
* @drv_priv: data area for driver use, will always be aligned to
- * sizeof(void *).
+ * sizeof(void \*).
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
*/
struct ieee80211_vif {
@@ -1743,7 +1747,7 @@ struct ieee80211_sta_rates {
* @wme: indicates whether the STA supports QoS/WME (if local devices does,
* otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
- * sizeof(void *), size is determined in hw information.
+ * sizeof(void \*), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
* if wme is supported.
* @max_sp: max Service Period. Only valid if wme is supported.
@@ -2146,12 +2150,12 @@ enum ieee80211_hw_flags {
*
* @radiotap_mcs_details: lists which MCS information the HW can
* report; by default it is set to _MCS, _GI and _BW but doesn't
- * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only
+ * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_\* values, only
* adding _BW is supported today.
*
* @radiotap_vht_details: lists which VHT MCS information the HW reports,
* the default is _GI | _BANDWIDTH.
- * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
+ * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* 'units_pos' member is set to a non-negative value it must be set to
@@ -2486,6 +2490,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* in the software stack cares about, we will, in the future, have mac80211
* tell the driver which information elements are interesting in the sense
* that we want to see changes in them. This will include
+ *
* - a list of information element IDs
* - a list of OUIs for the vendor information element
*
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index fc4f757107df..0940598c002f 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -170,7 +170,7 @@ static inline struct net *copy_net_ns(unsigned long flags,
extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int pid);
+struct net *get_net_ns_by_fd(int fd);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 50418052a520..d9d52c020a70 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -100,6 +100,9 @@ struct nf_conn {
possible_net_t ct_net;
+#if IS_ENABLED(CONFIG_NF_NAT)
+ struct rhlist_head nat_bysource;
+#endif
/* all members below initialized via memset */
u8 __nfct_init_offset[0];
@@ -117,9 +120,6 @@ struct nf_conn {
/* Extensions */
struct nf_ct_ext *ext;
-#if IS_ENABLED(CONFIG_NF_NAT)
- struct rhash_head nat_bysource;
-#endif
/* Storage reserved for other modules, must be the last member */
union nf_conntrack_proto proto;
};
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 498814626e28..1723a67c0b0a 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
if (net->ct.labels_used == 0)
return NULL;
- return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS,
- sizeof(struct nf_conn_labels), GFP_ATOMIC);
+ return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
#else
return NULL;
#endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5031e072567b..b02af0bf5777 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -145,7 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
-unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
+int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
@@ -313,7 +313,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @size: maximum set size
* @nelems: number of elements
* @ndeact: number of deactivated elements queued for removal
- * @timeout: default timeout value in msecs
+ * @timeout: default timeout value in jiffies
* @gc_int: garbage collection interval in msecs
* @policy: set parameterization (see enum nft_set_policies)
* @udlen: user data length
@@ -542,7 +542,8 @@ void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *data,
u64 timeout, gfp_t gfp);
-void nft_set_elem_destroy(const struct nft_set *set, void *elem);
+void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ bool destroy_expr);
/**
* struct nft_set_gc_batch_head - nf_tables set garbage collection batch
@@ -693,7 +694,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
{
int err;
- __module_get(src->ops->type->owner);
if (src->ops->clone) {
dst->ops = src->ops;
err = src->ops->clone(dst, src);
@@ -702,6 +702,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
} else {
memcpy(dst, src, src->ops->size);
}
+
+ __module_get(src->ops->type->owner);
return 0;
}
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 87a7f42e7639..31acc3f4f132 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *);
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
struct sctphdr *, struct sctp_association **,
struct sctp_transport **);
-void sctp_err_finish(struct sock *, struct sctp_association *);
+void sctp_err_finish(struct sock *, struct sctp_transport *);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
struct sctp_transport *t, __u32 pmtu);
void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
diff --git a/include/net/sock.h b/include/net/sock.h
index ebf75db08e06..92b269709b9a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -252,6 +252,7 @@ struct sock_common {
* @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
+ * @sk_padding: unused element for alignment
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -302,7 +303,8 @@ struct sock_common {
* @sk_backlog_rcv: callback to process the backlog
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
- */
+ * @sk_rcu: used during RCU grace period
+ */
struct sock {
/*
* Now struct inet_timewait_sock also uses sock_common, so please just
@@ -1594,11 +1596,11 @@ static inline void sock_put(struct sock *sk)
void sock_gen_put(struct sock *sk);
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
- unsigned int trim_cap);
+ unsigned int trim_cap, bool refcounted);
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested)
{
- return __sk_receive_skb(sk, skb, nested, 1);
+ return __sk_receive_skb(sk, skb, nested, 1, true);
}
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f83b7f220a65..123979fe12bf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -794,12 +794,23 @@ struct tcp_skb_cb {
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
- bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags);
+ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
#endif
+/* TCP_SKB_CB reference means this can not be used from early demux */
+static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+ skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+ return true;
+#endif
+ return false;
+}
+
/* Due to TSO, an SKB can be composed of multiple actual
* packets. To keep these tracked properly, we use this.
*/
@@ -1209,6 +1220,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+int tcp_filter(struct sock *sk, struct sk_buff *skb);
#undef STATE_TRACE
diff --git a/include/net/udp.h b/include/net/udp.h
index ea53a87d880f..4948790d393d 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -258,6 +258,7 @@ void udp_flush_pending_frames(struct sock *sk);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0255613a54a4..308adc4154f4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -225,9 +225,9 @@ struct vxlan_config {
struct vxlan_dev {
struct hlist_node hlist; /* vni hash table */
struct list_head next; /* vxlan's per namespace list */
- struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */
+ struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */
#if IS_ENABLED(CONFIG_IPV6)
- struct vxlan_sock *vn6_sock; /* listening socket for IPv6 */
+ struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */
#endif
struct net_device *dev;
struct net *net; /* netns for packet i/o */
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index 796cabf6be5e..5ab972e116ec 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -10,8 +10,9 @@
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_i915_init(struct hdac_bus *bus);
int snd_hdac_i915_exit(struct hdac_bus *bus);
@@ -29,13 +30,13 @@ static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
}
static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
- hda_nid_t nid, int rate)
+ hda_nid_t nid, int dev_id, int rate)
{
return 0;
}
static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
- bool *audio_enabled, char *buffer,
- int max_bytes)
+ int dev_id, bool *audio_enabled,
+ char *buffer, int max_bytes)
{
return -ENODEV;
}
diff --git a/include/trace/events/fence.h b/include/trace/events/dma_fence.h
index d6dfa05ba322..1157cb4c3c6f 100644
--- a/include/trace/events/fence.h
+++ b/include/trace/events/dma_fence.h
@@ -1,17 +1,17 @@
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM fence
+#define TRACE_SYSTEM dma_fence
#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_FENCE_H
+#define _TRACE_DMA_FENCE_H
#include <linux/tracepoint.h>
-struct fence;
+struct dma_fence;
-TRACE_EVENT(fence_annotate_wait_on,
+TRACE_EVENT(dma_fence_annotate_wait_on,
/* fence: the fence waiting on f1, f1: the fence to be waited on. */
- TP_PROTO(struct fence *fence, struct fence *f1),
+ TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
TP_ARGS(fence, f1),
@@ -48,9 +48,9 @@ TRACE_EVENT(fence_annotate_wait_on,
__entry->waiting_context, __entry->waiting_seqno)
);
-DECLARE_EVENT_CLASS(fence,
+DECLARE_EVENT_CLASS(dma_fence,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence),
@@ -73,56 +73,56 @@ DECLARE_EVENT_CLASS(fence,
__entry->seqno)
);
-DEFINE_EVENT(fence, fence_emit,
+DEFINE_EVENT(dma_fence, dma_fence_emit,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_init,
+DEFINE_EVENT(dma_fence, dma_fence_init,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_destroy,
+DEFINE_EVENT(dma_fence, dma_fence_destroy,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_enable_signal,
+DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_signaled,
+DEFINE_EVENT(dma_fence, dma_fence_signaled,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_wait_start,
+DEFINE_EVENT(dma_fence, dma_fence_wait_start,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_wait_end,
+DEFINE_EVENT(dma_fence, dma_fence_wait_end,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-#endif /* _TRACE_FENCE_H */
+#endif /* _TRACE_DMA_FENCE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index d6b5a21f3d3c..2191a9e4f3db 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -50,6 +50,7 @@ extern "C" {
#define DRM_AMDGPU_WAIT_CS 0x09
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
+#define DRM_AMDGPU_WAIT_FENCES 0x12
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +64,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -81,6 +83,8 @@ extern "C" {
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
/* Flag that create shadow bo(GTT) while allocating vram bo */
#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
+/* Flag that allocating the BO should use linear VRAM */
+#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -305,6 +309,32 @@ union drm_amdgpu_wait_cs {
struct drm_amdgpu_wait_cs_out out;
};
+struct drm_amdgpu_fence {
+ __u32 ctx_id;
+ __u32 ip_type;
+ __u32 ip_instance;
+ __u32 ring;
+ __u64 seq_no;
+};
+
+struct drm_amdgpu_wait_fences_in {
+ /** Userspace pointer to an array of struct drm_amdgpu_fence */
+ __u64 fences;
+ __u32 fence_count;
+ __u32 wait_all;
+ __u64 timeout_ns;
+};
+
+struct drm_amdgpu_wait_fences_out {
+ __u32 status;
+ __u32 first_signaled;
+};
+
+union drm_amdgpu_wait_fences {
+ struct drm_amdgpu_wait_fences_in in;
+ struct drm_amdgpu_wait_fences_out out;
+};
+
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1
@@ -436,6 +466,7 @@ struct drm_amdgpu_cs_chunk_data {
*
*/
#define AMDGPU_IDS_FLAGS_FUSION 0x1
+#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -487,6 +518,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS 0x18
+/* Query memory about VRAM and GTT domains */
+#define AMDGPU_INFO_MEMORY 0x19
+/* Query vce clock table */
+#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -572,6 +607,34 @@ struct drm_amdgpu_info_vram_gtt {
__u64 gtt_size;
};
+struct drm_amdgpu_heap_info {
+ /** max. physical memory */
+ __u64 total_heap_size;
+
+ /** Theoretical max. available memory in the given heap */
+ __u64 usable_heap_size;
+
+ /**
+ * Number of bytes allocated in the heap. This includes all processes
+ * and private allocations in the kernel. It changes when new buffers
+ * are allocated, freed, and moved. It cannot be larger than
+ * heap_size.
+ */
+ __u64 heap_usage;
+
+ /**
+ * Theoretically possible max. size of a buffer which
+ * could be allocated in the given heap
+ */
+ __u64 max_allocation;
+};
+
+struct drm_amdgpu_memory_info {
+ struct drm_amdgpu_heap_info vram;
+ struct drm_amdgpu_heap_info cpu_accessible_vram;
+ struct drm_amdgpu_heap_info gtt;
+};
+
struct drm_amdgpu_info_firmware {
__u32 ver;
__u32 feature;
@@ -645,6 +708,24 @@ struct drm_amdgpu_info_hw_ip {
__u32 _pad;
};
+#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
+
+struct drm_amdgpu_info_vce_clock_table_entry {
+ /** System clock */
+ __u32 sclk;
+ /** Memory clock */
+ __u32 mclk;
+ /** VCE clock */
+ __u32 eclk;
+ __u32 pad;
+};
+
+struct drm_amdgpu_info_vce_clock_table {
+ struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
+ __u32 num_valid_entries;
+ __u32 pad;
+};
+
/*
* Supported GPU families
*/
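The new DRM_IOCTL_AMDGPU_WAIT_FENCES accepts a user pointer to an array of struct drm_amdgpu_fence and waits either for all of them (wait_all != 0) or for the first one to signal. A userspace-flavoured sketch; the fd and the fence descriptors are assumed to come from earlier command submissions, and the exact timeout semantics should be checked against the driver:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Wait for whichever of two fences signals first. */
static int foo_wait_any(int drm_fd, const struct drm_amdgpu_fence fences[2],
			uint64_t timeout_ns)
{
	union drm_amdgpu_wait_fences args;

	memset(&args, 0, sizeof(args));
	args.in.fences = (uintptr_t)fences;	/* user pointer packed into __u64 */
	args.in.fence_count = 2;
	args.in.wait_all = 0;			/* 0: any fence satisfies the wait */
	args.in.timeout_ns = timeout_ns;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args))
		return -1;

	/* out.status is expected to be non-zero when the wait timed out;
	 * out.first_signaled reports which entry completed for an "any" wait. */
	return args.out.status ? -1 : (int)args.out.first_signaled;
}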
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index df0e3504c349..728790b92354 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -47,7 +47,15 @@ extern "C" {
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
-/* bit compatible with the xorg definitions. */
+/* bit compatible with the xrandr RR_ definitions (bits 0-13)
+ *
+ * ABI warning: Existing userspace really expects
+ * the mode flags to match the xrandr definitions. Any
+ * changes that don't match the xrandr definitions will
+ * likely need a new client cap or some other mechanism
+ * to avoid breaking existing userspace. This includes
+ * allocating new flags in the previously unused bits!
+ */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
@@ -77,6 +85,19 @@ extern "C" {
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE 0
+#define DRM_MODE_PICTURE_ASPECT_4_3 1
+#define DRM_MODE_PICTURE_ASPECT_16_9 2
+
+/* Aspect ratio flag bitmask (4 bits 22:19) */
+#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
+#define DRM_MODE_FLAG_PIC_AR_NONE \
+ (DRM_MODE_PICTURE_ASPECT_NONE<<19)
+#define DRM_MODE_FLAG_PIC_AR_4_3 \
+ (DRM_MODE_PICTURE_ASPECT_4_3<<19)
+#define DRM_MODE_FLAG_PIC_AR_16_9 \
+ (DRM_MODE_PICTURE_ASPECT_16_9<<19)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
@@ -92,11 +113,6 @@ extern "C" {
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
-/* Picture aspect ratio options */
-#define DRM_MODE_PICTURE_ASPECT_NONE 0
-#define DRM_MODE_PICTURE_ASPECT_4_3 1
-#define DRM_MODE_PICTURE_ASPECT_16_9 2
-
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
@@ -392,17 +408,20 @@ struct drm_mode_fb_cmd2 {
* offsets[1]. Note that offsets[0] will generally
* be 0 (but this is not required).
*
- * To accommodate tiled, compressed, etc formats, a per-plane
+ * To accommodate tiled, compressed, etc formats, a
* modifier can be specified. The default value of zero
* indicates "native" format as specified by the fourcc.
- * Vendor specific modifier token. This allows, for example,
- * different tiling/swizzling pattern on different planes.
- * See discussion above of DRM_FORMAT_MOD_xxx.
+ * Vendor specific modifier token. Note that even though
+ * it looks like we have a modifier per-plane, we in fact
+ * do not. The modifier for each plane must be identical.
+ * Thus all combinations of different data layouts for
+ * multi plane formats must be enumerated as separate
+ * modifiers.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
- __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
+ __u64 modifier[4]; /* ie, tiling, compress */
};
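
For a two-plane format, the comment above means userspace must repeat the same modifier for every used plane. A sketch for an NV12-style framebuffer; the helper name and parameters are illustrative, and the remaining drm_mode_fb_cmd2 fields (width, height, pixel_format, flags) are assumed to be set elsewhere:

static void fill_nv12_fb(struct drm_mode_fb_cmd2 *cmd, __u32 bo_handle,
			 __u32 y_pitch, __u32 uv_pitch, __u32 uv_offset,
			 __u64 modifier)
{
	cmd->handles[0] = bo_handle;	/* Y plane */
	cmd->handles[1] = bo_handle;	/* CbCr plane, may share the BO */
	cmd->pitches[0] = y_pitch;
	cmd->pitches[1] = uv_pitch;
	cmd->offsets[0] = 0;
	cmd->offsets[1] = uv_offset;
	cmd->modifier[0] = modifier;	/* must be identical ... */
	cmd->modifier[1] = modifier;	/* ... across all planes */
}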
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 03725fe89859..1c12a350eca3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -389,6 +389,11 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+ * priorities and the driver will attempt to execute batches in priority order.
+ */
+#define I915_PARAM_HAS_SCHEDULER 41
+
typedef struct drm_i915_getparam {
__s32 param;
/*
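
A sketch of probing the new capability from userspace; the DRM_IOCTL_I915_GETPARAM ioctl, <sys/ioctl.h> and the opened device fd are assumed, only I915_PARAM_HAS_SCHEDULER comes from this patch:

static int i915_has_scheduler(int fd)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &value,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* old kernel: treat as no scheduler support */
	return value;
}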
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 8c51e8a0df89..4d5d6a2bc59e 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -2,17 +2,24 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __MSM_DRM_H__
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index ad7edc3edf7c..f07a09016726 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -286,6 +286,8 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_V3D_IDENT1 1
#define DRM_VC4_PARAM_V3D_IDENT2 2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
+#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
+#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
struct drm_vc4_get_param {
__u32 param;
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
index 5cd4d4d2dd1d..9c9c6ad55f14 100644
--- a/include/uapi/linux/atm_zatm.h
+++ b/include/uapi/linux/atm_zatm.h
@@ -14,7 +14,6 @@
#include <linux/atmapi.h>
#include <linux/atmioc.h>
-#include <linux/time.h>
#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
/* get pool statistics */
diff --git a/include/uapi/linux/bpqether.h b/include/uapi/linux/bpqether.h
index a6c35e1a89ad..05865edaefda 100644
--- a/include/uapi/linux/bpqether.h
+++ b/include/uapi/linux/bpqether.h
@@ -5,9 +5,7 @@
* Defines for the BPQETHER pseudo device driver
*/
-#ifndef __LINUX_IF_ETHER_H
#include <linux/if_ether.h>
-#endif
#define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */
#define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 099a4200732c..8e547231c1b7 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -119,8 +119,7 @@ struct ethtool_cmd {
static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
__u32 speed)
{
-
- ep->speed = (__u16)speed;
+ ep->speed = (__u16)(speed & 0xFFFF);
ep->speed_hi = (__u16)(speed >> 16);
}
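
The masking change is cosmetic (the cast already truncated to 16 bits); the point of the helper pair is that speeds above 65535 Mb/s round-trip through the split fields. A small sketch, using the existing ethtool_cmd_speed() helper from the same header:

static void speed_example(void)
{
	struct ethtool_cmd cmd = { 0 };

	ethtool_cmd_speed_set(&cmd, 100000);	/* 100G link */
	/* cmd.speed    == 100000 & 0xFFFF == 34464 */
	/* cmd.speed_hi == 100000 >> 16    == 1     */
	/* ethtool_cmd_speed(&cmd) returns 100000 again */
}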
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index d6d071fc3c56..3af60ee69053 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -640,7 +640,7 @@
* Control a data application associated with the currently viewed channel,
* e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
*/
-#define KEY_DATA 0x275
+#define KEY_DATA 0x277
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 300ef255d1e0..4ee67cb99143 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -972,12 +972,19 @@ struct kvm_irqfd {
__u8 pad[16];
};
+/* For KVM_CAP_ADJUST_CLOCK */
+
+/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
+#define KVM_CLOCK_TSC_STABLE 2
+
struct kvm_clock_data {
__u64 clock;
__u32 flags;
__u32 pad[9];
};
+/* For KVM_CAP_SW_TLB */
+
#define KVM_MMU_FSL_BOOKE_NOHV 0
#define KVM_MMU_FSL_BOOKE_HV 1
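
A sketch of how userspace might check the new flag after KVM_GET_CLOCK; the VM fd handling and <sys/ioctl.h> are assumed, only KVM_CLOCK_TSC_STABLE comes from this patch:

static int vm_clock_is_tsc_stable(int vm_fd)
{
	struct kvm_clock_data data = { 0 };

	if (ioctl(vm_fd, KVM_GET_CLOCK, &data))
		return 0;
	return !!(data.flags & KVM_CLOCK_TSC_STABLE);
}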
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 262f0379d83a..5a78be518101 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -350,7 +350,7 @@ struct rtnexthop {
#define RTNH_F_OFFLOAD 8 /* offloaded route */
#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
-#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
/* Macros to handle hexthops */
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index e3969bd939e4..9611c7b6c18f 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -11,3 +11,4 @@ header-y += tc_vlan.h
header-y += tc_bpf.h
header-y += tc_connmark.h
header-y += tc_ife.h
+header-y += tc_tunnel_key.h
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 33d00a4ce656..819d895edfdc 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,12 +18,6 @@
#include <linux/types.h>
#include <sound/asound.h>
-#ifndef __KERNEL__
-#error This API is an early revision and not enabled in the current
-#error kernel release, it will be enabled in a future kernel version
-#error with incompatible changes to what is here.
-#endif
-
/*
* Maximum number of channels topology kcontrol can represent.
*/
diff --git a/include/video/display_timing.h b/include/video/display_timing.h
index 28d9d0d566ca..3d289e990aca 100644
--- a/include/video/display_timing.h
+++ b/include/video/display_timing.h
@@ -28,6 +28,10 @@ enum display_flags {
DISPLAY_FLAGS_INTERLACED = BIT(8),
DISPLAY_FLAGS_DOUBLESCAN = BIT(9),
DISPLAY_FLAGS_DOUBLECLK = BIT(10),
+ /* drive sync on pos. edge */
+ DISPLAY_FLAGS_SYNC_POSEDGE = BIT(11),
+ /* drive sync on neg. edge */
+ DISPLAY_FLAGS_SYNC_NEGEDGE = BIT(12),
};
/*
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 173073eb6aaf..53cd07ccaa4c 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -247,8 +247,6 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset,
unsigned int v_offset);
-void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
- u32 pixel_format, int stride, int height);
int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image);
void ipu_cpmem_dump(struct ipuv3_channel *ch);
@@ -320,6 +318,7 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
bool ipu_csi_is_interlaced(struct ipu_csi *csi);
void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w);
void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w);
+void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert);
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
u32 pix_clk);
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
index ea755b5616d8..956455fc9f9a 100644
--- a/include/video/of_display_timing.h
+++ b/include/video/of_display_timing.h
@@ -16,21 +16,22 @@ struct display_timings;
#define OF_USE_NATIVE_MODE -1
#ifdef CONFIG_OF
-int of_get_display_timing(struct device_node *np, const char *name,
+int of_get_display_timing(const struct device_node *np, const char *name,
struct display_timing *dt);
-struct display_timings *of_get_display_timings(struct device_node *np);
-int of_display_timings_exist(struct device_node *np);
+struct display_timings *of_get_display_timings(const struct device_node *np);
+int of_display_timings_exist(const struct device_node *np);
#else
-static inline int of_get_display_timing(struct device_node *np, const char *name,
- struct display_timing *dt)
+static inline int of_get_display_timing(const struct device_node *np,
+ const char *name, struct display_timing *dt)
{
return -ENOSYS;
}
-static inline struct display_timings *of_get_display_timings(struct device_node *np)
+static inline struct display_timings *
+of_get_display_timings(const struct device_node *np)
{
return NULL;
}
-static inline int of_display_timings_exist(struct device_node *np)
+static inline int of_display_timings_exist(const struct device_node *np)
{
return -ENOSYS;
}
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 8a09b32e07d6..dd4104c9aa12 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -272,7 +272,7 @@ int __init rd_load_image(char *from)
sys_write(out_fd, buf, BLOCK_SIZE);
#if !defined(CONFIG_S390)
if (!(i % 16)) {
- printk("%c\b", rotator[rotate & 0x3]);
+ pr_cont("%c\b", rotator[rotate & 0x3]);
rotate++;
}
#endif
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 570eeca7bdfa..ad1bc67aff1b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab)
hlist_for_each_entry_safe(l, n, head, hash_node) {
hlist_del_rcu(&l->hash_node);
- htab_elem_free(htab, l);
+ if (l->state != HTAB_EXTRA_ELEM_USED)
+ htab_elem_free(htab, l);
}
}
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 228f962447a5..237f3d6a7ddc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr)
err = bpf_map_charge_memlock(map);
if (err)
- goto free_map;
+ goto free_map_nouncharge;
err = bpf_map_new_fd(map);
if (err < 0)
@@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr)
return err;
free_map:
+ bpf_map_uncharge_memlock(map);
+free_map_nouncharge:
map->ops->map_free(map);
return err;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 99a7e5b388f2..8199821f54cf 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state)
reg->map_ptr->key_size,
reg->map_ptr->value_size);
if (reg->min_value != BPF_REGISTER_MIN_RANGE)
- verbose(",min_value=%llu",
- (unsigned long long)reg->min_value);
+ verbose(",min_value=%lld",
+ (long long)reg->min_value);
if (reg->max_value != BPF_REGISTER_MAX_RANGE)
verbose(",max_value=%llu",
(unsigned long long)reg->max_value);
@@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
* index'es we need to make sure that whatever we use
* will have a set floor within our range.
*/
- if ((s64)reg->min_value < 0) {
+ if (reg->min_value < 0) {
verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
@@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
{
if (reg->max_value > BPF_REGISTER_MAX_RANGE)
reg->max_value = BPF_REGISTER_MAX_RANGE;
- if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE)
+ if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
+ reg->min_value > BPF_REGISTER_MAX_RANGE)
reg->min_value = BPF_REGISTER_MIN_RANGE;
}
@@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
- u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE;
+ s64 min_val = BPF_REGISTER_MIN_RANGE;
+ u64 max_val = BPF_REGISTER_MAX_RANGE;
bool min_set = false, max_set = false;
u8 opcode = BPF_OP(insn->code);
@@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
return;
}
+ /* If one of our values was at the end of our ranges then we can't just
+ * do our normal operations on the register; we need to set the values
+ * to the min/max since they are undefined.
+ */
+ if (min_val == BPF_REGISTER_MIN_RANGE)
+ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ if (max_val == BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+
switch (opcode) {
case BPF_ADD:
- dst_reg->min_value += min_val;
- dst_reg->max_value += max_val;
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+ dst_reg->min_value += min_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value += max_val;
break;
case BPF_SUB:
- dst_reg->min_value -= min_val;
- dst_reg->max_value -= max_val;
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+ dst_reg->min_value -= min_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value -= max_val;
break;
case BPF_MUL:
- dst_reg->min_value *= min_val;
- dst_reg->max_value *= max_val;
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+ dst_reg->min_value *= min_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value *= max_val;
break;
case BPF_AND:
- /* & is special since it could end up with 0 bits set. */
- dst_reg->min_value &= min_val;
+ /* Disallow AND'ing of negative numbers, ain't nobody got time
+ * for that. Otherwise the minimum is 0 and the max is the max
+ * value we could AND against.
+ */
+ if (min_val < 0)
+ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ else
+ dst_reg->min_value = 0;
dst_reg->max_value = max_val;
break;
case BPF_LSH:
@@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
*/
if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
- else
+ else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
dst_reg->min_value <<= min_val;
if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
- else
+ else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value <<= max_val;
break;
case BPF_RSH:
- dst_reg->min_value >>= min_val;
- dst_reg->max_value >>= max_val;
- break;
- case BPF_MOD:
- /* % is special since it is an unsigned modulus, so the floor
- * will always be 0.
+ /* RSH by a negative number is undefined, and the BPF_RSH is an
+ * unsigned shift, so make the appropriate casts.
*/
- dst_reg->min_value = 0;
- dst_reg->max_value = max_val - 1;
+ if (min_val < 0 || dst_reg->min_value < 0)
+ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ else
+ dst_reg->min_value =
+ (u64)(dst_reg->min_value) >> min_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value >>= max_val;
break;
default:
reset_reg_range_values(regs, insn->dst_reg);
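
As a standalone illustration of the saturating bound update above (this mirrors the BPF_ADD case, it is not the verifier code itself; the limits are illustrative stand-ins for BPF_REGISTER_MIN_RANGE/MAX_RANGE):

#define MIN_RANGE (-1024LL)
#define MAX_RANGE (1024ULL)

static void add_bounds(long long *dst_min, unsigned long long *dst_max,
		       long long src_min, unsigned long long src_max)
{
	/* An unknown source bound makes the matching destination bound unknown. */
	if (src_min == MIN_RANGE)
		*dst_min = MIN_RANGE;
	if (src_max == MAX_RANGE)
		*dst_max = MAX_RANGE;

	/* Only update bounds that are still known; saturated ones stay put. */
	if (*dst_min != MIN_RANGE)
		*dst_min += src_min;
	if (*dst_max != MAX_RANGE)
		*dst_max += src_max;
}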
@@ -2430,6 +2454,7 @@ static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{
+ bool varlen_map_access = env->varlen_map_value_access;
struct bpf_reg_state *rold, *rcur;
int i;
@@ -2443,12 +2468,17 @@ static bool states_equal(struct bpf_verifier_env *env,
/* If the ranges were not the same, but everything else was and
* we didn't do a variable access into a map then we are a-ok.
*/
- if (!env->varlen_map_value_access &&
+ if (!varlen_map_access &&
rold->type == rcur->type && rold->imm == rcur->imm)
continue;
+ /* If we didn't do a map access then again we don't care about the
+ * mismatched range values and it's ok if our old type was
+ * UNKNOWN and we didn't go to a NOT_INIT'ed reg.
+ */
if (rold->type == NOT_INIT ||
- (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT))
+ (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
+ rcur->type != NOT_INIT))
continue;
if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e292132efac..6ee1febdf6ff 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event,
* this will always be called from the right CPU.
*/
cpuctx = __get_cpu_context(ctx);
+
+ /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
+ if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
+ /*
+ * We are removing the last cpu event in this context.
+ * If that event is not active in this cpu, cpuctx->cgrp
+ * should've been cleared by perf_cgroup_switch.
+ */
+ WARN_ON_ONCE(!add && cpuctx->cgrp);
+ return;
+ }
cpuctx->cgrp = add ? event->cgrp : NULL;
}
@@ -8018,6 +8029,7 @@ restart:
* if <size> is not specified, the range is treated as a single address.
*/
enum {
+ IF_ACT_NONE = -1,
IF_ACT_FILTER,
IF_ACT_START,
IF_ACT_STOP,
@@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = {
{ IF_SRC_KERNEL, "%u/%u" },
{ IF_SRC_FILEADDR, "%u@%s" },
{ IF_SRC_KERNELADDR, "%u" },
+ { IF_ACT_NONE, NULL },
};
/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 9d68c45ebbe3..3076f3089919 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -836,6 +836,7 @@ void __noreturn do_exit(long code)
*/
perf_event_exit_task(tsk);
+ sched_autogroup_exit_task(tsk);
cgroup_exit(tsk);
/*
diff --git a/kernel/fork.c b/kernel/fork.c
index 623259fc794d..997ac1d584f7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -315,6 +315,9 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
static void release_task_stack(struct task_struct *tsk)
{
+ if (WARN_ON(tsk->state != TASK_DEAD))
+ return; /* Better to leak the stack than to free prematurely */
+
account_kernel_stack(tsk, -1);
arch_release_thread_stack(tsk->stack);
free_thread_stack(tsk);
@@ -1862,6 +1865,7 @@ bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
exit_creds(p);
bad_fork_free:
+ p->state = TASK_DEAD;
put_task_stack(p);
free_task(p);
fork_out:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9c4d30483264..6b669593e7eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
} else if (new->flags & IRQF_TRIGGER_MASK) {
unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
- unsigned int omsk = irq_settings_get_trigger_mask(desc);
+ unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
if (nmsk != omsk)
/* hope the handler works with current trigger mode */
pr_warn("irq %d uses trigger mode %u; requested %u\n",
- irq, nmsk, omsk);
+ irq, omsk, nmsk);
}
*old_ptr = new;
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 51c4b24b6328..c2b88490d857 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -46,6 +46,14 @@ enum {
(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
/*
+ * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * .data and .bss to fit in the required 32MB limit for the kernel. With
+ * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * So, reduce the static allocations for lockdep-related structures so that
+ * everything fits in the current required size limit.
+ */
+#ifdef CONFIG_PROVE_LOCKING_SMALL
+/*
* MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
* we track.
*
@@ -54,18 +62,24 @@ enum {
* table (if it's not there yet), and we check it for lock order
* conflicts and deadlocks.
*/
+#define MAX_LOCKDEP_ENTRIES 16384UL
+#define MAX_LOCKDEP_CHAINS_BITS 15
+#define MAX_STACK_TRACE_ENTRIES 262144UL
+#else
#define MAX_LOCKDEP_ENTRIES 32768UL
#define MAX_LOCKDEP_CHAINS_BITS 16
-#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
-
-#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
/*
* Stack-trace: tightly packed array of stack backtrace
* addresses. Protected by the hash_lock.
*/
#define MAX_STACK_TRACE_ENTRIES 524288UL
+#endif
+
+#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
+
+#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];
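
Worked out from the constants above, the small variant roughly halves every static lockdep table: MAX_LOCKDEP_CHAINS = 1 << 15 = 32768 (vs 1 << 16 = 65536), MAX_LOCKDEP_CHAIN_HLOCKS = 32768 * 5 = 163840 (vs 327680), MAX_LOCKDEP_ENTRIES drops from 32768 to 16384, and MAX_STACK_TRACE_ENTRIES from 524288 to 262144.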
diff --git a/kernel/module.c b/kernel/module.c
index f57dd63186e6..0e54d5bf0097 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1301,8 +1301,9 @@ static int check_version(Elf_Shdr *sechdrs,
goto bad_version;
}
- pr_warn("%s: no symbol version for %s\n", mod->name, symname);
- return 0;
+ /* Broken toolchain. Warn once, then let it go.. */
+ pr_warn_once("%s: no symbol version for %s\n", mod->name, symname);
+ return 1;
bad_version:
pr_warn("%s: disagrees about version of symbol %s\n",
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 084452e34a12..bdff5ed57f10 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -203,8 +203,10 @@ static int __init test_suspend(void)
/* RTCs have initialized by now too ... can we use one? */
dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
- if (dev)
+ if (dev) {
rtc = rtc_class_open(dev_name(dev));
+ put_device(dev);
+ }
if (!rtc) {
printk(warn_no_rtc);
return 0;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index de08fc90baaf..f7a55e9ff2f7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -253,17 +253,6 @@ static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);
-#ifdef CONFIG_OF
-static bool of_specified_console;
-
-void console_set_by_of(void)
-{
- of_specified_console = true;
-}
-#else
-# define of_specified_console false
-#endif
-
/* Flag: console code may call schedule() */
static int console_may_schedule;
@@ -794,8 +783,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
return ret;
}
-static void cont_flush(void);
-
static ssize_t devkmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -811,7 +798,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
if (ret)
return ret;
raw_spin_lock_irq(&logbuf_lock);
- cont_flush();
while (user->seq == log_next_seq) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
@@ -874,7 +860,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
return -ESPIPE;
raw_spin_lock_irq(&logbuf_lock);
- cont_flush();
switch (whence) {
case SEEK_SET:
/* the first record */
@@ -913,7 +898,6 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
poll_wait(file, &log_wait, wait);
raw_spin_lock_irq(&logbuf_lock);
- cont_flush();
if (user->seq < log_next_seq) {
/* return error when data has vanished underneath us */
if (user->seq < log_first_seq)
@@ -1300,7 +1284,6 @@ static int syslog_print(char __user *buf, int size)
size_t skip;
raw_spin_lock_irq(&logbuf_lock);
- cont_flush();
if (syslog_seq < log_first_seq) {
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
@@ -1360,7 +1343,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
return -ENOMEM;
raw_spin_lock_irq(&logbuf_lock);
- cont_flush();
if (buf) {
u64 next_seq;
u64 seq;
@@ -1522,7 +1504,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
raw_spin_lock_irq(&logbuf_lock);
- cont_flush();
if (syslog_seq < log_first_seq) {
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
@@ -2657,7 +2638,7 @@ void register_console(struct console *newcon)
* didn't select a console we take the first one
* that registers here.
*/
- if (preferred_console < 0 && !of_specified_console) {
+ if (preferred_console < 0) {
if (newcon->index < 0)
newcon->index = 0;
if (newcon->setup == NULL ||
@@ -3039,7 +3020,6 @@ void kmsg_dump(enum kmsg_dump_reason reason)
dumper->active = true;
raw_spin_lock_irqsave(&logbuf_lock, flags);
- cont_flush();
dumper->cur_seq = clear_seq;
dumper->cur_idx = clear_idx;
dumper->next_seq = log_next_seq;
@@ -3130,7 +3110,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
bool ret;
raw_spin_lock_irqsave(&logbuf_lock, flags);
- cont_flush();
ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
@@ -3173,7 +3152,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
goto out;
raw_spin_lock_irqsave(&logbuf_lock, flags);
- cont_flush();
if (dumper->cur_seq < log_first_seq) {
/* messages are gone, move to first available one */
dumper->cur_seq = log_first_seq;
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index a5d966cb8891..f1c8fd566246 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
if (tg != &root_task_group)
return false;
-
/*
- * We can only assume the task group can't go away on us if
- * autogroup_move_group() can see us on ->thread_group list.
+ * If we race with autogroup_move_group() the caller can use the old
+ * value of signal->autogroup but in this case sched_move_task() will
+ * be called again before autogroup_kref_put().
+ *
+ * However, there is no way sched_autogroup_exit_task() could tell us
+ * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
*/
if (p->flags & PF_EXITING)
return false;
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
return true;
}
+void sched_autogroup_exit_task(struct task_struct *p)
+{
+ /*
+ * We are going to call exit_notify() and autogroup_move_group() can't
+ * see this thread after that: we can no longer use signal->autogroup.
+ * See the PF_EXITING check in task_wants_autogroup().
+ */
+ sched_move_task(p);
+}
+
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
}
p->signal->autogroup = autogroup_kref_get(ag);
-
- if (!READ_ONCE(sysctl_sched_autogroup_enabled))
- goto out;
-
+ /*
+ * We can't avoid sched_move_task() after we changed signal->autogroup:
+ * this process can already run with task_group() == prev->tg, or we can
+ * race with cgroup code which can read autogroup = prev under rq->lock.
+ * In the latter case for_each_thread() can not miss a migrating thread:
+ * cpu_cgroup_attach() must not be possible after cgroup_exit(), and the
+ * thread can't be removed from the thread list while we hold ->siglock.
+ *
+ * If an exiting thread was already removed from thread list we rely on
+ * sched_autogroup_exit_task().
+ */
for_each_thread(p, t)
sched_move_task(t);
-out:
+
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 42d4027f9e26..154fd689fe02 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5192,21 +5192,14 @@ void sched_show_task(struct task_struct *p)
int ppid;
unsigned long state = p->state;
+ if (!try_get_task_stack(p))
+ return;
if (state)
state = __ffs(state) + 1;
printk(KERN_INFO "%-15.15s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
-#if BITS_PER_LONG == 32
- if (state == TASK_RUNNING)
- printk(KERN_CONT " running ");
- else
- printk(KERN_CONT " %08lx ", thread_saved_pc(p));
-#else
if (state == TASK_RUNNING)
printk(KERN_CONT " running task ");
- else
- printk(KERN_CONT " %016lx ", thread_saved_pc(p));
-#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
#endif
@@ -5221,6 +5214,7 @@ void sched_show_task(struct task_struct *p)
print_worker_info(KERN_INFO, p);
show_stack(p, NULL);
+ put_task_stack(p);
}
void show_state_filter(unsigned long state_filter)
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index b3f05ee20d18..cbb387a265db 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1
[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
-static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
+/*
+ * We have to use TASKSTATS_CMD_ATTR_MAX here, as it is the maxattr in the family.
+ * Make sure they are always aligned.
+ */
+static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2050a7652a86..da87b3cba5b3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1862,6 +1862,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
/* Update rec->flags */
do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_DISABLED)
+ continue;
+
/* We need to update only differences of filter_hash */
in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
@@ -1884,6 +1888,10 @@ rollback:
/* Roll back what we did above */
do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_DISABLED)
+ continue;
+
if (rec == end)
goto err_out;
@@ -2397,6 +2405,10 @@ void __weak ftrace_replace_code(int enable)
return;
do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_DISABLED)
+ continue;
+
failed = __ftrace_replace_code(rec, enable);
if (failed) {
ftrace_bug(failed, rec);
@@ -2763,7 +2775,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
struct dyn_ftrace *rec;
do_for_each_ftrace_rec(pg, rec) {
- if (FTRACE_WARN_ON_ONCE(rec->flags))
+ if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
pr_warn(" %pS flags:%lx\n",
(void *)rec->ip, rec->flags);
} while_for_each_ftrace_rec();
@@ -3598,6 +3610,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
goto out_unlock;
do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_DISABLED)
+ continue;
+
if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
ret = enter_record(hash, rec, clear_filter);
if (ret < 0) {
@@ -3793,6 +3809,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
do_for_each_ftrace_rec(pg, rec) {
+ if (rec->flags & FTRACE_FL_DISABLED)
+ continue;
+
if (!ftrace_match_record(rec, &func_g, NULL, 0))
continue;
@@ -4685,6 +4704,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
do_for_each_ftrace_rec(pg, rec) {
+ if (rec->flags & FTRACE_FL_DISABLED)
+ continue;
+
if (ftrace_match_record(rec, &func_g, NULL, 0)) {
/* if it is in the array */
exists = false;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b01e547d4d04..a6c8db1d62f6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1085,6 +1085,9 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.txt.
+config PROVE_LOCKING_SMALL
+ bool
+
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a8e12601eb37..056052dc8e91 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -362,6 +362,7 @@ void debug_object_init(void *addr, struct debug_obj_descr *descr)
__debug_object_init(addr, descr, 0);
}
+EXPORT_SYMBOL_GPL(debug_object_init);
/**
* debug_object_init_on_stack - debug checks when an object on stack is
@@ -376,6 +377,7 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
__debug_object_init(addr, descr, 1);
}
+EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
/**
* debug_object_activate - debug checks when an object is activated
@@ -449,6 +451,7 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
}
return 0;
}
+EXPORT_SYMBOL_GPL(debug_object_activate);
/**
* debug_object_deactivate - debug checks when an object is deactivated
@@ -496,6 +499,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
* debug_object_destroy - debug checks when an object is destroyed
@@ -542,6 +546,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
out_unlock:
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
* debug_object_free - debug checks when an object is freed
@@ -582,6 +587,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
out_unlock:
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_free);
/**
* debug_object_assert_init - debug checks when object should be init-ed
@@ -626,6 +632,7 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_assert_init);
/**
* debug_object_active_state - debug checks object usage state machine
@@ -673,6 +680,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_active_state);
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f0c7f1481bae..f2bd21b93dfc 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -683,10 +683,11 @@ static void pipe_advance(struct iov_iter *i, size_t size)
struct pipe_inode_info *pipe = i->pipe;
struct pipe_buffer *buf;
int idx = i->idx;
- size_t off = i->iov_offset;
+ size_t off = i->iov_offset, orig_sz;
if (unlikely(i->count < size))
size = i->count;
+ orig_sz = size;
if (size) {
if (off) /* make it relative to the beginning of buffer */
@@ -713,6 +714,7 @@ static void pipe_advance(struct iov_iter *i, size_t size)
pipe->nrbufs--;
}
}
+ i->count -= orig_sz;
}
void iov_iter_advance(struct iov_iter *i, size_t size)
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 5464c8744ea9..e24388a863a7 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
if (!esize) {
/* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
* depending on if MOD equals 1. */
- rp[0] = 1;
res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
+ if (res->nlimbs) {
+ if (mpi_resize(res, 1) < 0)
+ goto enomem;
+ rp = res->d;
+ rp[0] = 1;
+ }
res->sign = 0;
goto leave;
}
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 4d830e299989..f87d138e9672 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -192,6 +192,7 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
trace->entries = stack->entries;
trace->skip = 0;
}
+EXPORT_SYMBOL_GPL(depot_fetch_stack);
/**
* depot_save_stack - save stack in a stack depot.
@@ -283,3 +284,4 @@ exit:
fast_exit:
return retval;
}
+EXPORT_SYMBOL_GPL(depot_save_stack);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 94346b4d8984..0362da0b66c3 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4831,7 +4831,7 @@ static struct bpf_test tests[] = {
{ },
INTERNAL,
{ 0x34 },
- { { 1, 0xbef } },
+ { { ETH_HLEN, 0xbef } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
},
/*
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 5e51872b3fc1..fbdf87920093 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,11 @@
#include <linux/uaccess.h>
#include <linux/module.h>
+/*
+ * Note: test functions are marked noinline so that their names appear in
+ * reports.
+ */
+
static noinline void __init kmalloc_oob_right(void)
{
char *ptr;
@@ -411,6 +416,29 @@ static noinline void __init copy_user_test(void)
kfree(kmem);
}
+static noinline void __init use_after_scope_test(void)
+{
+ volatile char *volatile p;
+
+ pr_info("use-after-scope on int\n");
+ {
+ int local = 0;
+
+ p = (char *)&local;
+ }
+ p[0] = 1;
+ p[3] = 1;
+
+ pr_info("use-after-scope on array\n");
+ {
+ char local[1024] = {0};
+
+ p = local;
+ }
+ p[0] = 1;
+ p[1023] = 1;
+}
+
static int __init kmalloc_tests_init(void)
{
kmalloc_oob_right();
@@ -436,6 +464,7 @@ static int __init kmalloc_tests_init(void)
kasan_global_oob();
ksize_unpoisons_memory();
copy_user_test();
+ use_after_scope_test();
return -EAGAIN;
}
diff --git a/mm/cma.c b/mm/cma.c
index 384c2cb51b56..c960459eda7e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -385,6 +385,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
bitmap_maxno = cma_bitmap_maxno(cma);
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+ if (bitmap_count > bitmap_maxno)
+ return NULL;
+
for (;;) {
mutex_lock(&cma->lock);
bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
diff --git a/mm/filemap.c b/mm/filemap.c
index c7fe2f16503f..50b52fe51937 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1732,6 +1732,9 @@ find_page:
if (inode->i_blkbits == PAGE_SHIFT ||
!mapping->a_ops->is_partially_uptodate)
goto page_not_up_to_date;
+ /* pipes can't handle partially uptodate pages */
+ if (unlikely(iter->type & ITER_PIPE))
+ goto page_not_up_to_date;
if (!trylock_page(page))
goto page_not_up_to_date;
/* Did it get truncated before we got the lock? */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cdcd25cb30fe..d4a6e4001512 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1426,11 +1426,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd)
+ pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
{
spinlock_t *old_ptl, *new_ptl;
pmd_t pmd;
struct mm_struct *mm = vma->vm_mm;
+ bool force_flush = false;
if ((old_addr & ~HPAGE_PMD_MASK) ||
(new_addr & ~HPAGE_PMD_MASK) ||
@@ -1456,6 +1457,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
+ if (pmd_present(pmd) && pmd_dirty(pmd))
+ force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
@@ -1467,6 +1470,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
+ if (force_flush)
+ flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+ else
+ *need_flush = true;
spin_unlock(old_ptl);
return true;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec49d9ef1eef..418bf01a50ed 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1826,11 +1826,17 @@ static void return_unused_surplus_pages(struct hstate *h,
* is not the case is if a reserve map was changed between calls. It
* is the responsibility of the caller to notice the difference and
* take appropriate action.
+ *
+ * vma_add_reservation is used in error paths where a reservation must
+ * be restored when a newly allocated huge page must be freed. It is
+ * to be called after calling vma_needs_reservation to determine if a
+ * reservation exists.
*/
enum vma_resv_mode {
VMA_NEEDS_RESV,
VMA_COMMIT_RESV,
VMA_END_RESV,
+ VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr,
@@ -1856,6 +1862,14 @@ static long __vma_reservation_common(struct hstate *h,
region_abort(resv, idx, idx + 1);
ret = 0;
break;
+ case VMA_ADD_RESV:
+ if (vma->vm_flags & VM_MAYSHARE)
+ ret = region_add(resv, idx, idx + 1);
+ else {
+ region_abort(resv, idx, idx + 1);
+ ret = region_del(resv, idx, idx + 1);
+ }
+ break;
default:
BUG();
}
@@ -1903,6 +1917,56 @@ static void vma_end_reservation(struct hstate *h,
(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}
+static long vma_add_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
+}
+
+/*
+ * This routine is called to restore a reservation on error paths. In the
+ * specific error paths, a huge page was allocated (via alloc_huge_page)
+ * and is about to be freed. If a reservation for the page existed,
+ * alloc_huge_page would have consumed the reservation and set PagePrivate
+ * in the newly allocated page. When the page is freed via free_huge_page,
+ * the global reservation count will be incremented if PagePrivate is set.
+ * However, free_huge_page can not adjust the reserve map. Adjust the
+ * reserve map here to be consistent with global reserve count adjustments
+ * to be made by free_huge_page.
+ */
+static void restore_reserve_on_error(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long address,
+ struct page *page)
+{
+ if (unlikely(PagePrivate(page))) {
+ long rc = vma_needs_reservation(h, vma, address);
+
+ if (unlikely(rc < 0)) {
+ /*
+ * Rare out of memory condition in reserve map
+ * manipulation. Clear PagePrivate so that
+ * global reserve count will not be incremented
+ * by free_huge_page. This will make it appear
+ * as though the reservation for this page was
+ * consumed. This may prevent the task from
+ * faulting in the page at a later time. This
+ * is better than inconsistent global huge page
+ * accounting of reserve counts.
+ */
+ ClearPagePrivate(page);
+ } else if (rc) {
+ rc = vma_add_reservation(h, vma, address);
+ if (unlikely(rc < 0))
+ /*
+ * See above comment about rare out of
+ * memory condition.
+ */
+ ClearPagePrivate(page);
+ } else
+ vma_end_reservation(h, vma, address);
+ }
+}
+
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{
@@ -3498,6 +3562,7 @@ retry_avoidcopy:
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out_release_all:
+ restore_reserve_on_error(h, vma, address, new_page);
put_page(new_page);
out_release_old:
put_page(old_page);
@@ -3680,6 +3745,7 @@ backout:
spin_unlock(ptl);
backout_unlocked:
unlock_page(page);
+ restore_reserve_on_error(h, vma, address, page);
put_page(page);
goto out;
}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 70c009741aab..0e9505f66ec1 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -764,6 +764,25 @@ EXPORT_SYMBOL(__asan_storeN_noabort);
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);
+/* Emitted by compiler to poison large objects when they go out of scope. */
+void __asan_poison_stack_memory(const void *addr, size_t size)
+{
+ /*
+ * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
+ * by redzones, so we simply round up size to simplify logic.
+ */
+ kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
+ KASAN_USE_AFTER_SCOPE);
+}
+EXPORT_SYMBOL(__asan_poison_stack_memory);
+
+/* Emitted by compiler to unpoison large objects when they go into scope. */
+void __asan_unpoison_stack_memory(const void *addr, size_t size)
+{
+ kasan_unpoison_shadow(addr, size);
+}
+EXPORT_SYMBOL(__asan_unpoison_stack_memory);
+
#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
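
Conceptually, the compiler instruments a scoped local roughly as below; this is a hand-written sketch of the generated calls, not actual compiler output, and the local names are illustrative:

void scope_example(void)
{
	{
		char buf[64];

		__asan_unpoison_stack_memory(buf, sizeof(buf));	/* scope entry */
		/* ... use buf ... */
		__asan_poison_stack_memory(buf, sizeof(buf));	/* scope exit */
	}
	/* Any later access through a stale pointer into buf now hits shadow
	 * marked KASAN_USE_AFTER_SCOPE and is reported as use-after-scope.
	 */
}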
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index e5c2181fee6f..1c260e6b3b3c 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -21,6 +21,7 @@
#define KASAN_STACK_MID 0xF2
#define KASAN_STACK_RIGHT 0xF3
#define KASAN_STACK_PARTIAL 0xF4
+#define KASAN_USE_AFTER_SCOPE 0xF8
/* Don't break randconfig/all*config builds */
#ifndef KASAN_ABI_VERSION
@@ -53,6 +54,9 @@ struct kasan_global {
#if KASAN_ABI_VERSION >= 4
struct kasan_source_location *location;
#endif
+#if KASAN_ABI_VERSION >= 5
+ char *odr_indicator;
+#endif
};
/**
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 24c1211fe9d5..073325aedc68 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -90,6 +90,9 @@ static void print_error_description(struct kasan_access_info *info)
case KASAN_KMALLOC_FREE:
bug_type = "use-after-free";
break;
+ case KASAN_USE_AFTER_SCOPE:
+ bug_type = "use-after-scope";
+ break;
}
pr_err("BUG: KASAN: %s in %pS at addr %p\n",
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 728d7790dc2d..87e1a7ca3846 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -103,6 +103,7 @@ static struct khugepaged_scan khugepaged_scan = {
.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
+#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -295,6 +296,7 @@ struct attribute_group khugepaged_attr_group = {
.attrs = khugepaged_attr,
.name = "khugepaged",
};
+#endif /* CONFIG_SYSFS */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e5355a5b423f..d1380ed93fdf 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1414,6 +1414,7 @@ static void kmemleak_scan(void)
/* data/bss scanning */
scan_large_block(_sdata, _edata);
scan_large_block(__bss_start, __bss_stop);
+ scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
#ifdef CONFIG_SMP
/* per-cpu sections scanning */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index de88f33519c0..19e796d36a62 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
}
if (!PageHuge(p) && PageTransHuge(hpage)) {
- lock_page(hpage);
- if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
- unlock_page(hpage);
- if (!PageAnon(hpage))
+ lock_page(p);
+ if (!PageAnon(p) || unlikely(split_huge_page(p))) {
+ unlock_page(p);
+ if (!PageAnon(p))
pr_err("Memory failure: %#lx: non anonymous thp\n",
pfn);
else
@@ -1126,9 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
put_hwpoison_page(p);
return -EBUSY;
}
- unlock_page(hpage);
- get_hwpoison_page(p);
- put_hwpoison_page(hpage);
+ unlock_page(p);
VM_BUG_ON_PAGE(!page_count(p), p);
hpage = compound_head(p);
}
diff --git a/mm/mlock.c b/mm/mlock.c
index 145a4258ddbc..cdbed8aaa426 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -190,10 +190,13 @@ unsigned int munlock_vma_page(struct page *page)
*/
spin_lock_irq(zone_lru_lock(zone));
- nr_pages = hpage_nr_pages(page);
- if (!TestClearPageMlocked(page))
+ if (!TestClearPageMlocked(page)) {
+ /* Potentially, PTE-mapped THP: do not skip the rest of the PTEs */
+ nr_pages = 1;
goto unlock_out;
+ }
+ nr_pages = hpage_nr_pages(page);
__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
if (__munlock_isolate_lru_page(page, true)) {
diff --git a/mm/mremap.c b/mm/mremap.c
index da22ad2a5678..30d7d2482eea 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,11 +104,13 @@ static pte_t move_soft_dirty_pte(pte_t pte)
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
- unsigned long new_addr, bool need_rmap_locks)
+ unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
spinlock_t *old_ptl, *new_ptl;
+ bool force_flush = false;
+ unsigned long len = old_end - old_addr;
/*
* When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
@@ -146,7 +148,19 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
new_pte++, new_addr += PAGE_SIZE) {
if (pte_none(*old_pte))
continue;
+
pte = ptep_get_and_clear(mm, old_addr, old_pte);
+ /*
+ * If we are remapping a dirty PTE, make sure
+ * to flush TLB before we drop the PTL for the
+ * old PTE or we may race with page_mkclean().
+ *
+ * This check has to be done after we removed the
+ * old PTE from page tables or another thread may
+ * dirty it after the check and before the removal.
+ */
+ if (pte_present(pte) && pte_dirty(pte))
+ force_flush = true;
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
set_pte_at(mm, new_addr, new_pte, pte);
@@ -156,6 +170,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_pte - 1);
+ if (force_flush)
+ flush_tlb_range(vma, old_end - len, old_end);
+ else
+ *need_flush = true;
pte_unmap_unlock(old_pte - 1, old_ptl);
if (need_rmap_locks)
drop_rmap_locks(vma);
@@ -201,13 +219,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (need_rmap_locks)
take_rmap_locks(vma);
moved = move_huge_pmd(vma, old_addr, new_addr,
- old_end, old_pmd, new_pmd);
+ old_end, old_pmd, new_pmd,
+ &need_flush);
if (need_rmap_locks)
drop_rmap_locks(vma);
- if (moved) {
- need_flush = true;
+ if (moved)
continue;
- }
}
split_huge_pmd(vma, old_pmd, old_addr);
if (pmd_trans_unstable(old_pmd))
@@ -220,11 +237,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
extent = next - new_addr;
if (extent > LATENCY_LIMIT)
extent = LATENCY_LIMIT;
- move_ptes(vma, old_pmd, old_addr, old_addr + extent,
- new_vma, new_pmd, new_addr, need_rmap_locks);
- need_flush = true;
+ move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
+ new_pmd, new_addr, need_rmap_locks, &need_flush);
}
- if (likely(need_flush))
+ if (need_flush)
flush_tlb_range(vma, old_end-len, old_addr);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8fd42aa7c4bd..6de9440e3ae2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -92,7 +92,7 @@ int _node_numa_mem_[MAX_NUMNODES];
#endif
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
-volatile u64 latent_entropy __latent_entropy;
+volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif
@@ -3658,7 +3658,7 @@ retry:
/* Make sure we know about allocations which stall for too long */
if (time_after(jiffies, alloc_start + stall_timeout)) {
warn_alloc(gfp_mask,
- "page alloction stalls for %ums, order:%u\n",
+ "page allocation stalls for %ums, order:%u",
jiffies_to_msecs(jiffies-alloc_start), order);
stall_timeout += 10 * HZ;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index ad7813d73ea7..166ebf5d2bce 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1483,6 +1483,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
copy_highpage(newpage, oldpage);
flush_dcache_page(newpage);
+ __SetPageLocked(newpage);
+ __SetPageSwapBacked(newpage);
SetPageUptodate(newpage);
set_page_private(newpage, swap_index);
SetPageSwapCache(newpage);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 71f0b28a1bec..329b03843863 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
s = create_cache(cache_name, root_cache->object_size,
root_cache->size, root_cache->align,
- root_cache->flags, root_cache->ctor,
- memcg, root_cache);
+ root_cache->flags & CACHE_CREATE_MASK,
+ root_cache->ctor, memcg, root_cache);
/*
* If we could not create a memcg cache, do not complain, because
* that's not critical at all as we can always proceed with the root
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2210de290b54..f30438970cd1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2224,6 +2224,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
+ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+ return 0;
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
diff --git a/mm/truncate.c b/mm/truncate.c
index a01cce450a26..8d8c62d89e6d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -283,7 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (!trylock_page(page))
continue;
- WARN_ON(page_to_pgoff(page) != index);
+ WARN_ON(page_to_index(page) != index);
if (PageWriteback(page)) {
unlock_page(page);
continue;
@@ -371,7 +371,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
}
lock_page(page);
- WARN_ON(page_to_pgoff(page) != index);
+ WARN_ON(page_to_index(page) != index);
wait_on_page_writeback(page);
truncate_inode_page(mapping, page);
unlock_page(page);
@@ -492,7 +492,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
if (!trylock_page(page))
continue;
- WARN_ON(page_to_pgoff(page) != index);
+ WARN_ON(page_to_index(page) != index);
/* Middle of THP: skip */
if (PageTransTail(page)) {
@@ -612,7 +612,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
}
lock_page(page);
- WARN_ON(page_to_pgoff(page) != index);
+ WARN_ON(page_to_index(page) != index);
if (page->mapping != mapping) {
unlock_page(page);
continue;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 76fda2268148..d75cdf360730 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2354,6 +2354,8 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
}
}
+ cond_resched();
+
if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
continue;
diff --git a/mm/workingset.c b/mm/workingset.c
index 617475f529f4..fb1f9183d89a 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -348,7 +348,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
local_irq_enable();
- if (memcg_kmem_enabled()) {
+ if (sc->memcg) {
pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
LRU_ALL_FILE);
} else {
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8de138d3306b..f2531ad66b68 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
skb_gro_pull(skb, sizeof(*vhdr));
skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index e0e1a88c3e58..d2905a855d1b 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -63,7 +63,7 @@ enum batadv_dbg_level {
BATADV_DBG_NC = BIT(5),
BATADV_DBG_MCAST = BIT(6),
BATADV_DBG_TP_METER = BIT(7),
- BATADV_DBG_ALL = 127,
+ BATADV_DBG_ALL = 255,
};
#ifdef CONFIG_BATMAN_ADV_DEBUG
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 5f3bfc41aeb1..7c8d16086f0f 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -544,7 +544,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
if (bat_priv->algo_ops->neigh.hardif_init)
bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
- hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
+ hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
out:
spin_unlock_bh(&hard_iface->neigh_list_lock);
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 2333777f919d..8af1611b8ab2 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg)
primary_if = batadv_primary_if_get_selected(bat_priv);
if (unlikely(!primary_if)) {
err = BATADV_TP_REASON_DST_UNREACHABLE;
+ tp_vars->reason = err;
goto out;
}
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index d020299baba4..1904a93f47d5 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -1090,7 +1090,6 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
{
struct hci_conn *hcon;
struct hci_dev *hdev;
- bdaddr_t *src = BDADDR_ANY;
int n;
n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
@@ -1101,7 +1100,8 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
if (n < 7)
return -EINVAL;
- hdev = hci_get_route(addr, src);
+ /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
+ hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
if (!hdev)
return -ENOENT;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3809617aa98d..dc59eae54717 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -613,7 +613,7 @@ int hci_conn_del(struct hci_conn *conn)
return 0;
}
-struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
+struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
int use_src = bacmp(src, BDADDR_ANY);
struct hci_dev *hdev = NULL, *d;
@@ -634,7 +634,29 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
*/
if (use_src) {
- if (!bacmp(&d->bdaddr, src)) {
+ bdaddr_t id_addr;
+ u8 id_addr_type;
+
+ if (src_type == BDADDR_BREDR) {
+ if (!lmp_bredr_capable(d))
+ continue;
+ bacpy(&id_addr, &d->bdaddr);
+ id_addr_type = BDADDR_BREDR;
+ } else {
+ if (!lmp_le_capable(d))
+ continue;
+
+ hci_copy_identity_address(d, &id_addr,
+ &id_addr_type);
+
+ /* Convert from HCI to three-value type */
+ if (id_addr_type == ADDR_LE_DEV_PUBLIC)
+ id_addr_type = BDADDR_LE_PUBLIC;
+ else
+ id_addr_type = BDADDR_LE_RANDOM;
+ }
+
+ if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
hdev = d; break;
}
} else {
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e2288421fe6b..1015d9c8d97d 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -969,41 +969,38 @@ void __hci_req_enable_advertising(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
-static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
- size_t complete_len;
size_t short_len;
- int max_len;
-
- max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
- complete_len = strlen(hdev->dev_name);
- short_len = strlen(hdev->short_name);
-
- /* no space left for name */
- if (max_len < 1)
- return ad_len;
+ size_t complete_len;
- /* no name set */
- if (!complete_len)
+ /* no space left for name (+ NULL + type + len) */
+ if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
return ad_len;
- /* complete name fits and is eq to max short name len or smaller */
- if (complete_len <= max_len &&
- complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
+ /* use complete name if present and fits */
+ complete_len = strlen(hdev->dev_name);
+ if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len);
- }
+ hdev->dev_name, complete_len + 1);
- /* short name set and fits */
- if (short_len && short_len <= max_len) {
+ /* use short name if present */
+ short_len = strlen(hdev->short_name);
+ if (short_len)
return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->short_name, short_len);
- }
+ hdev->short_name, short_len + 1);
- /* no short name set so shorten complete name */
- if (!short_len) {
- return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->dev_name, max_len);
+ /* use shortened full name if present, we already know that name
+ * is longer than HCI_MAX_SHORT_NAME_LENGTH
+ */
+ if (complete_len) {
+ u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+
+ memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
+ name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
+ sizeof(name));
}
return ad_len;
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 6b06629245a8..dde77bd59f91 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -106,6 +106,8 @@ static inline void hci_update_background_scan(struct hci_dev *hdev)
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
+
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
u8 *data, u8 data_len)
{
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index d4cad29b033f..577f1c01454a 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7060,7 +7060,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
dst_type, __le16_to_cpu(psm));
- hdev = hci_get_route(dst, &chan->src);
+ hdev = hci_get_route(dst, &chan->src, chan->src_type);
if (!hdev)
return -EHOSTUNREACH;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 736038085feb..1fba2a03f8ae 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -6017,7 +6017,15 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
+static u8 calculate_name_len(struct hci_dev *hdev)
+{
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+
+ return append_local_name(hdev, buf, 0);
+}
+
+static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
+ bool is_adv_data)
{
u8 max_len = HCI_MAX_AD_LENGTH;
@@ -6030,9 +6038,8 @@ static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
max_len -= 3;
} else {
- /* at least 1 byte of name should fit in */
if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
- max_len -= 3;
+ max_len -= calculate_name_len(hdev);
if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
max_len -= 4;
@@ -6063,12 +6070,13 @@ static bool appearance_managed(u32 adv_flags)
return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}
-static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
+static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+ u8 len, bool is_adv_data)
{
int i, cur_len;
u8 max_len;
- max_len = tlv_data_max_len(adv_flags, is_adv_data);
+ max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
if (len > max_len)
return false;
@@ -6215,8 +6223,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
- !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
+ if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
+ !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
cp->scan_rsp_len, false)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -6429,8 +6437,8 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
rp.instance = cp->instance;
rp.flags = cp->flags;
- rp.max_adv_data_len = tlv_data_max_len(flags, true);
- rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
+ rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
+ rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 8e385a0ae60e..2f2cb5e27cdd 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -178,7 +178,7 @@ static void rfcomm_reparent_device(struct rfcomm_dev *dev)
struct hci_dev *hdev;
struct hci_conn *conn;
- hdev = hci_get_route(&dev->dst, &dev->src);
+ hdev = hci_get_route(&dev->dst, &dev->src, BDADDR_BREDR);
if (!hdev)
return;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f52bcbf2e58c..3125ce670c2f 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -219,7 +219,7 @@ static int sco_connect(struct sock *sk)
BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
- hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src);
+ hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
if (!hdev)
return -EHOSTUNREACH;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index c5fea9393946..2136e45f5277 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
mod_timer(&query->timer, jiffies);
}
-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
- spin_lock(&br->multicast_lock);
if (br->multicast_disabled || !netif_running(br->dev))
- goto out;
+ return;
br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
if (port->multicast_router == MDB_RTR_TYPE_PERM &&
hlist_unhashed(&port->rlist))
br_multicast_add_router(br, port);
+}
-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+ struct net_bridge *br = port->br;
+
+ spin_lock(&br->multicast_lock);
+ __br_multicast_enable_port(port);
spin_unlock(&br->multicast_lock);
}
@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
- int err = 0;
struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_port *port;
+ int err = 0;
spin_lock_bh(&br->multicast_lock);
if (br->multicast_disabled == !val)
@@ -2023,10 +2029,9 @@ rollback:
goto rollback;
}
- br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
- br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+ br_multicast_open(br);
+ list_for_each_entry(port, &br->port_list, list)
+ __br_multicast_enable_port(port);
unlock:
spin_unlock_bh(&br->multicast_lock);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8e999ffdf28b..436a7537e6a9 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -77,7 +77,7 @@
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
-#define CAN_BCM_VERSION "20160617"
+#define CAN_BCM_VERSION "20161123"
MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
@@ -109,8 +109,9 @@ struct bcm_op {
u32 count;
u32 nframes;
u32 currframe;
- struct canfd_frame *frames;
- struct canfd_frame *last_frames;
+ /* void pointers to arrays of struct can[fd]_frame */
+ void *frames;
+ void *last_frames;
struct canfd_frame sframe;
struct canfd_frame last_sframe;
struct sock *sk;
@@ -681,7 +682,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
+ bcm_rx_update_and_send(op, op->last_frames, rxframe);
goto rx_starttimer;
}
@@ -1068,7 +1069,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
if (msg_head->nframes) {
/* update CAN frames content */
- err = memcpy_from_msg((u8 *)op->frames, msg,
+ err = memcpy_from_msg(op->frames, msg,
msg_head->nframes * op->cfsiz);
if (err < 0)
return err;
@@ -1118,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
}
if (msg_head->nframes) {
- err = memcpy_from_msg((u8 *)op->frames, msg,
+ err = memcpy_from_msg(op->frames, msg,
msg_head->nframes * op->cfsiz);
if (err < 0) {
if (op->frames != &op->sframe)
@@ -1163,6 +1164,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
/* check flags */
if (op->flags & RX_RTR_FRAME) {
+ struct canfd_frame *frame0 = op->frames;
/* no timers in RTR-mode */
hrtimer_cancel(&op->thrtimer);
@@ -1174,8 +1176,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
* prevent a full-load-loopback-test ... ;-]
*/
if ((op->flags & TX_CP_CAN_ID) ||
- (op->frames[0].can_id == op->can_id))
- op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
+ (frame0->can_id == op->can_id))
+ frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
} else {
if (op->flags & SETTIMER) {
@@ -1549,24 +1551,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct bcm_sock *bo = bcm_sk(sk);
+ int ret = 0;
if (len < sizeof(*addr))
return -EINVAL;
- if (bo->bound)
- return -EISCONN;
+ lock_sock(sk);
+
+ if (bo->bound) {
+ ret = -EISCONN;
+ goto fail;
+ }
/* bind a device to this socket */
if (addr->can_ifindex) {
struct net_device *dev;
dev = dev_get_by_index(&init_net, addr->can_ifindex);
- if (!dev)
- return -ENODEV;
-
+ if (!dev) {
+ ret = -ENODEV;
+ goto fail;
+ }
if (dev->type != ARPHRD_CAN) {
dev_put(dev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
bo->ifindex = dev->ifindex;
@@ -1577,17 +1586,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
bo->ifindex = 0;
}
- bo->bound = 1;
-
if (proc_dir) {
/* unique socket address as filename */
sprintf(bo->procname, "%lu", sock_i_ino(sk));
bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
proc_dir,
&bcm_proc_fops, sk);
+ if (!bo->bcm_proc_read) {
+ ret = -ENOMEM;
+ goto fail;
+ }
}
- return 0;
+ bo->bound = 1;
+
+fail:
+ release_sock(sk);
+
+ return ret;
}
static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
index 7d54e944de5e..dcbe67ff3e2b 100644
--- a/net/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -34,7 +34,8 @@ void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
fl->object_size = le32_to_cpu(legacy->fl_object_size);
fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
- if (fl->pool_id == 0)
+ if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
+ fl->stripe_count == 0 && fl->object_size == 0)
fl->pool_id = -1;
}
EXPORT_SYMBOL(ceph_file_layout_from_legacy);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d9bf7a1d0a58..e6ae15bc41b7 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4094,6 +4094,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
osd_init(&osdc->homeless_osd);
osdc->homeless_osd.o_osdc = osdc;
osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
+ osdc->last_linger_id = CEPH_LINGER_ID_START;
osdc->linger_requests = RB_ROOT;
osdc->map_checks = RB_ROOT;
osdc->linger_map_checks = RB_ROOT;
diff --git a/net/core/dev.c b/net/core/dev.c
index 4bc19a164ba5..6666b28b6815 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
- if (skb_orphan_frags(skb, GFP_ATOMIC) ||
- unlikely(!is_skb_forwardable(dev, skb))) {
- atomic_long_inc(&dev->rx_dropped);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
+ int ret = ____dev_forward_skb(dev, skb);
- skb_scrub_packet(skb, true);
- skb->priority = 0;
- skb->protocol = eth_type_trans(skb, dev);
- skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ if (likely(!ret)) {
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ }
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
@@ -2484,7 +2479,7 @@ int skb_checksum_help(struct sk_buff *skb)
goto out;
}
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
skb->ip_summed = CHECKSUM_NONE;
out:
@@ -3035,6 +3030,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
}
return head;
}
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
@@ -4511,6 +4507,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->recursion_counter = 0;
NAPI_GRO_CB(skb)->is_fou = 0;
NAPI_GRO_CB(skb)->is_atomic = 1;
NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
@@ -5511,10 +5508,14 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
{
struct netdev_adjacent *lower;
- lower = list_first_or_null_rcu(&dev->all_adj_list.lower,
- struct netdev_adjacent, list);
+ lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
- return lower ? lower->dev : NULL;
+ if (&lower->list == &dev->all_adj_list.lower)
+ return NULL;
+
+ *iter = &lower->list;
+
+ return lower->dev;
}
EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 977489820eb9..047a1752ece1 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -2479,6 +2479,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GET_TS_INFO:
case ETHTOOL_GEEE:
case ETHTOOL_GTUNABLE:
+ case ETHTOOL_GLINKSETTINGS:
break;
default:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
diff --git a/net/core/filter.c b/net/core/filter.c
index 00351cdf7d0c..b391209838ef 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
return dev_forward_skb(dev, skb);
}
+static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ int ret = ____dev_forward_skb(dev, skb);
+
+ if (likely(!ret)) {
+ skb->dev = dev;
+ ret = netif_rx(skb);
+ }
+
+ return ret;
+}
+
static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
int ret;
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
return ret;
}
+static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
+ u32 flags)
+{
+ /* skb->mac_len is not set on normal egress */
+ unsigned int mlen = skb->network_header - skb->mac_header;
+
+ __skb_pull(skb, mlen);
+
+ /* At ingress, the mac header has already been pulled once.
+ * At egress, skb_postpull_rcsum has to be done in case that
+ * the skb originated from ingress (i.e. a forwarded skb)
+ * to ensure that rcsum starts at net header.
+ */
+ if (!skb_at_tc_ingress(skb))
+ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ skb_pop_mac_header(skb);
+ skb_reset_mac_len(skb);
+ return flags & BPF_F_INGRESS ?
+ __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
+ u32 flags)
+{
+ bpf_push_mac_rcsum(skb);
+ return flags & BPF_F_INGRESS ?
+ __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
+ u32 flags)
+{
+ switch (dev->type) {
+ case ARPHRD_TUNNEL:
+ case ARPHRD_TUNNEL6:
+ case ARPHRD_SIT:
+ case ARPHRD_IPGRE:
+ case ARPHRD_VOID:
+ case ARPHRD_NONE:
+ return __bpf_redirect_no_mac(skb, dev, flags);
+ default:
+ return __bpf_redirect_common(skb, dev, flags);
+ }
+}
+
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
struct net_device *dev;
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
return -ENOMEM;
}
- bpf_push_mac_rcsum(clone);
-
- return flags & BPF_F_INGRESS ?
- __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
+ return __bpf_redirect(clone, dev, flags);
}
static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb)
return -EINVAL;
}
- bpf_push_mac_rcsum(skb);
-
- return ri->flags & BPF_F_INGRESS ?
- __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+ return __bpf_redirect(skb, dev, ri->flags);
}
static const struct bpf_func_proto bpf_redirect_proto = {
diff --git a/net/core/flow.c b/net/core/flow.c
index 3937b1b68d5b..18e8893d4be5 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -95,7 +95,6 @@ static void flow_cache_gc_task(struct work_struct *work)
list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
flow_entry_kill(fce, xfrm);
atomic_dec(&xfrm->flow_cache_gc_count);
- WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
}
}
@@ -236,9 +235,8 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
if (fcp->hash_count > fc->high_watermark)
flow_cache_shrink(fc, fcp);
- if (fcp->hash_count > 2 * fc->high_watermark ||
- atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
- atomic_inc(&net->xfrm.flow_cache_genid);
+ if (atomic_read(&net->xfrm.flow_cache_gc_count) >
+ 2 * num_online_cpus() * fc->high_watermark) {
flo = ERR_PTR(-ENOBUFS);
goto ret_object;
}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1a7b80f73376..c6d8207ffa7e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_keyid *key_keyid;
bool skip_vlan = false;
u8 ip_proto = 0;
- bool ret = false;
+ bool ret;
if (!data) {
data = skb->data;
@@ -246,15 +246,13 @@ ipv6:
case htons(ETH_P_8021AD):
case htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
+ struct vlan_hdr _vlan;
+ bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
- if (skb_vlan_tag_present(skb))
+ if (vlan_tag_present)
proto = skb->protocol;
- if (!skb_vlan_tag_present(skb) ||
- proto == cpu_to_be16(ETH_P_8021Q) ||
- proto == cpu_to_be16(ETH_P_8021AD)) {
- struct vlan_hdr _vlan;
-
+ if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
data, hlen, &_vlan);
if (!vlan)
@@ -272,7 +270,7 @@ ipv6:
FLOW_DISSECTOR_KEY_VLAN,
target_container);
- if (skb_vlan_tag_present(skb)) {
+ if (vlan_tag_present) {
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
key_vlan->vlan_priority =
(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
@@ -551,12 +549,17 @@ ip_proto_again:
out_good:
ret = true;
-out_bad:
+ key_control->thoff = (u16)nhoff;
+out:
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
- key_control->thoff = (u16)nhoff;
return ret;
+
+out_bad:
+ ret = false;
+ key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
+ goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
@@ -1010,4 +1013,4 @@ static int __init init_default_flow_dissectors(void)
return 0;
}
-late_initcall_sync(init_default_flow_dissectors);
+core_initcall(init_default_flow_dissectors);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 989434f36f96..7001da910c6b 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -215,13 +215,16 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
+ unsigned long flags;
bool alloc;
int id;
- spin_lock_bh(&net->nsid_lock);
+ if (atomic_read(&net->count) == 0)
+ return NETNSA_NSID_NOT_ASSIGNED;
+ spin_lock_irqsave(&net->nsid_lock, flags);
alloc = atomic_read(&peer->count) == 0 ? false : true;
id = __peernet2id_alloc(net, peer, &alloc);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
return id;
@@ -230,11 +233,12 @@ int peernet2id_alloc(struct net *net, struct net *peer)
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
+ unsigned long flags;
int id;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
id = __peernet2id(net, peer);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
return id;
}
EXPORT_SYMBOL(peernet2id);
@@ -249,17 +253,18 @@ bool peernet_has_id(struct net *net, struct net *peer)
struct net *get_net_ns_by_id(struct net *net, int id)
{
+ unsigned long flags;
struct net *peer;
if (id < 0)
return NULL;
rcu_read_lock();
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
peer = idr_find(&net->netns_ids, id);
if (peer)
get_net(peer);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
rcu_read_unlock();
return peer;
@@ -422,17 +427,17 @@ static void cleanup_net(struct work_struct *work)
for_each_net(tmp) {
int id;
- spin_lock_bh(&tmp->nsid_lock);
+ spin_lock_irq(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- spin_unlock_bh(&tmp->nsid_lock);
+ spin_unlock_irq(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id);
}
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irq(&net->nsid_lock);
idr_destroy(&net->netns_ids);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irq(&net->nsid_lock);
}
rtnl_unlock();
@@ -561,6 +566,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ unsigned long flags;
struct net *peer;
int nsid, err;
@@ -581,15 +587,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
if (IS_ERR(peer))
return PTR_ERR(peer);
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
if (__peernet2id(net, peer) >= 0) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
err = -EEXIST;
goto out;
}
err = alloc_netid(net, peer, nsid);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
@@ -711,10 +717,11 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
.idx = 0,
.s_idx = cb->args[0],
};
+ unsigned long flags;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
cb->args[0] = net_cb.idx;
return skb->len;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5219a9e2127a..306b8f0e03c1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -216,8 +216,8 @@
#define M_QUEUE_XMIT 2 /* Inject packet into qdisc */
/* If lock -- protects updating of if_list */
-#define if_lock(t) spin_lock(&(t->if_lock));
-#define if_unlock(t) spin_unlock(&(t->if_lock));
+#define if_lock(t) mutex_lock(&(t->if_lock));
+#define if_unlock(t) mutex_unlock(&(t->if_lock));
/* Used to help with determining the pkts on receive */
#define PKTGEN_MAGIC 0xbe9be955
@@ -423,7 +423,7 @@ struct pktgen_net {
};
struct pktgen_thread {
- spinlock_t if_lock; /* for list of devices */
+ struct mutex if_lock; /* for list of devices */
struct list_head if_list; /* All device here */
struct list_head th_list;
struct task_struct *tsk;
@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
{
struct pktgen_thread *t;
+ mutex_lock(&pktgen_thread_lock);
+
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
struct pktgen_dev *pkt_dev;
- rcu_read_lock();
- list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+ if_lock(t);
+ list_for_each_entry(pkt_dev, &t->if_list, list) {
if (pkt_dev->odev != dev)
continue;
@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
dev->name);
break;
}
- rcu_read_unlock();
+ if_unlock(t);
}
+ mutex_unlock(&pktgen_thread_lock);
}
static int pktgen_device_event(struct notifier_block *unused,
@@ -3762,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
return -ENOMEM;
}
- spin_lock_init(&t->if_lock);
+ mutex_init(&t->if_lock);
t->cpu = cpu;
INIT_LIST_HEAD(&t->if_list);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fb7348f13501..a6196cf844f6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype)
rtnl_msg_handlers[protocol][msgindex].doit = NULL;
rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
+ rtnl_msg_handlers[protocol][msgindex].calcit = NULL;
return 0;
}
@@ -839,18 +840,20 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
(ext_filter_mask & RTEXT_FILTER_VF)) {
int num_vfs = dev_num_vf(dev->dev.parent);
- size_t size = nla_total_size(sizeof(struct nlattr));
- size += nla_total_size(num_vfs * sizeof(struct nlattr));
+ size_t size = nla_total_size(0);
size += num_vfs *
- (nla_total_size(sizeof(struct ifla_vf_mac)) +
- nla_total_size(MAX_VLAN_LIST_LEN *
- sizeof(struct nlattr)) +
+ (nla_total_size(0) +
+ nla_total_size(sizeof(struct ifla_vf_mac)) +
+ nla_total_size(sizeof(struct ifla_vf_vlan)) +
+ nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
nla_total_size(MAX_VLAN_LIST_LEN *
sizeof(struct ifla_vf_vlan_info)) +
nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
+ nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
nla_total_size(sizeof(struct ifla_vf_rate)) +
nla_total_size(sizeof(struct ifla_vf_link_state)) +
nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
+ nla_total_size(0) + /* nest IFLA_VF_STATS */
/* IFLA_VF_STATS_RX_PACKETS */
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_PACKETS */
@@ -898,7 +901,8 @@ static size_t rtnl_port_size(const struct net_device *dev,
static size_t rtnl_xdp_size(const struct net_device *dev)
{
- size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */
+ size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
+ nla_total_size(1); /* XDP_ATTACHED */
if (!dev->netdev_ops->ndo_xdp)
return 0;
@@ -927,8 +931,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_PROMISCUITY */
+ nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
+ nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
- + nla_total_size(4) /* IFLA_MAX_GSO_SEGS */
- + nla_total_size(4) /* IFLA_MAX_GSO_SIZE */
+ + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
+ + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_CARRIER_CHANGES */
@@ -1605,7 +1609,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
head = &net->dev_index_head[h];
hlist_for_each_entry(dev, head, index_hlist) {
if (link_dump_filtered(dev, master_idx, kind_ops))
- continue;
+ goto cont;
if (idx < s_idx)
goto cont;
err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -2733,7 +2737,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
ext_filter_mask));
}
- return min_ifinfo_dump_size;
+ return nlmsg_total_size(min_ifinfo_dump_size);
}
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
@@ -2848,7 +2852,10 @@ nla_put_failure:
static inline size_t rtnl_fdb_nlmsg_size(void)
{
- return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
+ return NLMSG_ALIGN(sizeof(struct ndmsg)) +
+ nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
+ nla_total_size(sizeof(u16)) + /* NDA_VLAN */
+ 0;
}
static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
diff --git a/net/core/sock.c b/net/core/sock.c
index c73e28fc9c2a..00a074dbfe9b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
EXPORT_SYMBOL(sock_queue_rcv_skb);
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
- const int nested, unsigned int trim_cap)
+ const int nested, unsigned int trim_cap, bool refcounted)
{
int rc = NET_RX_SUCCESS;
@@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
bh_unlock_sock(sk);
out:
- sock_put(sk);
+ if (refcounted)
+ sock_put(sk);
return rc;
discard_and_relse:
kfree_skb(skb);
@@ -714,7 +715,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
+ sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
/* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
@@ -750,7 +751,7 @@ set_rcvbuf:
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
- sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
+ sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_RCVBUFFORCE:
@@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
newsk->sk_err = 0;
+ newsk->sk_err_soft = 0;
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
atomic64_set(&newsk->sk_cookie, 0);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index e92b759d906c..9a1a352fd1eb 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -129,7 +129,6 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
return 0;
}
-EXPORT_SYMBOL(reuseport_add_sock);
static void reuseport_free_rcu(struct rcu_head *head)
{
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 345a3aeb8c7e..edbe59d203ef 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
{
const struct iphdr *iph = (struct iphdr *)skb->data;
const u8 offset = iph->ihl << 2;
- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
+ const struct dccp_hdr *dh;
struct dccp_sock *dp;
struct inet_sock *inet;
const int type = icmp_hdr(skb)->type;
@@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
int err;
struct net *net = dev_net(skb->dev);
- if (skb->len < offset + sizeof(*dh) ||
- skb->len < offset + __dccp_basic_hdr_len(dh)) {
- __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
- return;
- }
+ /* Only need dccph_dport & dccph_sport which are the first
+ * 4 bytes in dccp header.
+ * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+ */
+ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+ dh = (struct dccp_hdr *)(skb->data + offset);
sk = __inet_lookup_established(net, &dccp_hashinfo,
iph->daddr, dh->dccph_dport,
@@ -698,6 +700,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
unsigned int cscov;
+ u8 dccph_doff;
if (skb->pkt_type != PACKET_HOST)
return 1;
@@ -719,18 +722,19 @@ int dccp_invalid_packet(struct sk_buff *skb)
/*
* If P.Data Offset is too small for packet type, drop packet and return
*/
- if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
- DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
+ dccph_doff = dh->dccph_doff;
+ if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
+ DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
return 1;
}
/*
* If P.Data Offset is too large for packet, drop packet and return
*/
- if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
- DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
+ if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
+ DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
return 1;
}
-
+ dh = dccp_hdr(skb);
/*
* If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
* has short sequence numbers), drop packet and return
@@ -868,7 +872,7 @@ lookup:
goto discard_and_relse;
nf_reset(skb);
- return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 3828f94b234c..715e5d1dc107 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
+ const struct dccp_hdr *dh;
struct dccp_sock *dp;
struct ipv6_pinfo *np;
struct sock *sk;
@@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
__u64 seq;
struct net *net = dev_net(skb->dev);
- if (skb->len < offset + sizeof(*dh) ||
- skb->len < offset + __dccp_basic_hdr_len(dh)) {
- __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
- ICMP6_MIB_INERRORS);
- return;
- }
+ /* Only need dccph_dport & dccph_sport which are the first
+ * 4 bytes in dccp header.
+ * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+ */
+ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+ dh = (struct dccp_hdr *)(skb->data + offset);
sk = __inet6_lookup_established(net, &dccp_hashinfo,
&hdr->daddr, dh->dccph_dport,
@@ -738,7 +739,8 @@ lookup:
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
- return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
+ refcounted) ? -1 : 0;
no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
+ .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 41e65804ddf5..9fe25bf63296 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout)
__kfree_skb(skb);
}
+ /* If socket has been already reset kill it. */
+ if (sk->sk_state == DCCP_CLOSED)
+ goto adjudge_to_death;
+
if (data_was_unread) {
/* Unread data was tossed, send an appropriate Reset Code */
DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index a6902c1e2f28..7899919cd9f0 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -233,6 +233,8 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
genphy_read_status(phydev);
if (ds->ops->adjust_link)
ds->ops->adjust_link(ds, port, phydev);
+
+ put_device(&phydev->mdio.dev);
}
return 0;
@@ -504,15 +506,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
void dsa_cpu_dsa_destroy(struct device_node *port_dn)
{
- struct phy_device *phydev;
-
- if (of_phy_is_fixed_link(port_dn)) {
- phydev = of_phy_find_device(port_dn);
- if (phydev) {
- phy_device_free(phydev);
- fixed_phy_unregister(phydev);
- }
- }
+ if (of_phy_is_fixed_link(port_dn))
+ of_phy_deregister_fixed_link(port_dn);
}
static void dsa_switch_destroy(struct dsa_switch *ds)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index f8a7d9aab437..5fff951a0a49 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -28,8 +28,10 @@ static struct dsa_switch_tree *dsa_get_dst(u32 tree)
struct dsa_switch_tree *dst;
list_for_each_entry(dst, &dsa_switch_trees, list)
- if (dst->tree == tree)
+ if (dst->tree == tree) {
+ kref_get(&dst->refcount);
return dst;
+ }
return NULL;
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6b1282c006b1..30e2e21d7619 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1125,7 +1125,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
p->phy_interface = mode;
phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
- if (of_phy_is_fixed_link(port_dn)) {
+ if (!phy_dn && of_phy_is_fixed_link(port_dn)) {
/* In the case of a fixed PHY, the DT node associated
* to the fixed PHY is the Port DT node
*/
@@ -1135,7 +1135,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
return ret;
}
phy_is_fixed = true;
- phy_dn = port_dn;
+ phy_dn = of_node_get(port_dn);
}
if (ds->ops->get_phy_flags)
@@ -1154,6 +1154,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
ret = dsa_slave_phy_connect(p, slave_dev, phy_id);
if (ret) {
netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret);
+ of_node_put(phy_dn);
return ret;
}
} else {
@@ -1162,6 +1163,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
phy_flags,
p->phy_interface);
}
+
+ of_node_put(phy_dn);
}
if (p->phy && phy_is_fixed)
@@ -1174,6 +1177,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
ret = dsa_slave_phy_connect(p, slave_dev, p->port);
if (ret) {
netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
+ if (phy_is_fixed)
+ of_phy_deregister_fixed_link(port_dn);
return ret;
}
}
@@ -1289,10 +1294,18 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
void dsa_slave_destroy(struct net_device *slave_dev)
{
struct dsa_slave_priv *p = netdev_priv(slave_dev);
+ struct dsa_switch *ds = p->parent;
+ struct device_node *port_dn;
+
+ port_dn = ds->ports[p->port].dn;
netif_carrier_off(slave_dev);
- if (p->phy)
+ if (p->phy) {
phy_disconnect(p->phy);
+
+ if (of_phy_is_fixed_link(port_dn))
+ of_phy_deregister_fixed_link(port_dn);
+ }
unregister_netdev(slave_dev);
free_netdev(slave_dev);
}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 66dff5e3d772..02acfff36028 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
skb_gro_pull(skb, sizeof(*eh));
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 5ee1d43f1310..4ebe2aa3e7d3 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -300,10 +300,6 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
struct hsr_frame_info *frame)
{
- struct net_device *master_dev;
-
- master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev;
-
if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
frame->is_local_exclusive = true;
skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 300b06888fdf..b54b3ca939db 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -715,6 +715,7 @@ config DEFAULT_TCP_CONG
default "reno" if DEFAULT_RENO
default "dctcp" if DEFAULT_DCTCP
default "cdg" if DEFAULT_CDG
+ default "bbr" if DEFAULT_BBR
default "cubic"
config TCP_MD5SIG
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1effc986739e..215143246e4b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -533,9 +533,9 @@ EXPORT_SYMBOL(inet_dgram_connect);
static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ add_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending += writebias;
/* Basic assumption: if someone sets sk->sk_err, he _must_
@@ -545,13 +545,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
*/
while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
release_sock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk_sleep(sk), &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending -= writebias;
return timeo;
}
@@ -1234,7 +1233,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
/* fixed ID is invalid if DF bit is not set */
- if (fixedid && !(iph->frag_off & htons(IP_DF)))
+ if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
goto out;
}
@@ -1391,7 +1390,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d95631d09248..20fb25e3027b 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
esph = (void *)skb_push(skb, 4);
*seqhi = esph->spi;
esph->spi = esph->seq_no;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
+ esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
aead_request_set_callback(req, 0, esp_input_done_esn, skb);
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c3b80478226e..161fc0f0d752 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -151,7 +151,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old,
int fib_unmerge(struct net *net)
{
- struct fib_table *old, *new;
+ struct fib_table *old, *new, *main_table;
/* attempt to fetch local table if it has been allocated */
old = fib_get_table(net, RT_TABLE_LOCAL);
@@ -162,11 +162,21 @@ int fib_unmerge(struct net *net)
if (!new)
return -ENOMEM;
+ /* table is already unmerged */
+ if (new == old)
+ return 0;
+
/* replace merged table with clean table */
- if (new != old) {
- fib_replace_table(net, old, new);
- fib_free_table(old);
- }
+ fib_replace_table(net, old, new);
+ fib_free_table(old);
+
+ /* attempt to fetch main table if it has been allocated */
+ main_table = fib_get_table(net, RT_TABLE_MAIN);
+ if (!main_table)
+ return 0;
+
+ /* flush local entries from main table */
+ fib_table_flush_external(main_table);
return 0;
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 31cef3602585..026f309c51e9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1743,8 +1743,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
local_l = fib_find_node(lt, &local_tp, l->key);
if (fib_insert_alias(lt, local_tp, local_l, new_fa,
- NULL, l->key))
+ NULL, l->key)) {
+ kmem_cache_free(fn_alias_kmem, new_fa);
goto out;
+ }
}
/* stop loop if key wrapped back to 0 */
@@ -1760,6 +1762,71 @@ out:
return NULL;
}
+/* Caller must hold RTNL */
+void fib_table_flush_external(struct fib_table *tb)
+{
+ struct trie *t = (struct trie *)tb->tb_data;
+ struct key_vector *pn = t->kv;
+ unsigned long cindex = 1;
+ struct hlist_node *tmp;
+ struct fib_alias *fa;
+
+ /* walk trie in reverse order */
+ for (;;) {
+ unsigned char slen = 0;
+ struct key_vector *n;
+
+ if (!(cindex--)) {
+ t_key pkey = pn->key;
+
+ /* cannot resize the trie vector */
+ if (IS_TRIE(pn))
+ break;
+
+ /* resize completed node */
+ pn = resize(t, pn);
+ cindex = get_index(pkey, pn);
+
+ continue;
+ }
+
+ /* grab the next available node */
+ n = get_child(pn, cindex);
+ if (!n)
+ continue;
+
+ if (IS_TNODE(n)) {
+ /* record pn and cindex for leaf walking */
+ pn = n;
+ cindex = 1ul << n->bits;
+
+ continue;
+ }
+
+ hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+ /* if alias was cloned to local then we just
+ * need to remove the local copy from main
+ */
+ if (tb->tb_id != fa->tb_id) {
+ hlist_del_rcu(&fa->fa_list);
+ alias_free_mem_rcu(fa);
+ continue;
+ }
+
+ /* record local slen */
+ slen = fa->fa_slen;
+ }
+
+ /* update leaf slen */
+ n->slen = slen;
+
+ if (hlist_empty(&n->leaf)) {
+ put_child_root(pn, n->key, NULL);
+ node_free(n);
+ }
+ }
+}
+
/* Caller must hold RTNL. */
int fib_table_flush(struct net *net, struct fib_table *tb)
{
@@ -2413,22 +2480,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
struct key_vector *l, **tp = &iter->tnode;
t_key key;
- /* use cache location of next-to-find key */
+ /* use cached location of previously found key */
if (iter->pos > 0 && pos >= iter->pos) {
- pos -= iter->pos;
key = iter->key;
} else {
- iter->pos = 0;
+ iter->pos = 1;
key = 0;
}
- while ((l = leaf_walk_rcu(tp, key)) != NULL) {
+ pos -= iter->pos;
+
+ while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) {
key = l->key + 1;
iter->pos++;
-
- if (--pos <= 0)
- break;
-
l = NULL;
/* handle unlikely case of a key wrap */
@@ -2437,7 +2501,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
}
if (l)
- iter->key = key; /* remember it */
+ iter->key = l->key; /* remember it */
else
iter->pos = 0; /* forget it */
@@ -2465,7 +2529,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
return fib_route_get_idx(iter, *pos);
iter->pos = 0;
- iter->key = 0;
+ iter->key = KEY_MAX;
return SEQ_START_TOKEN;
}
@@ -2474,7 +2538,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct fib_route_iter *iter = seq->private;
struct key_vector *l = NULL;
- t_key key = iter->key;
+ t_key key = iter->key + 1;
++*pos;
@@ -2483,7 +2547,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
l = leaf_walk_rcu(&iter->tnode, key);
if (l) {
- iter->key = l->key + 1;
+ iter->key = l->key;
iter->pos++;
} else {
iter->pos = 0;
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index cf50f7e2b012..030d1531e897 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
if (!ops || !ops->callbacks.gro_receive)
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -441,7 +441,7 @@ next_proto:
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 96e0efecefa6..d5cac99170b1 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -229,7 +229,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
skb_gro_postpull_rcsum(skb, greh, grehlen);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 38abe70e595f..48734ee6293f 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -477,7 +477,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
- fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev);
+ fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
rt = __ip_route_output_key_hash(net, fl4,
@@ -502,7 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
if (err)
goto relookup_failed;
- if (inet_addr_type_dev_table(net, skb_in->dev,
+ if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
fl4_dec.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4_dec);
if (IS_ERR(rt2))
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 606cc3e85d2b..15db786d50ed 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -162,7 +162,7 @@ static int unsolicited_report_interval(struct in_device *in_dev)
}
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
@@ -1130,10 +1130,15 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
spin_unlock_bh(&in_dev->mc_tomb_lock);
}
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
+/*
+ * restore ip_mc_list deleted records
+ */
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
struct ip_mc_list *pmc, *pmc_prev;
- struct ip_sf_list *psf, *psf_next;
+ struct ip_sf_list *psf;
+ struct net *net = dev_net(in_dev->dev);
+ __be32 multiaddr = im->multiaddr;
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc_prev = NULL;
@@ -1149,16 +1154,26 @@ static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
in_dev->mc_tomb = pmc->next;
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
+
+ spin_lock_bh(&im->lock);
if (pmc) {
- for (psf = pmc->tomb; psf; psf = psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
+ im->interface = pmc->interface;
+ im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ im->sfmode = pmc->sfmode;
+ if (pmc->sfmode == MCAST_INCLUDE) {
+ im->tomb = pmc->tomb;
+ im->sources = pmc->sources;
+ for (psf = im->sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->crcount;
}
in_dev_put(pmc->interface);
- kfree(pmc);
}
+ spin_unlock_bh(&im->lock);
}
+/*
+ * flush ip_mc_list deleted records
+ */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *nextpmc;
@@ -1366,7 +1381,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
ip_mc_hash_add(in_dev, im);
#ifdef CONFIG_IP_MULTICAST
- igmpv3_del_delrec(in_dev, im->multiaddr);
+ igmpv3_del_delrec(in_dev, im);
#endif
igmp_group_added(im);
if (!in_dev->dead)
@@ -1626,8 +1641,12 @@ void ip_mc_remap(struct in_device *in_dev)
ASSERT_RTNL();
- for_each_pmc_rtnl(in_dev, pmc)
+ for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_del_delrec(in_dev, pmc);
+#endif
igmp_group_added(pmc);
+ }
}
/* Device going down */
@@ -1648,7 +1667,6 @@ void ip_mc_down(struct in_device *in_dev)
in_dev->mr_gq_running = 0;
if (del_timer(&in_dev->mr_gq_timer))
__in_dev_put(in_dev);
- igmpv3_clear_delrec(in_dev);
#endif
ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
@@ -1688,8 +1706,12 @@ void ip_mc_up(struct in_device *in_dev)
#endif
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
- for_each_pmc_rtnl(in_dev, pmc)
+ for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_del_delrec(in_dev, pmc);
+#endif
igmp_group_added(pmc);
+ }
}
/*
@@ -1704,13 +1726,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
/* Deactivate timers */
ip_mc_down(in_dev);
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_clear_delrec(in_dev);
+#endif
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
-
- /* We've dropped the groups in ip_mc_down already */
- ip_mc_clear_src(i);
ip_ma_put(i);
}
}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 77c20a489218..ca97835bfec4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -25,6 +25,7 @@
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
+#include <net/tcp.h>
#include <net/sock_reuseport.h>
static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
@@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port);
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum, const __be32 daddr,
- const int dif)
+ const int dif, bool exact_dif)
{
int score = -1;
struct inet_sock *inet = inet_sk(sk);
@@ -186,7 +187,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
return -1;
score += 4;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
@@ -215,11 +216,12 @@ struct sock *__inet_lookup_listener(struct net *net,
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
int score, hiscore = 0, matches = 0, reuseport = 0;
+ bool exact_dif = inet_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
u32 phash = 0;
sk_for_each_rcu(sk, &ilb->head) {
- score = compute_score(sk, net, hnum, daddr, dif);
+ score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
if (reuseport) {
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 8b4ffd216839..9f0a7b96646f 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb)
if (opt->is_strictroute && rt->rt_uses_gateway)
goto sr_failed;
- IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS;
+ IPCB(skb)->flags |= IPSKB_FORWARDED;
mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
if (ip_exceeds_mtu(skb, mtu)) {
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 05d105832bdb..877bdb02e887 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -107,6 +107,8 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
if (unlikely(!skb))
return 0;
+ skb->protocol = htons(ETH_P_IP);
+
return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, skb_dst(skb)->dev,
dst_output);
@@ -239,19 +241,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
struct sk_buff *segs;
int ret = 0;
- /* common case: fragmentation of segments is not allowed,
- * or seglen is <= mtu
+ /* common case: seglen is <= mtu
*/
- if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) ||
- skb_gso_validate_mtu(skb, mtu))
+ if (skb_gso_validate_mtu(skb, mtu))
return ip_finish_output2(net, sk, skb);
- /* Slowpath - GSO segment length is exceeding the dst MTU.
+ /* Slowpath - GSO segment length exceeds the egress MTU.
*
- * This can happen in two cases:
- * 1) TCP GRO packet, DF bit not set
- * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
- * from host network stack.
+ * This can happen in several cases:
+ * - Forwarding of a TCP GRO skb, when DF flag is not set.
+ * - Forwarding of an skb that arrived on a virtualization interface
+ * (virtio-net/vhost/tap) with TSO/GSO size set by the other network
+ * stack.
+ * - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
+ * interface with a smaller MTU.
+ * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
+ * bridged to a NETIF_F_TSO tunnel stacked over an interface with an
+ * insufficient MTU.
*/
features = netif_skb_features(skb);
BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
@@ -538,7 +544,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
{
struct iphdr *iph;
int ptr;
- struct net_device *dev;
struct sk_buff *skb2;
unsigned int mtu, hlen, left, len, ll_rs;
int offset;
@@ -546,8 +551,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct rtable *rt = skb_rtable(skb);
int err = 0;
- dev = rt->dst.dev;
-
/* for offloaded checksums cleanup checksum before fragmentation */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_checksum_help(skb)))
@@ -1582,7 +1585,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
}
oif = arg->bound_dev_if;
- oif = oif ? : skb->skb_iif;
+ if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+ oif = skb->skb_iif;
flowi4_init_output(&fl4, oif,
IP4_REPLY_MARK(net, skb->mark),
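For context on the ip_finish_output_gso() change above: with the IPSKB_FRAG_SEGS gate removed, the slow path is taken whenever any GSO segment would exceed the egress MTU. A rough sketch of that decision, assuming skb_gso_validate_mtu(), skb_gso_segment() and ip_fragment() behave as elsewhere in this tree (illustrative only, not the exact function body):

/* sketch: segment in software, then IP-fragment each resulting skb */
static int ip_finish_output_gso_sketch(struct net *net, struct sock *sk,
				       struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs;
	int ret = 0;

	if (skb_gso_validate_mtu(skb, mtu))	/* common case: every segment fits */
		return ip_finish_output2(net, sk, skb);

	segs = skb_gso_segment(skb, netif_skb_features(skb) & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	consume_skb(skb);
	while (segs) {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		ret = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
		segs = nskb;
	}
	return ret;
}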
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index af4919792b6a..b8a2d63d1fb8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
}
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
- int offset)
+ int tlen, int offset)
{
__wsum csum = skb->csum;
@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
return;
if (offset != 0)
- csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
- offset, 0));
+ csum = csum_sub(csum,
+ csum_partial(skb_transport_header(skb) + tlen,
+ offset, 0));
put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
}
void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
- int offset)
+ int tlen, int offset)
{
struct inet_sock *inet = inet_sk(skb->sk);
unsigned int flags = inet->cmsg_flags;
@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
}
if (flags & IP_CMSG_CHECKSUM)
- ip_cmsg_recv_checksum(msg, skb, offset);
+ ip_cmsg_recv_checksum(msg, skb, tlen, offset);
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 777bc1883870..fed3d29f9eb3 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
int pkt_len = skb->len - skb_inner_network_offset(skb);
struct net *net = dev_net(rt->dst.dev);
struct net_device *dev = skb->dev;
- int skb_iif = skb->skb_iif;
struct iphdr *iph;
int err;
@@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- if (skb_iif && !(df & htons(IP_DF))) {
- /* Arrived from an ingress interface, got encapsulated, with
- * fragmentation of encapulating frames allowed.
- * If skb is gso, the resulting encapsulated network segments
- * may exceed dst mtu.
- * Allow IP Fragmentation of segments.
- */
- IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
- }
-
/* Push down and install the IP header. */
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5f006e13de56..27089f5ebbb1 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1749,7 +1749,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
vif->dev->stats.tx_bytes += skb->len;
}
- IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS;
+ IPCB(skb)->flags |= IPSKB_FORWARDED;
/* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
* not only before forwarding, but after forwarding on all output
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c3776ff6749f..b3cc1335adbc 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -24,10 +24,11 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
struct flowi4 fl4 = {};
__be32 saddr = iph->saddr;
__u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
+ struct net_device *dev = skb_dst(skb)->dev;
unsigned int hh_len;
if (addr_type == RTN_UNSPEC)
- addr_type = inet_addr_type(net, saddr);
+ addr_type = inet_addr_type_dev_table(net, dev, saddr);
if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
flags |= FLOWI_FLAG_ANYSRC;
else
@@ -40,6 +41,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
fl4.saddr = saddr;
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+ if (!fl4.flowi4_oif)
+ fl4.flowi4_oif = l3mdev_master_ifindex(dev);
fl4.flowi4_mark = skb->mark;
fl4.flowi4_flags = flags;
rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index b31df597fd37..697538464e6e 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1201,8 +1201,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
newinfo->number = compatr->num_entries;
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
- newinfo->hook_entry[i] = info->hook_entry[i];
- newinfo->underflow[i] = info->underflow[i];
+ newinfo->hook_entry[i] = compatr->hook_entry[i];
+ newinfo->underflow[i] = compatr->underflow[i];
}
entry1 = newinfo->entries;
pos = entry1;
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index bf855e64fc45..0c01a270bf9f 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr,
struct in_addr gw = {
.s_addr = (__force __be32)regs->data[priv->sreg_addr],
};
- int oif = regs->data[priv->sreg_dev];
+ int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif);
}
@@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
- if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+ if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
+ goto nla_put_failure;
+ if (priv->sreg_dev &&
nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
goto nla_put_failure;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 7cf7d6e380c2..205e2000d395 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -994,7 +994,7 @@ struct proto ping_prot = {
.init = ping_init_sock,
.close = ping_close,
.connect = ip4_datagram_connect,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.sendmsg = ping_v4_sendmsg,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 90a85c955872..ecbe5a7c2d6d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -918,7 +918,7 @@ struct proto raw_prot = {
.close = raw_close,
.destroy = raw_destroy,
.connect = ip4_datagram_connect,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
.setsockopt = raw_setsockopt,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 62d4d90c1389..2a57566e6e91 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -753,7 +753,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
goto reject_redirect;
}
- n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
+ n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
+ if (!n)
+ n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
if (!IS_ERR(n)) {
if (!(n->nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1cb67de106fe..80bc36b25de2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
container_of(table->data, struct net, ipv4.ping_group_range.range);
unsigned int seq;
do {
- seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+ seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
*low = data[0];
*high = data[1];
- } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+ } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
}
/* Update system visible IP port range */
@@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
kgid_t *data = table->data;
struct net *net =
container_of(table->data, struct net, ipv4.ping_group_range.range);
- write_seqlock(&net->ipv4.ip_local_ports.lock);
+ write_seqlock(&net->ipv4.ping_group_range.lock);
data[0] = low;
data[1] = high;
- write_sequnlock(&net->ipv4.ip_local_ports.lock);
+ write_sequnlock(&net->ipv4.ping_group_range.lock);
}
/* Validate changes from /proc interface. */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3251fe71f39f..814af89c1bd3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1164,7 +1164,7 @@ restart:
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- goto out_err;
+ goto do_error;
sg = !!(sk->sk_route_caps & NETIF_F_SG);
@@ -1241,7 +1241,7 @@ new_segment:
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
- if (i == sysctl_max_skb_frags || !sg) {
+ if (i >= sysctl_max_skb_frags || !sg) {
tcp_mark_push(tp, skb);
goto new_segment;
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1294af4e0127..f9038d6b109e 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -200,8 +200,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
icsk->icsk_ca_ops = ca;
icsk->icsk_ca_setsockopt = 1;
- if (sk->sk_state != TCP_CLOSE)
+ if (sk->sk_state != TCP_CLOSE) {
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
tcp_init_congestion_control(sk);
+ }
}
/* Manage refcounts on socket close. */
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 10d728b6804c..ab37c6775630 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -56,6 +56,7 @@ struct dctcp {
u32 next_seq;
u32 ce_state;
u32 delayed_ack_reserved;
+ u32 loss_cwnd;
};
static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
@@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk)
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
ca->delayed_ack_reserved = 0;
+ ca->loss_cwnd = 0;
ca->ce_state = 0;
dctcp_reset(tp, ca);
@@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk)
static u32 dctcp_ssthresh(struct sock *sk)
{
- const struct dctcp *ca = inet_csk_ca(sk);
+ struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
+ ca->loss_cwnd = tp->snd_cwnd;
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}
@@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
return 0;
}
+static u32 dctcp_cwnd_undo(struct sock *sk)
+{
+ const struct dctcp *ca = inet_csk_ca(sk);
+
+ return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
static struct tcp_congestion_ops dctcp __read_mostly = {
.init = dctcp_init,
.in_ack_event = dctcp_update_alpha,
.cwnd_event = dctcp_cwnd_event,
.ssthresh = dctcp_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
+ .undo_cwnd = dctcp_cwnd_undo,
.set_state = dctcp_state,
.get_info = dctcp_get_info,
.flags = TCP_CONG_NEEDS_ECN,
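The tcp_dctcp.c hunks above give DCTCP the .undo_cwnd hook that loss recovery uses to roll back a window reduction once a loss turns out to be spurious; loss_cwnd remembers the window at the last dctcp_ssthresh() call so the undo can restore at least that value. Roughly what the caller side looks like in this era's tcp_undo_cwnd_reduction() (simplified sketch, not copied verbatim):

	/* sketch: when recovery is undone, ask the CC module for the pre-loss cwnd */
	if (icsk->icsk_ca_ops->undo_cwnd)
		tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
	else
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
	tp->snd_cwnd_stamp = tcp_time_stamp;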
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bd5e8d10893f..2259114c7242 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -86,7 +86,6 @@
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
@@ -1565,6 +1564,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_add_backlog);
+int tcp_filter(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcphdr *th = (struct tcphdr *)skb->data;
+ unsigned int eaten = skb->len;
+ int err;
+
+ err = sk_filter_trim_cap(sk, skb, th->doff * 4);
+ if (!err) {
+ eaten -= skb->len;
+ TCP_SKB_CB(skb)->end_seq -= eaten;
+ }
+ return err;
+}
+EXPORT_SYMBOL(tcp_filter);
+
/*
* From tcp_input.c
*/
@@ -1677,8 +1691,10 @@ process:
nf_reset(skb);
- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard_and_relse;
+ th = (const struct tcphdr *)skb->data;
+ iph = ip_hdr(skb);
skb->dev = NULL;
@@ -1887,7 +1903,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
struct inet_listen_hashbucket *ilb;
- struct inet_connection_sock *icsk;
struct sock *sk = cur;
if (!sk) {
@@ -1909,7 +1924,6 @@ get_sk:
continue;
if (sk->sk_family == st->family)
return sk;
- icsk = inet_csk(sk);
}
spin_unlock_bh(&ilb->lock);
st->offset = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7d96dc2d3d08..5bab6c3f7a2f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1322,7 +1322,7 @@ try_again:
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
- ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
+ ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
err = copied;
if (flags & MSG_TRUNC)
@@ -1345,7 +1345,7 @@ csum_copy_err:
goto try_again;
}
-int udp_disconnect(struct sock *sk, int flags)
+int __udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
/*
@@ -1367,6 +1367,15 @@ int udp_disconnect(struct sock *sk, int flags)
sk_dst_reset(sk);
return 0;
}
+EXPORT_SYMBOL(__udp_disconnect);
+
+int udp_disconnect(struct sock *sk, int flags)
+{
+ lock_sock(sk);
+ __udp_disconnect(sk, flags);
+ release_sock(sk);
+ return 0;
+}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
@@ -1446,7 +1455,7 @@ static void udp_v4_rehash(struct sock *sk)
udp_lib_rehash(sk, new_hash);
}
-static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
@@ -1643,10 +1652,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
if (use_hash2) {
hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
- udp_table.mask;
- hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+ udptable->mask;
+ hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
- hslot = &udp_table.hash2[hash2];
+ hslot = &udptable->hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
@@ -2193,7 +2202,7 @@ int udp_abort(struct sock *sk, int err)
sk->sk_err = err;
sk->sk_error_report(sk);
- udp_disconnect(sk, 0);
+ __udp_disconnect(sk, 0);
release_sock(sk);
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 7e0fe4bdd967..feb50a16398d 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len);
int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
-int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
void udp_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f9333c963607..b2be1d9757ef 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -295,7 +295,7 @@ unflush:
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
- pp = udp_sk(sk)->gro_receive(sk, head, skb);
+ pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index af817158d830..ff450c2aad9b 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -50,7 +50,7 @@ struct proto udplite_prot = {
.sendmsg = udp_sendmsg,
.recvmsg = udp_recvmsg,
.sendpage = udp_sendpage,
- .backlog_rcv = udp_queue_rcv_skb,
+ .backlog_rcv = __udp_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.get_port = udp_v4_get_port,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d8983e15f859..4bc5ba3ae452 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -147,9 +147,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
}
#endif
-static void __ipv6_regen_rndid(struct inet6_dev *idev);
-static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
-static void ipv6_regen_rndid(unsigned long data);
+static void ipv6_regen_rndid(struct inet6_dev *idev);
+static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
static int ipv6_count_addresses(struct inet6_dev *idev);
@@ -184,7 +183,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_work(struct work_struct *w);
-static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
static void addrconf_dad_run(struct inet6_dev *idev);
static void addrconf_rs_timer(unsigned long data);
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -409,9 +408,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
goto err_release;
}
- /* One reference from device. We must do this before
- * we invoke __ipv6_regen_rndid().
- */
+ /* One reference from device. */
in6_dev_hold(ndev);
if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
@@ -425,17 +422,15 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
#endif
INIT_LIST_HEAD(&ndev->tempaddr_list);
- setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
+ ndev->desync_factor = U32_MAX;
if ((dev->flags&IFF_LOOPBACK) ||
dev->type == ARPHRD_TUNNEL ||
dev->type == ARPHRD_TUNNEL6 ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_NONE) {
ndev->cnf.use_tempaddr = -1;
- } else {
- in6_dev_hold(ndev);
- ipv6_regen_rndid((unsigned long) ndev);
- }
+ } else
+ ipv6_regen_rndid(ndev);
ndev->token = in6addr_any;
@@ -447,7 +442,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
err = addrconf_sysctl_register(ndev);
if (err) {
ipv6_mc_destroy_dev(ndev);
- del_timer(&ndev->regen_timer);
snmp6_unregister_dev(ndev);
goto err_release;
}
@@ -1190,6 +1184,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
int ret = 0;
u32 addr_flags;
unsigned long now = jiffies;
+ long max_desync_factor;
+ s32 cnf_temp_preferred_lft;
write_lock_bh(&idev->lock);
if (ift) {
@@ -1222,23 +1218,42 @@ retry:
}
in6_ifa_hold(ifp);
memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
- __ipv6_try_regen_rndid(idev, tmpaddr);
+ ipv6_try_regen_rndid(idev, tmpaddr);
memcpy(&addr.s6_addr[8], idev->rndid, 8);
age = (now - ifp->tstamp) / HZ;
+
+ regen_advance = idev->cnf.regen_max_retry *
+ idev->cnf.dad_transmits *
+ NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+
+ /* recalculate max_desync_factor each time and update
+ * idev->desync_factor if it exceeds the new maximum
+ */
+ cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
+ max_desync_factor = min_t(__u32,
+ idev->cnf.max_desync_factor,
+ cnf_temp_preferred_lft - regen_advance);
+
+ if (unlikely(idev->desync_factor > max_desync_factor)) {
+ if (max_desync_factor > 0) {
+ get_random_bytes(&idev->desync_factor,
+ sizeof(idev->desync_factor));
+ idev->desync_factor %= max_desync_factor;
+ } else {
+ idev->desync_factor = 0;
+ }
+ }
+
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
idev->cnf.temp_valid_lft + age);
- tmp_prefered_lft = min_t(__u32,
- ifp->prefered_lft,
- idev->cnf.temp_prefered_lft + age -
- idev->cnf.max_desync_factor);
+ tmp_prefered_lft = cnf_temp_preferred_lft + age -
+ idev->desync_factor;
+ tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
tmp_plen = ifp->prefix_len;
tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock);
- regen_advance = idev->cnf.regen_max_retry *
- idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
write_unlock_bh(&idev->lock);
/* A temporary address is created only if this calculated Preferred
@@ -2150,7 +2165,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
}
/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
-static void __ipv6_regen_rndid(struct inet6_dev *idev)
+static void ipv6_regen_rndid(struct inet6_dev *idev)
{
regen:
get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -2179,43 +2194,10 @@ regen:
}
}
-static void ipv6_regen_rndid(unsigned long data)
-{
- struct inet6_dev *idev = (struct inet6_dev *) data;
- unsigned long expires;
-
- rcu_read_lock_bh();
- write_lock_bh(&idev->lock);
-
- if (idev->dead)
- goto out;
-
- __ipv6_regen_rndid(idev);
-
- expires = jiffies +
- idev->cnf.temp_prefered_lft * HZ -
- idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
- idev->cnf.max_desync_factor * HZ;
- if (time_before(expires, jiffies)) {
- pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
- __func__, idev->dev->name);
- goto out;
- }
-
- if (!mod_timer(&idev->regen_timer, expires))
- in6_dev_hold(idev);
-
-out:
- write_unlock_bh(&idev->lock);
- rcu_read_unlock_bh();
- in6_dev_put(idev);
-}
-
-static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
{
if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
- __ipv6_regen_rndid(idev);
+ ipv6_regen_rndid(idev);
}
/*
@@ -2356,7 +2338,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
max_valid = 0;
max_prefered = idev->cnf.temp_prefered_lft -
- idev->cnf.max_desync_factor - age;
+ idev->desync_factor - age;
if (max_prefered < 0)
max_prefered = 0;
@@ -2916,6 +2898,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
spin_lock_bh(&ifp->lock);
ifp->flags &= ~IFA_F_TENTATIVE;
spin_unlock_bh(&ifp->lock);
+ rt_genid_bump_ipv6(dev_net(idev->dev));
ipv6_ifa_notify(RTM_NEWADDR, ifp);
in6_ifa_put(ifp);
}
@@ -3018,7 +3001,7 @@ static void init_loopback(struct net_device *dev)
* lo device down, release this obsolete dst and
* reallocate a new router for ifa.
*/
- if (sp_ifa->rt->dst.obsolete > 0) {
+ if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
ip6_rt_put(sp_ifa->rt);
sp_ifa->rt = NULL;
} else {
@@ -3594,9 +3577,6 @@ restart:
if (!how)
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
- if (how && del_timer(&idev->regen_timer))
- in6_dev_put(idev);
-
/* Step 3: clear tempaddr list */
while (!list_empty(&idev->tempaddr_list)) {
ifa = list_first_entry(&idev->tempaddr_list,
@@ -3761,7 +3741,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net_device *dev = idev->dev;
- bool notify = false;
+ bool bump_id, notify = false;
addrconf_join_solict(dev, &ifp->addr);
@@ -3776,11 +3756,12 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
idev->cnf.accept_dad < 1 ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
+ bump_id = ifp->flags & IFA_F_TENTATIVE;
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
- addrconf_dad_completed(ifp);
+ addrconf_dad_completed(ifp, bump_id);
return;
}
@@ -3840,8 +3821,8 @@ static void addrconf_dad_work(struct work_struct *w)
struct inet6_ifaddr,
dad_work);
struct inet6_dev *idev = ifp->idev;
+ bool bump_id, disable_ipv6 = false;
struct in6_addr mcaddr;
- bool disable_ipv6 = false;
enum {
DAD_PROCESS,
@@ -3911,11 +3892,12 @@ static void addrconf_dad_work(struct work_struct *w)
* DAD was successful
*/
+ bump_id = ifp->flags & IFA_F_TENTATIVE;
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
write_unlock_bh(&idev->lock);
- addrconf_dad_completed(ifp);
+ addrconf_dad_completed(ifp, bump_id);
goto out;
}
@@ -3952,7 +3934,7 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
return true;
}
-static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
{
struct net_device *dev = ifp->idev->dev;
struct in6_addr lladdr;
@@ -4004,6 +3986,9 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
spin_unlock(&ifp->lock);
write_unlock_bh(&ifp->idev->lock);
}
+
+ if (bump_id)
+ rt_genid_bump_ipv6(dev_net(dev));
}
static void addrconf_dad_run(struct inet6_dev *idev)
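To make the new per-device desync_factor concrete (numbers are illustrative, using common defaults rather than values mandated by the patch): with temp_prefered_lft = 86400 s, regen_max_retry = 3, dad_transmits = 1 and a 1 s retransmit timer, regen_advance is 3 s, so max_desync_factor is clamped to min(600, 86400 - 3) = 600 s and desync_factor is drawn once per device from [0, 600). A temporary address whose public counterpart is 100 s old, on a device that drew desync_factor = 250, then gets tmp_prefered_lft = min(ifp->prefered_lft, 86400 + 100 - 250) = min(ifp->prefered_lft, 86250) s, instead of the old formula that always subtracted the full max_desync_factor.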
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 37874e2f30ed..ccf40550c475 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -139,7 +139,8 @@ void ip6_datagram_release_cb(struct sock *sk)
}
EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
-static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
+ int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
@@ -252,6 +253,7 @@ ipv4_connected:
out:
return err;
}
+EXPORT_SYMBOL_GPL(__ip6_datagram_connect);
int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 060a60b2f8a6..111ba55fd512 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
esph = (void *)skb_push(skb, 4);
*seqhi = esph->spi;
esph->spi = esph->seq_no;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
+ esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
aead_request_set_callback(req, 0, esp_input_done_esn, skb);
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index bd59c343d35f..2772004ba5a1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -447,8 +447,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
if (__ipv6_addr_needs_scope_id(addr_type))
iif = skb->dev->ifindex;
- else
- iif = l3mdev_master_ifindex(skb->dev);
+ else {
+ dst = skb_dst(skb);
+ iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
+ }
/*
* Must not send error if the source does not uniquely
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 00cf28ad4565..02761c9fe43e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established);
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum,
const struct in6_addr *daddr,
- const int dif)
+ const int dif, bool exact_dif)
{
int score = -1;
@@ -109,7 +109,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
return -1;
score++;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
@@ -131,11 +131,12 @@ struct sock *inet6_lookup_listener(struct net *net,
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
int score, hiscore = 0, matches = 0, reuseport = 0;
+ bool exact_dif = inet6_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
u32 phash = 0;
sk_for_each(sk, &ilb->head) {
- score = compute_score(sk, net, hnum, daddr, dif);
+ score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -263,13 +264,15 @@ EXPORT_SYMBOL_GPL(inet6_hash_connect);
int inet6_hash(struct sock *sk)
{
+ int err = 0;
+
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
- __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+ err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
local_bh_enable();
}
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e7bfd55899a3..89c59e656f44 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -99,7 +99,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
segs = ops->callbacks.gso_segment(skb, features);
}
- if (IS_ERR(segs))
+ if (IS_ERR_OR_NULL(segs))
goto out;
gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
@@ -246,7 +246,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
skb_gro_postpull_rcsum(skb, iph, nlen);
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6001e781164e..59eb4ed99ce8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1366,7 +1366,7 @@ emsgsize:
if (((length > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
(sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6a66adba0c22..d76674efe523 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -157,6 +157,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
hash = HASH(&any, local);
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ ipv6_addr_any(&t->parms.raddr) &&
(t->dev->flags & IFF_UP))
return t;
}
@@ -164,6 +165,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
hash = HASH(remote, &any);
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+ ipv6_addr_any(&t->parms.laddr) &&
(t->dev->flags & IFF_UP))
return t;
}
@@ -1032,6 +1034,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
int mtu;
unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
unsigned int max_headroom = psh_hlen;
+ bool use_cache = false;
u8 hop_limit;
int err = -1;
@@ -1064,7 +1067,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
neigh_release(neigh);
- } else if (!fl6->flowi6_mark)
+ } else if (!(t->parms.flags &
+ (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
+ /* enable the cache only if the routing decision does
+ * not depend on the current inner header value
+ */
+ use_cache = true;
+ }
+
+ if (use_cache)
dst = dst_cache_get(&t->dst_cache);
if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
@@ -1148,7 +1159,7 @@ route_lookup:
if (t->encap.type != TUNNEL_ENCAP_NONE)
goto tx_err_dst_release;
} else {
- if (!fl6->flowi6_mark && ndst)
+ if (use_cache && ndst)
dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
}
skb_dst_set(skb, dst);
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index a7520528ecd2..b283f293ee4a 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
uh->len = htons(skb->len);
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
- | IPSKB_REROUTED);
skb_dst_set(skb, dst);
udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 8a02ca8a11af..c299c1e2bbf0 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1138,6 +1138,33 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
.priority = 100,
};
+static bool is_vti6_tunnel(const struct net_device *dev)
+{
+ return dev->netdev_ops == &vti6_netdev_ops;
+}
+
+static int vti6_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ip6_tnl *t = netdev_priv(dev);
+
+ if (!is_vti6_tunnel(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ if (!net_eq(t->net, dev_net(dev)))
+ xfrm_garbage_collect(t->net);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vti6_notifier_block __read_mostly = {
+ .notifier_call = vti6_device_event,
+};
+
/**
* vti6_tunnel_init - register protocol and reserve needed resources
*
@@ -1148,6 +1175,8 @@ static int __init vti6_tunnel_init(void)
const char *msg;
int err;
+ register_netdevice_notifier(&vti6_notifier_block);
+
msg = "tunnel device";
err = register_pernet_device(&vti6_net_ops);
if (err < 0)
@@ -1180,6 +1209,7 @@ xfrm_proto_ah_failed:
xfrm_proto_esp_failed:
unregister_pernet_device(&vti6_net_ops);
pernet_dev_failed:
+ unregister_netdevice_notifier(&vti6_notifier_block);
pr_err("vti6 init: failed to register %s\n", msg);
return err;
}
@@ -1194,6 +1224,7 @@ static void __exit vti6_tunnel_cleanup(void)
xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
unregister_pernet_device(&vti6_net_ops);
+ unregister_netdevice_notifier(&vti6_notifier_block);
}
module_init(vti6_tunnel_init);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 5330262ab673..636ec56f5f50 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -120,6 +120,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
static bool setsockopt_needs_rtnl(int optname)
{
switch (optname) {
+ case IPV6_ADDRFORM:
case IPV6_ADD_MEMBERSHIP:
case IPV6_DROP_MEMBERSHIP:
case IPV6_JOIN_ANYCAST:
@@ -198,7 +199,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
}
fl6_free_socklist(sk);
- ipv6_sock_mc_close(sk);
+ __ipv6_sock_mc_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 75c1fc54f188..14a3903f1c82 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -276,16 +276,14 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
return idev;
}
-void ipv6_sock_mc_close(struct sock *sk)
+void __ipv6_sock_mc_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_mc_socklist *mc_lst;
struct net *net = sock_net(sk);
- if (!rcu_access_pointer(np->ipv6_mc_list))
- return;
+ ASSERT_RTNL();
- rtnl_lock();
while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
struct net_device *dev;
@@ -303,8 +301,17 @@ void ipv6_sock_mc_close(struct sock *sk)
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
-
}
+}
+
+void ipv6_sock_mc_close(struct sock *sk)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (!rcu_access_pointer(np->ipv6_mc_list))
+ return;
+ rtnl_lock();
+ __ipv6_sock_mc_close(sk);
rtnl_unlock();
}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e4347aeb2e65..9948b5ce52da 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -576,11 +576,11 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
/* Jumbo payload inhibits frag. header */
if (ipv6_hdr(skb)->payload_len == 0) {
pr_debug("payload len = 0\n");
- return -EINVAL;
+ return 0;
}
if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
- return -EINVAL;
+ return 0;
if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
return -ENOMEM;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index f7aab5ab93a5..f06b0471f39f 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -69,7 +69,7 @@ static unsigned int ipv6_defrag(void *priv,
if (err == -EINPROGRESS)
return NF_STOLEN;
- return NF_ACCEPT;
+ return err == 0 ? NF_ACCEPT : NF_DROP;
}
static struct nf_hook_ops ipv6_defrag_ops[] = {
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index a5400223fd74..10090400c72f 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -156,6 +156,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
fl6.daddr = oip6h->saddr;
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
+ fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index 8bfd470cbe72..831f86e1ec08 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr,
{
struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
- int oif = regs->data[priv->sreg_dev];
+ int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif);
}
@@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
- if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+ if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
+ goto nla_put_failure;
+ if (priv->sreg_dev &&
nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
goto nla_put_failure;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 7cca8ac66fe9..cd4252346a32 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -155,6 +155,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
if (unlikely(!skb))
return 0;
+ skb->protocol = htons(ETH_P_IPV6);
+
return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, skb_dst(skb)->dev,
dst_output);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 0e983b694ee8..66e2d9dfc43a 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -180,7 +180,7 @@ struct proto pingv6_prot = {
.init = ping_init_sock,
.close = ping_close,
.connect = ip6_datagram_connect_v6_only,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.sendmsg = ping_v6_sendmsg,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 54404f08efcc..054a1d84fc5e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1241,7 +1241,7 @@ struct proto rawv6_prot = {
.close = rawv6_close,
.destroy = raw6_destroy,
.connect = ip6_datagram_connect_v6_only,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
.setsockopt = rawv6_setsockopt,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2160d5d009cb..3815e8505ed2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -456,7 +456,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
memmove(head->head + sizeof(struct frag_hdr), head->head,
(head->data - head->head) - sizeof(struct frag_hdr));
- head->mac_header += sizeof(struct frag_hdr);
+ if (skb_mac_header_was_set(head))
+ head->mac_header += sizeof(struct frag_hdr);
head->network_header += sizeof(struct frag_hdr);
skb_reset_transport_header(head);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bdbc38e8bf29..1b57e11e6e0d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -102,11 +102,13 @@ static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev,
unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex);
+ const struct in6_addr *gwaddr,
+ struct net_device *dev);
#endif
struct uncached_list {
@@ -656,7 +658,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
struct net_device *dev = rt->dst.dev;
if (dev && !netif_carrier_ok(dev) &&
- idev->cnf.ignore_routes_with_linkdown)
+ idev->cnf.ignore_routes_with_linkdown &&
+ !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
goto out;
if (rt6_check_expired(rt))
@@ -803,7 +806,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
rt = rt6_get_dflt_router(gwaddr, dev);
else
rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
- gwaddr, dev->ifindex);
+ gwaddr, dev);
if (rt && !lifetime) {
ip6_del_rt(rt);
@@ -811,8 +814,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
}
if (!rt && lifetime)
- rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
- pref);
+ rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+ dev, pref);
else if (rt)
rt->rt6i_flags = RTF_ROUTEINFO |
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1050,6 +1053,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
int strict = 0;
strict |= flags & RT6_LOOKUP_F_IFACE;
+ strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
if (net->ipv6.devconf_all->forwarding == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
@@ -1360,6 +1364,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
if (rt6->rt6i_flags & RTF_LOCAL)
return;
+ if (dst_metric_locked(dst, RTAX_MTU))
+ return;
+
dst_confirm(dst);
mtu = max_t(u32, mtu, IPV6_MIN_MTU);
if (mtu >= dst_mtu(dst))
@@ -1789,7 +1796,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
};
struct fib6_table *table;
struct rt6_info *rt;
- int flags = RT6_LOOKUP_F_IFACE;
+ int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
table = fib6_get_table(net, cfg->fc_table);
if (!table)
@@ -2325,13 +2332,16 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex)
+ const struct in6_addr *gwaddr,
+ struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+ int ifindex = dev->ifindex;
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
- table = fib6_get_table(net, RT6_TABLE_INFO);
+ table = fib6_get_table(net, tb_id);
if (!table)
return NULL;
@@ -2357,12 +2367,13 @@ out:
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev,
unsigned int pref)
{
struct fib6_config cfg = {
.fc_metric = IP6_RT_PRIO_USER,
- .fc_ifindex = ifindex,
+ .fc_ifindex = dev->ifindex,
.fc_dst_len = prefixlen,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
RTF_UP | RTF_PREF(pref),
@@ -2371,7 +2382,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
.fc_nlinfo.nl_net = net,
};
- cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
+ cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
cfg.fc_dst = *prefix;
cfg.fc_gateway = *gwaddr;
@@ -2381,16 +2392,17 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
ip6_route_add(&cfg);
- return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+ return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
}
#endif
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
struct rt6_info *rt;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+ table = fib6_get_table(dev_net(dev), tb_id);
if (!table)
return NULL;
@@ -2424,20 +2436,20 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
cfg.fc_gateway = *gwaddr;
- ip6_route_add(&cfg);
+ if (!ip6_route_add(&cfg)) {
+ struct fib6_table *table;
+
+ table = fib6_get_table(dev_net(dev), cfg.fc_table);
+ if (table)
+ table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
+ }
return rt6_get_dflt_router(gwaddr, dev);
}
-void rt6_purge_dflt_routers(struct net *net)
+static void __rt6_purge_dflt_routers(struct fib6_table *table)
{
struct rt6_info *rt;
- struct fib6_table *table;
-
- /* NOTE: Keep consistent with rt6_get_dflt_router */
- table = fib6_get_table(net, RT6_TABLE_DFLT);
- if (!table)
- return;
restart:
read_lock_bh(&table->tb6_lock);
@@ -2451,6 +2463,27 @@ restart:
}
}
read_unlock_bh(&table->tb6_lock);
+
+ table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+}
+
+void rt6_purge_dflt_routers(struct net *net)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
+ __rt6_purge_dflt_routers(table);
+ }
+ }
+
+ rcu_read_unlock();
}
static void rtmsg_to_fib6_config(struct net *net,
@@ -2728,6 +2761,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
PMTU discovery.
*/
if (rt->dst.dev == arg->dev &&
+ dst_metric_raw(&rt->dst, RTAX_MTU) &&
!dst_metric_locked(&rt->dst, RTAX_MTU)) {
if (rt->rt6i_flags & RTF_CACHE) {
/* For RTF_CACHE with rt6i_pmtu == 0
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5a27ab4eab39..b9f1fee9a886 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -818,8 +818,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
fl6.flowi6_proto = IPPROTO_TCP;
if (rt6_need_strict(&fl6.daddr) && !oif)
fl6.flowi6_oif = tcp_v6_iif(skb);
- else
- fl6.flowi6_oif = oif ? : skb->skb_iif;
+ else {
+ if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+ oif = skb->skb_iif;
+
+ fl6.flowi6_oif = oif;
+ }
fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
fl6.fl6_dport = t1->dest;
@@ -1225,7 +1229,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_do_rcv(sk, skb);
- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard;
/*
@@ -1453,8 +1457,10 @@ process:
if (tcp_v6_inbound_md5_hash(sk, skb))
goto discard_and_relse;
- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard_and_relse;
+ th = (const struct tcphdr *)skb->data;
+ hdr = ipv6_hdr(skb);
skb->dev = NULL;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9aa7c1c7a9ce..e4a8000d59ad 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -427,7 +427,8 @@ try_again:
if (is_udp4) {
if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
+ ip_cmsg_recv_offset(msg, skb,
+ sizeof(struct udphdr), off);
} else {
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
@@ -513,7 +514,7 @@ out:
return;
}
-static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
@@ -705,10 +706,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
if (use_hash2) {
hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
- udp_table.mask;
- hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+ udptable->mask;
+ hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
- hslot = &udp_table.hash2[hash2];
+ hslot = &udptable->hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index f6eb1ab34f4b..e78bdc76dcc3 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -26,7 +26,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len);
-int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
void udpv6_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 47d0d2b87106..2f5101a12283 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -45,7 +45,7 @@ struct proto udplitev6_prot = {
.getsockopt = udpv6_getsockopt,
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
- .backlog_rcv = udpv6_queue_rcv_skb,
+ .backlog_rcv = __udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.get_port = udp_v6_get_port,
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 42de4ccd159f..8938b6ba57a0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -61,7 +61,8 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
- !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+ (!sk->sk_bound_dev_if || !dif ||
+ sk->sk_bound_dev_if == dif))
goto found;
}
@@ -182,15 +183,17 @@ pass_up:
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
- sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
+ sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
+ tunnel_id);
+ if (!sk) {
+ read_unlock_bh(&l2tp_ip_lock);
+ goto discard;
+ }
+
+ sock_hold(sk);
read_unlock_bh(&l2tp_ip_lock);
}
- if (sk == NULL)
- goto discard;
-
- sock_hold(sk);
-
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -251,22 +254,17 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
int ret;
int chk_addr_ret;
- if (!sock_flag(sk, SOCK_ZAPPED))
- return -EINVAL;
if (addr_len < sizeof(struct sockaddr_l2tpip))
return -EINVAL;
if (addr->l2tp_family != AF_INET)
return -EINVAL;
- ret = -EADDRINUSE;
- read_lock_bh(&l2tp_ip_lock);
- if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
- sk->sk_bound_dev_if, addr->l2tp_conn_id))
- goto out_in_use;
+ lock_sock(sk);
- read_unlock_bh(&l2tp_ip_lock);
+ ret = -EINVAL;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out;
- lock_sock(sk);
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
goto out;
@@ -280,14 +278,22 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
- sk_dst_reset(sk);
+ write_lock_bh(&l2tp_ip_lock);
+ if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+ sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
+ write_unlock_bh(&l2tp_ip_lock);
+ ret = -EADDRINUSE;
+ goto out;
+ }
+
+ sk_dst_reset(sk);
l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
- write_lock_bh(&l2tp_ip_lock);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
+
ret = 0;
sock_reset_flag(sk, SOCK_ZAPPED);
@@ -295,11 +301,6 @@ out:
release_sock(sk);
return ret;
-
-out_in_use:
- read_unlock_bh(&l2tp_ip_lock);
-
- return ret;
}
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -307,21 +308,24 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
int rc;
- if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
- return -EINVAL;
-
if (addr_len < sizeof(*lsa))
return -EINVAL;
if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
return -EINVAL;
- rc = ip4_datagram_connect(sk, uaddr, addr_len);
- if (rc < 0)
- return rc;
-
lock_sock(sk);
+ /* Must bind first - autobinding does not work */
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ rc = -EINVAL;
+ goto out_sk;
+ }
+
+ rc = __ip4_datagram_connect(sk, uaddr, addr_len);
+ if (rc < 0)
+ goto out_sk;
+
l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
write_lock_bh(&l2tp_ip_lock);
@@ -329,7 +333,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
sk_add_bind_node(sk, &l2tp_ip_bind_table);
write_unlock_bh(&l2tp_ip_lock);
+out_sk:
release_sock(sk);
+
return rc;
}
@@ -338,7 +344,7 @@ static int l2tp_ip_disconnect(struct sock *sk, int flags)
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
- return udp_disconnect(sk, flags);
+ return __udp_disconnect(sk, flags);
}
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index ea2ae6664cc8..aa821cb639e5 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -72,8 +72,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
- !(addr && ipv6_addr_equal(addr, laddr)) &&
- !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+ (!addr || ipv6_addr_equal(addr, laddr)) &&
+ (!sk->sk_bound_dev_if || !dif ||
+ sk->sk_bound_dev_if == dif))
goto found;
}
@@ -196,16 +197,17 @@ pass_up:
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
- sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
- 0, tunnel_id);
+ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
+ tunnel_id);
+ if (!sk) {
+ read_unlock_bh(&l2tp_ip6_lock);
+ goto discard;
+ }
+
+ sock_hold(sk);
read_unlock_bh(&l2tp_ip6_lock);
}
- if (sk == NULL)
- goto discard;
-
- sock_hold(sk);
-
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -266,11 +268,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
struct net *net = sock_net(sk);
__be32 v4addr = 0;
+ int bound_dev_if;
int addr_type;
int err;
- if (!sock_flag(sk, SOCK_ZAPPED))
- return -EINVAL;
if (addr->l2tp_family != AF_INET6)
return -EINVAL;
if (addr_len < sizeof(*addr))
@@ -286,41 +287,34 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (addr_type & IPV6_ADDR_MULTICAST)
return -EADDRNOTAVAIL;
- err = -EADDRINUSE;
- read_lock_bh(&l2tp_ip6_lock);
- if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
- sk->sk_bound_dev_if, addr->l2tp_conn_id))
- goto out_in_use;
- read_unlock_bh(&l2tp_ip6_lock);
-
lock_sock(sk);
err = -EINVAL;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out_unlock;
+
if (sk->sk_state != TCP_CLOSE)
goto out_unlock;
+ bound_dev_if = sk->sk_bound_dev_if;
+
/* Check if the address belongs to the host. */
rcu_read_lock();
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
- if (addr_len >= sizeof(struct sockaddr_in6) &&
- addr->l2tp_scope_id) {
- /* Override any existing binding, if another
- * one is supplied by user.
- */
- sk->sk_bound_dev_if = addr->l2tp_scope_id;
- }
+ if (addr->l2tp_scope_id)
+ bound_dev_if = addr->l2tp_scope_id;
/* Binding to link-local address requires an
- interface */
- if (!sk->sk_bound_dev_if)
+ * interface.
+ */
+ if (!bound_dev_if)
goto out_unlock_rcu;
err = -ENODEV;
- dev = dev_get_by_index_rcu(sock_net(sk),
- sk->sk_bound_dev_if);
+ dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
if (!dev)
goto out_unlock_rcu;
}
@@ -335,13 +329,22 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
rcu_read_unlock();
- inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
+ write_lock_bh(&l2tp_ip6_lock);
+ if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+ addr->l2tp_conn_id)) {
+ write_unlock_bh(&l2tp_ip6_lock);
+ err = -EADDRINUSE;
+ goto out_unlock;
+ }
+
+ inet->inet_saddr = v4addr;
+ inet->inet_rcv_saddr = v4addr;
+ sk->sk_bound_dev_if = bound_dev_if;
sk->sk_v6_rcv_saddr = addr->l2tp_addr;
np->saddr = addr->l2tp_addr;
l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
- write_lock_bh(&l2tp_ip6_lock);
sk_add_bind_node(sk, &l2tp_ip6_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
@@ -354,10 +357,7 @@ out_unlock_rcu:
rcu_read_unlock();
out_unlock:
release_sock(sk);
- return err;
-out_in_use:
- read_unlock_bh(&l2tp_ip6_lock);
return err;
}
@@ -370,9 +370,6 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_type;
int rc;
- if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
- return -EINVAL;
-
if (addr_len < sizeof(*lsa))
return -EINVAL;
@@ -389,10 +386,18 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
return -EINVAL;
}
- rc = ip6_datagram_connect(sk, uaddr, addr_len);
-
lock_sock(sk);
+ /* Must bind first - autobinding does not work */
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ rc = -EINVAL;
+ goto out_sk;
+ }
+
+ rc = __ip6_datagram_connect(sk, uaddr, addr_len);
+ if (rc < 0)
+ goto out_sk;
+
l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
write_lock_bh(&l2tp_ip6_lock);
@@ -400,6 +405,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
sk_add_bind_node(sk, &l2tp_ip6_bind_table);
write_unlock_bh(&l2tp_ip6_lock);
+out_sk:
release_sock(sk);
return rc;
@@ -410,7 +416,7 @@ static int l2tp_ip6_disconnect(struct sock *sk, int flags)
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
- return udp_disconnect(sk, flags);
+ return __udp_disconnect(sk, flags);
}
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7663c28ba353..a4e0d59a40dd 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -18,21 +18,24 @@
#include "key.h"
#include "aes_ccm.h"
-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic,
- size_t mic_len)
+int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len)
{
struct scatterlist sg[3];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *) aead_req_data;
+ aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
- memset(aead_req, 0, sizeof(aead_req_data));
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, CCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, mic_len);
@@ -41,6 +44,9 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
aead_request_set_ad(aead_req, sg[0].length);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
+
+ return 0;
}
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
@@ -48,18 +54,23 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
size_t mic_len)
{
struct scatterlist sg[3];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *) aead_req_data;
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
+ int err;
if (data_len == 0)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, CCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, mic_len);
@@ -67,7 +78,10 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
aead_request_set_ad(aead_req, sg[0].length);
- return crypto_aead_decrypt(aead_req);
+ err = crypto_aead_decrypt(aead_req);
+ kzfree(aead_req);
+
+ return err;
}
struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 6a73d1e4d186..fcd3254c5cf0 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,12 +12,14 @@
#include <linux/crypto.h>
+#define CCM_AAD_LEN 32
+
struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
size_t key_len,
size_t mic_len);
-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic,
- size_t mic_len);
+int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len);
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic,
size_t mic_len);
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
index 3afe361fd27c..8a4397cc1b08 100644
--- a/net/mac80211/aes_gcm.c
+++ b/net/mac80211/aes_gcm.c
@@ -15,20 +15,23 @@
#include "key.h"
#include "aes_gcm.h"
-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic)
+int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[3];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
+ aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
- memset(aead_req, 0, sizeof(aead_req_data));
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, GCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
@@ -37,24 +40,31 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
aead_request_set_ad(aead_req, sg[0].length);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
+ return 0;
}
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[3];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
+ int err;
if (data_len == 0)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, GCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
@@ -63,7 +73,10 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
data_len + IEEE80211_GCMP_MIC_LEN, j_0);
aead_request_set_ad(aead_req, sg[0].length);
- return crypto_aead_decrypt(aead_req);
+ err = crypto_aead_decrypt(aead_req);
+ kzfree(aead_req);
+
+ return err;
}
struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
index 1347fda6b76a..55aed5352494 100644
--- a/net/mac80211/aes_gcm.h
+++ b/net/mac80211/aes_gcm.h
@@ -11,8 +11,10 @@
#include <linux/crypto.h>
-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic);
+#define GCM_AAD_LEN 32
+
+int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic);
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic);
struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
index 3ddd927aaf30..bd72a862ddb7 100644
--- a/net/mac80211/aes_gmac.c
+++ b/net/mac80211/aes_gmac.c
@@ -17,28 +17,27 @@
#include "key.h"
#include "aes_gmac.h"
-#define GMAC_MIC_LEN 16
-#define GMAC_NONCE_LEN 12
-#define AAD_LEN 20
-
int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
const u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[4];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
- u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
+ u8 *zero, *__aad, iv[AES_BLOCK_SIZE];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
if (data_len < GMAC_MIC_LEN)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ zero = (u8 *)aead_req + reqsize;
+ __aad = zero + GMAC_MIC_LEN;
+ memcpy(__aad, aad, GMAC_AAD_LEN);
- memset(zero, 0, GMAC_MIC_LEN);
sg_init_table(sg, 4);
- sg_set_buf(&sg[0], aad, AAD_LEN);
+ sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
@@ -49,9 +48,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
aead_request_set_tfm(aead_req, tfm);
aead_request_set_crypt(aead_req, sg, sg, 0, iv);
- aead_request_set_ad(aead_req, AAD_LEN + data_len);
+ aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
return 0;
}
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
index d328204d73a8..32e6442c95be 100644
--- a/net/mac80211/aes_gmac.h
+++ b/net/mac80211/aes_gmac.h
@@ -11,6 +11,10 @@
#include <linux/crypto.h>
+#define GMAC_AAD_LEN 20
+#define GMAC_MIC_LEN 16
+#define GMAC_NONCE_LEN 12
+
struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
size_t key_len);
int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c3f610bba3fe..eede5c6db8d5 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -820,7 +820,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
break;
rcu_read_lock();
- sta = sta_info_get(sdata, mgmt->da);
+ sta = sta_info_get_bss(sdata, mgmt->da);
rcu_read_unlock();
if (!sta)
return -ENOLINK;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6175db385ba7..a47bbc973f2d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2298,6 +2298,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
__le16 fc = hdr->frame_control;
struct sk_buff_head frame_list;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ struct ethhdr ethhdr;
+ const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
if (unlikely(!ieee80211_is_data(fc)))
return RX_CONTINUE;
@@ -2308,24 +2310,53 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!(status->rx_flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;
- if (ieee80211_has_a4(hdr->frame_control) &&
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- !rx->sdata->u.vlan.sta)
- return RX_DROP_UNUSABLE;
+ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+ switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ if (!rx->sdata->u.vlan.sta)
+ return RX_DROP_UNUSABLE;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sdata->u.mgd.use_4addr)
+ return RX_DROP_UNUSABLE;
+ break;
+ default:
+ return RX_DROP_UNUSABLE;
+ }
+ check_da = NULL;
+ check_sa = NULL;
+ } else switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ check_da = NULL;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sta ||
+ !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
+ check_sa = NULL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ check_sa = NULL;
+ break;
+ default:
+ break;
+ }
- if (is_multicast_ether_addr(hdr->addr1) &&
- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- rx->sdata->u.vlan.sta) ||
- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
- rx->sdata->u.mgd.use_4addr)))
+ if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
skb->dev = dev;
__skb_queue_head_init(&frame_list);
+ if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
+ rx->sdata->vif.addr,
+ rx->sdata->vif.type))
+ return RX_DROP_UNUSABLE;
+
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
- rx->local->hw.extra_tx_headroom, true);
+ rx->local->hw.extra_tx_headroom,
+ check_da, check_sa);
while (!skb_queue_empty(&frame_list)) {
rx->skb = __skb_dequeue(&frame_list);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 78e9ecbc96e6..8e05032689f0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
}
/* No need to do anything if the driver does all */
- if (!local->ops->set_tim)
+ if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
return;
if (sta->dead)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1c56abc49627..bd5f4be89435 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1501,7 +1501,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
struct sta_info *sta,
struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct fq *fq = &local->fq;
struct ieee80211_vif *vif;
struct txq_info *txqi;
@@ -1526,8 +1525,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
if (!txqi)
return false;
- info->control.vif = vif;
-
spin_lock_bh(&fq->lock);
ieee80211_txq_enqueue(local, txqi, skb);
spin_unlock_bh(&fq->lock);
@@ -3213,7 +3210,6 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
- *ieee80211_get_qos_ctl(hdr) = tid;
hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
} else {
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
@@ -3338,6 +3334,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
(tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ *ieee80211_get_qos_ctl(hdr) = tid;
+ }
+
__skb_queue_head_init(&tx.skbs);
tx.flags = IEEE80211_TX_UNICAST;
@@ -3426,6 +3427,11 @@ begin:
goto begin;
}
+ if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ else
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
struct sta_info *sta = container_of(txq->sta, struct sta_info,
sta);
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index ee715764a828..6832bf6ab69f 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -270,6 +270,22 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
}
+ /*
+ * This is a workaround for VHT-enabled STAs which break the spec
+ * and have the VHT-MCS Rx map filled in with value 3 for all eight
+	 * spatial streams; one example is the AR9462.
+ *
+	 * As per the spec, section 22.1.1 (Introduction to the VHT PHY):
+	 * a VHT STA shall support at least single spatial stream VHT-MCSs
+ * 0 to 7 (transmit and receive) in all supported channel widths.
+ */
+ if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
+ vht_cap->vht_supported = false;
+ sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n",
+ sta->addr);
+ return;
+ }
+
/* finally set up the bandwidth */
switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b48c1e13e281..42ce9bd4426f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -405,7 +405,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
u8 *pos;
u8 pn[6];
u64 pn64;
- u8 aad[2 * AES_BLOCK_SIZE];
+ u8 aad[CCM_AAD_LEN];
u8 b_0[AES_BLOCK_SIZE];
if (info->control.hw_key &&
@@ -461,10 +461,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
pos += IEEE80211_CCMP_HDR_LEN;
ccmp_special_blocks(skb, pn, b_0, aad);
- ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
- skb_put(skb, mic_len), mic_len);
-
- return 0;
+ return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
+ skb_put(skb, mic_len), mic_len);
}
@@ -639,7 +637,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 *pos;
u8 pn[6];
u64 pn64;
- u8 aad[2 * AES_BLOCK_SIZE];
+ u8 aad[GCM_AAD_LEN];
u8 j_0[AES_BLOCK_SIZE];
if (info->control.hw_key &&
@@ -696,10 +694,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos += IEEE80211_GCMP_HDR_LEN;
gcmp_special_blocks(skb, pn, j_0, aad);
- ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
- skb_put(skb, IEEE80211_GCMP_MIC_LEN));
-
- return 0;
+ return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
+ skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}
ieee80211_tx_result
@@ -1123,9 +1119,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie_16 *mmie;
struct ieee80211_hdr *hdr;
- u8 aad[20];
+ u8 aad[GMAC_AAD_LEN];
u64 pn64;
- u8 nonce[12];
+ u8 nonce[GMAC_NONCE_LEN];
if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
return TX_DROP;
@@ -1171,7 +1167,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie_16 *mmie;
- u8 aad[20], mic[16], ipn[6], nonce[12];
+ u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_mgmt(hdr->frame_control))
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 13290a70fa71..1308a56f2591 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -246,6 +246,7 @@ enum {
ncsi_dev_state_config_gls,
ncsi_dev_state_config_done,
ncsi_dev_state_suspend_select = 0x0401,
+ ncsi_dev_state_suspend_gls,
ncsi_dev_state_suspend_dcnt,
ncsi_dev_state_suspend_dc,
ncsi_dev_state_suspend_deselect,
@@ -264,6 +265,7 @@ struct ncsi_dev_priv {
#endif
unsigned int package_num; /* Number of packages */
struct list_head packages; /* List of packages */
+ struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
unsigned int request_id; /* Last used request ID */
#define NCSI_REQ_START_IDX 1
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index b41a6617d498..6898e7229285 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -141,23 +141,35 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
return -ENODEV;
	/* If the channel is the active one, we need to reconfigure it */
+ spin_lock_irqsave(&nc->lock, flags);
ncm = &nc->modes[NCSI_MODE_LINK];
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
if (!list_empty(&nc->link) ||
- nc->state != NCSI_CHANNEL_ACTIVE ||
- (ncm->data[3] & 0x1))
+ nc->state != NCSI_CHANNEL_ACTIVE) {
+ spin_unlock_irqrestore(&nc->lock, flags);
return 0;
+ }
- if (ndp->flags & NCSI_DEV_HWA)
+ spin_unlock_irqrestore(&nc->lock, flags);
+ if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
ndp->flags |= NCSI_DEV_RESHUFFLE;
/* If this channel is the active one and the link doesn't
	 * work, we have to choose another channel to be the active one.
	 * The logic here is the same as what we do when the link
* is down on the active channel.
+ *
+	 * On the other hand, we need to configure it when the host driver
+ * state on the active channel becomes ready.
*/
ncsi_stop_channel_monitor(nc);
+
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
+ NCSI_CHANNEL_ACTIVE;
+ spin_unlock_irqrestore(&nc->lock, flags);
+
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 5e509e547c2d..a3bd5fa8ad09 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -540,42 +540,86 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_suspend_select;
/* Fall through */
case ncsi_dev_state_suspend_select:
- case ncsi_dev_state_suspend_dcnt:
- case ncsi_dev_state_suspend_dc:
- case ncsi_dev_state_suspend_deselect:
ndp->pending_req_num = 1;
- np = ndp->active_package;
- nc = ndp->active_channel;
+ nca.type = NCSI_PKT_CMD_SP;
nca.package = np->id;
- if (nd->state == ncsi_dev_state_suspend_select) {
- nca.type = NCSI_PKT_CMD_SP;
- nca.channel = NCSI_RESERVED_CHANNEL;
- if (ndp->flags & NCSI_DEV_HWA)
- nca.bytes[0] = 0;
- else
- nca.bytes[0] = 1;
+ nca.channel = NCSI_RESERVED_CHANNEL;
+ if (ndp->flags & NCSI_DEV_HWA)
+ nca.bytes[0] = 0;
+ else
+ nca.bytes[0] = 1;
+
+	/* Retrieve the last link states of the channels in the current
+	 * package when the current active channel needs to fail over to
+	 * another one. That means we will possibly select another
+	 * channel as the next active one. The link states of the channels
+	 * are the most important factor in the selection, so we need
+ * accurate link states. Unfortunately, the link states on
+ * inactive channels can't be updated with LSC AEN in time.
+ */
+ if (ndp->flags & NCSI_DEV_RESHUFFLE)
+ nd->state = ncsi_dev_state_suspend_gls;
+ else
nd->state = ncsi_dev_state_suspend_dcnt;
- } else if (nd->state == ncsi_dev_state_suspend_dcnt) {
- nca.type = NCSI_PKT_CMD_DCNT;
- nca.channel = nc->id;
- nd->state = ncsi_dev_state_suspend_dc;
- } else if (nd->state == ncsi_dev_state_suspend_dc) {
- nca.type = NCSI_PKT_CMD_DC;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_gls:
+ ndp->pending_req_num = np->channel_num;
+
+ nca.type = NCSI_PKT_CMD_GLS;
+ nca.package = np->id;
+
+ nd->state = ncsi_dev_state_suspend_dcnt;
+ NCSI_FOR_EACH_CHANNEL(np, nc) {
nca.channel = nc->id;
- nca.bytes[0] = 1;
- nd->state = ncsi_dev_state_suspend_deselect;
- } else if (nd->state == ncsi_dev_state_suspend_deselect) {
- nca.type = NCSI_PKT_CMD_DP;
- nca.channel = NCSI_RESERVED_CHANNEL;
- nd->state = ncsi_dev_state_suspend_done;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
}
+ break;
+ case ncsi_dev_state_suspend_dcnt:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DCNT;
+ nca.package = np->id;
+ nca.channel = nc->id;
+
+ nd->state = ncsi_dev_state_suspend_dc;
ret = ncsi_xmit_cmd(&nca);
- if (ret) {
- nd->state = ncsi_dev_state_functional;
- return;
- }
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_dc:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DC;
+ nca.package = np->id;
+ nca.channel = nc->id;
+ nca.bytes[0] = 1;
+
+ nd->state = ncsi_dev_state_suspend_deselect;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_deselect:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DP;
+ nca.package = np->id;
+ nca.channel = NCSI_RESERVED_CHANNEL;
+
+ nd->state = ncsi_dev_state_suspend_done;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
break;
case ncsi_dev_state_suspend_done:
@@ -589,6 +633,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
nd->state);
}
+
+ return;
+error:
+ nd->state = ncsi_dev_state_functional;
}
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
@@ -597,6 +645,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
struct net_device *dev = nd->dev;
struct ncsi_package *np = ndp->active_package;
struct ncsi_channel *nc = ndp->active_channel;
+ struct ncsi_channel *hot_nc = NULL;
struct ncsi_cmd_arg nca;
unsigned char index;
unsigned long flags;
@@ -702,12 +751,20 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_config_done:
spin_lock_irqsave(&nc->lock, flags);
- if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
+ if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
+ hot_nc = nc;
nc->state = NCSI_CHANNEL_ACTIVE;
- else
+ } else {
+ hot_nc = NULL;
nc->state = NCSI_CHANNEL_INACTIVE;
+ }
spin_unlock_irqrestore(&nc->lock, flags);
+ /* Update the hot channel */
+ spin_lock_irqsave(&ndp->lock, flags);
+ ndp->hot_channel = hot_nc;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
ncsi_start_channel_monitor(nc);
ncsi_process_next_channel(ndp);
break;
@@ -725,10 +782,14 @@ error:
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_package *np;
- struct ncsi_channel *nc, *found;
+ struct ncsi_channel *nc, *found, *hot_nc;
struct ncsi_channel_mode *ncm;
unsigned long flags;
+ spin_lock_irqsave(&ndp->lock, flags);
+ hot_nc = ndp->hot_channel;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
/* The search is done once an inactive channel with up
* link is found.
*/
@@ -746,6 +807,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
if (!found)
found = nc;
+ if (nc == hot_nc)
+ found = nc;
+
ncm = &nc->modes[NCSI_MODE_LINK];
if (ncm->data[2] & 0x1) {
spin_unlock_irqrestore(&nc->lock, flags);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index fcb5d1df11e9..004af030ef1a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -361,16 +361,9 @@ next_hook:
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- int err;
-
- RCU_INIT_POINTER(state->hook_entries, entry);
- err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
- if (err < 0) {
- if (err == -ESRCH &&
- (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
- goto next_hook;
- kfree_skb(skb);
- }
+ ret = nf_queue(skb, state, &entry, verdict);
+ if (ret == 1 && entry)
+ goto next_hook;
}
return ret;
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c3c809b2e712..a6e44ef2ec9a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2845,7 +2845,7 @@ static struct genl_family ip_vs_genl_family = {
.hdrsize = 0,
.name = IPVS_GENL_NAME,
.version = IPVS_GENL_VERSION,
- .maxattr = IPVS_CMD_MAX,
+ .maxattr = IPVS_CMD_ATTR_MAX,
	.netnsok = true,         /* Make ipvsadm work on netns */
};
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 1b07578bedf3..9350530c16c1 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -283,6 +283,7 @@ struct ip_vs_sync_buff {
*/
static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
{
+ memset(ho, 0, sizeof(*ho));
ho->init_seq = get_unaligned_be32(&no->init_seq);
ho->delta = get_unaligned_be32(&no->delta);
ho->previous_delta = get_unaligned_be32(&no->previous_delta);
@@ -917,8 +918,10 @@ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *pa
kfree(param->pe_data);
}
- if (opt)
- memcpy(&cp->in_seq, opt, sizeof(*opt));
+ if (opt) {
+ cp->in_seq = opt->in_seq;
+ cp->out_seq = opt->out_seq;
+ }
atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
cp->state = state;
cp->old_state = cp->state;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ba6a1d421222..0f87e5d21be7 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -76,6 +76,7 @@ struct conntrack_gc_work {
struct delayed_work dwork;
u32 last_bucket;
bool exiting;
+ long next_gc_run;
};
static __read_mostly struct kmem_cache *nf_conntrack_cachep;
@@ -83,9 +84,11 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
+/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of the table */
#define GC_MAX_BUCKETS_DIV 64u
-#define GC_MAX_BUCKETS 8192u
-#define GC_INTERVAL (5 * HZ)
+/* upper bound of scan intervals */
+#define GC_INTERVAL_MAX (2 * HZ)
+/* maximum conntracks to evict per gc run */
#define GC_MAX_EVICTS 256u
static struct conntrack_gc_work conntrack_gc_work;
@@ -936,13 +939,13 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
static void gc_worker(struct work_struct *work)
{
unsigned int i, goal, buckets = 0, expired_count = 0;
- unsigned long next_run = GC_INTERVAL;
- unsigned int ratio, scanned = 0;
struct conntrack_gc_work *gc_work;
+ unsigned int ratio, scanned = 0;
+ unsigned long next_run;
gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
- goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS);
+ goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
i = gc_work->last_bucket;
do {
@@ -982,17 +985,47 @@ static void gc_worker(struct work_struct *work)
if (gc_work->exiting)
return;
+ /*
+ * Eviction will normally happen from the packet path, and not
+ * from this gc worker.
+ *
+	 * This worker is only here to reap expired entries when the system
+	 * goes idle after a busy period.
+ *
+ * The heuristics below are supposed to balance conflicting goals:
+ *
+ * 1. Minimize time until we notice a stale entry
+ * 2. Maximize scan intervals to not waste cycles
+ *
+	 * Normally, expired_count will be 0; this increases the next_run time
+	 * to prioritize 2) above.
+ *
+ * As soon as a timed-out entry is found, move towards 1) and increase
+ * the scan frequency.
+	 * If we have lots of evictions, the next scan is done immediately.
+ */
ratio = scanned ? expired_count * 100 / scanned : 0;
- if (ratio >= 90)
+ if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
+ gc_work->next_gc_run = 0;
next_run = 0;
+ } else if (expired_count) {
+ gc_work->next_gc_run /= 2U;
+ next_run = msecs_to_jiffies(1);
+ } else {
+ if (gc_work->next_gc_run < GC_INTERVAL_MAX)
+ gc_work->next_gc_run += msecs_to_jiffies(1);
+
+ next_run = gc_work->next_gc_run;
+ }
gc_work->last_bucket = i;
- schedule_delayed_work(&gc_work->dwork, next_run);
+ queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
}
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
+ gc_work->next_gc_run = GC_INTERVAL_MAX;
gc_work->exiting = false;
}
@@ -1885,7 +1918,7 @@ int nf_conntrack_init_start(void)
nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
conntrack_gc_work_init(&conntrack_gc_work);
- schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL);
+ queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
return 0;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 336e21559e01..7341adf7059d 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -138,9 +138,14 @@ __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
for (i = 0; i < nf_ct_helper_hsize; i++) {
hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
- if (!strcmp(h->name, name) &&
- h->tuple.src.l3num == l3num &&
- h->tuple.dst.protonum == protonum)
+ if (strcmp(h->name, name))
+ continue;
+
+ if (h->tuple.src.l3num != NFPROTO_UNSPEC &&
+ h->tuple.src.l3num != l3num)
+ continue;
+
+ if (h->tuple.dst.protonum == protonum)
return h;
}
}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 621b81c7bddc..c3fc14e021ec 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1436,9 +1436,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
handler = &sip_handlers[i];
if (handler->request == NULL)
continue;
- if (*datalen < handler->len ||
+ if (*datalen < handler->len + 2 ||
strncasecmp(*dptr, handler->method, handler->len))
continue;
+ if ((*dptr)[handler->len] != ' ' ||
+ !isalpha((*dptr)[handler->len+1]))
+ continue;
if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
&matchoff, &matchlen) <= 0) {
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index e0adb5959342..9fdb655f85bc 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -18,7 +18,7 @@ unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,
/* nf_queue.c */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
- unsigned int queuenum);
+ struct nf_hook_entry **entryp, unsigned int verdict);
void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
int __init netfilter_queue_init(void);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index bbb8f3df79f7..5b9c884a452e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -42,7 +42,7 @@ struct nf_nat_conn_key {
const struct nf_conntrack_zone *zone;
};
-static struct rhashtable nf_nat_bysource_table;
+static struct rhltable nf_nat_bysource_table;
inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
@@ -193,9 +193,12 @@ static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
const struct nf_nat_conn_key *key = arg->key;
const struct nf_conn *ct = obj;
- return same_src(ct, key->tuple) &&
- net_eq(nf_ct_net(ct), key->net) &&
- nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL);
+ if (!same_src(ct, key->tuple) ||
+ !net_eq(nf_ct_net(ct), key->net) ||
+ !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
+ return 1;
+
+ return 0;
}
static struct rhashtable_params nf_nat_bysource_params = {
@@ -204,7 +207,6 @@ static struct rhashtable_params nf_nat_bysource_params = {
.obj_cmpfn = nf_nat_bysource_cmp,
.nelem_hint = 256,
.min_size = 1024,
- .nulls_base = (1U << RHT_BASE_SHIFT),
};
/* Only called for SRC manip */
@@ -223,12 +225,15 @@ find_appropriate_src(struct net *net,
.tuple = tuple,
.zone = zone
};
+ struct rhlist_head *hl;
- ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key,
- nf_nat_bysource_params);
- if (!ct)
+ hl = rhltable_lookup(&nf_nat_bysource_table, &key,
+ nf_nat_bysource_params);
+ if (!hl)
return 0;
+ ct = container_of(hl, typeof(*ct), nat_bysource);
+
nf_ct_invert_tuplepr(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
result->dst = tuple->dst;
@@ -446,11 +451,17 @@ nf_nat_setup_info(struct nf_conn *ct,
}
if (maniptype == NF_NAT_MANIP_SRC) {
+ struct nf_nat_conn_key key = {
+ .net = nf_ct_net(ct),
+ .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ .zone = nf_ct_zone(ct),
+ };
int err;
- err = rhashtable_insert_fast(&nf_nat_bysource_table,
- &ct->nat_bysource,
- nf_nat_bysource_params);
+ err = rhltable_insert_key(&nf_nat_bysource_table,
+ &key,
+ &ct->nat_bysource,
+ nf_nat_bysource_params);
if (err)
return NF_DROP;
}
@@ -567,8 +578,8 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
* will delete entry from already-freed table.
*/
ct->status &= ~IPS_NAT_DONE_MASK;
- rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
- nf_nat_bysource_params);
+ rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+ nf_nat_bysource_params);
/* don't delete conntrack. Although that would make things a lot
* simpler, we'd end up flushing all conntracks on nat rmmod.
@@ -698,8 +709,8 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
if (!nat)
return;
- rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
- nf_nat_bysource_params);
+ rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+ nf_nat_bysource_params);
}
static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -834,13 +845,13 @@ static int __init nf_nat_init(void)
{
int ret;
- ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
+ ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
if (ret)
return ret;
ret = nf_ct_extend_register(&nat_extend);
if (ret < 0) {
- rhashtable_destroy(&nf_nat_bysource_table);
+ rhltable_destroy(&nf_nat_bysource_table);
printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
return ret;
}
@@ -864,7 +875,7 @@ static int __init nf_nat_init(void)
return 0;
cleanup_extend:
- rhashtable_destroy(&nf_nat_bysource_table);
+ rhltable_destroy(&nf_nat_bysource_table);
nf_ct_extend_unregister(&nat_extend);
return ret;
}
@@ -883,7 +894,7 @@ static void __exit nf_nat_cleanup(void)
for (i = 0; i < NFPROTO_NUMPROTO; i++)
kfree(nf_nat_l4protos[i]);
- rhashtable_destroy(&nf_nat_bysource_table);
+ rhltable_destroy(&nf_nat_bysource_table);
}
MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 96964a0070e1..8f08d759844a 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -107,13 +107,8 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
rcu_read_unlock();
}
-/*
- * Any packet that leaves via this function must come back
- * through nf_reinject().
- */
-int nf_queue(struct sk_buff *skb,
- struct nf_hook_state *state,
- unsigned int queuenum)
+static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+ unsigned int queuenum)
{
int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
@@ -161,6 +156,27 @@ err:
return status;
}
+/* Packets leaving via this function must come back through nf_reinject(). */
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+ struct nf_hook_entry **entryp, unsigned int verdict)
+{
+ struct nf_hook_entry *entry = *entryp;
+ int ret;
+
+ RCU_INIT_POINTER(state->hook_entries, entry);
+ ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
+ if (ret < 0) {
+ if (ret == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
+ *entryp = rcu_dereference(entry->next);
+ return 1;
+ }
+ kfree_skb(skb);
+ }
+
+ return 0;
+}
+
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
struct nf_hook_entry *hook_entry;
@@ -187,26 +203,26 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
entry->state.thresh = INT_MIN;
if (verdict == NF_ACCEPT) {
- next_hook:
- verdict = nf_iterate(skb, &entry->state, &hook_entry);
+ hook_entry = rcu_dereference(hook_entry->next);
+ if (hook_entry)
+next_hook:
+ verdict = nf_iterate(skb, &entry->state, &hook_entry);
}
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_STOP:
+okfn:
local_bh_disable();
entry->state.okfn(entry->state.net, entry->state.sk, skb);
local_bh_enable();
break;
case NF_QUEUE:
- RCU_INIT_POINTER(entry->state.hook_entries, hook_entry);
- err = nf_queue(skb, &entry->state,
- verdict >> NF_VERDICT_QBITS);
- if (err < 0) {
- if (err == -ESRCH &&
- (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ err = nf_queue(skb, &entry->state, &hook_entry, verdict);
+ if (err == 1) {
+ if (hook_entry)
goto next_hook;
- kfree_skb(skb);
+ goto okfn;
}
break;
case NF_STOLEN:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index b70d3ea1430e..e5194f6f906c 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2570,7 +2570,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
}
if (set->timeout &&
- nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout),
+ nla_put_be64(skb, NFTA_SET_TIMEOUT,
+ cpu_to_be64(jiffies_to_msecs(set->timeout)),
NFTA_SET_PAD))
goto nla_put_failure;
if (set->gc_int &&
@@ -2859,7 +2860,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
if (nla[NFTA_SET_TIMEOUT] != NULL) {
if (!(flags & NFT_SET_TIMEOUT))
return -EINVAL;
- timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT]));
+ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+ nla[NFTA_SET_TIMEOUT])));
}
gc_int = 0;
if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
@@ -2956,12 +2958,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
if (err < 0)
- goto err2;
+ goto err3;
list_add_tail_rcu(&set->list, &table->sets);
table->use++;
return 0;
+err3:
+ ops->destroy(set);
err2:
kfree(set);
err1:
@@ -3176,7 +3180,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
- cpu_to_be64(*nft_set_ext_timeout(ext)),
+ cpu_to_be64(jiffies_to_msecs(
+ *nft_set_ext_timeout(ext))),
NFTA_SET_ELEM_PAD))
goto nla_put_failure;
@@ -3445,21 +3450,22 @@ void *nft_set_elem_init(const struct nft_set *set,
memcpy(nft_set_ext_data(ext), data, set->dlen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
*nft_set_ext_expiration(ext) =
- jiffies + msecs_to_jiffies(timeout);
+ jiffies + timeout;
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
*nft_set_ext_timeout(ext) = timeout;
return elem;
}
-void nft_set_elem_destroy(const struct nft_set *set, void *elem)
+void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ bool destroy_expr)
{
struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_uninit(nft_set_ext_data(ext), set->dtype);
- if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+ if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
kfree(elem);
@@ -3532,7 +3538,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
- timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT]));
+ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+ nla[NFTA_SET_ELEM_TIMEOUT])));
} else if (set->flags & NFT_SET_TIMEOUT) {
timeout = set->timeout;
}
@@ -3565,6 +3572,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
dreg = nft_type_to_reg(set->dtype);
list_for_each_entry(binding, &set->bindings, list) {
struct nft_ctx bind_ctx = {
+ .net = ctx->net,
.afi = ctx->afi,
.table = ctx->table,
.chain = (struct nft_chain *)binding->chain,
@@ -3812,7 +3820,7 @@ void nft_set_gc_batch_release(struct rcu_head *rcu)
gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
for (i = 0; i < gcb->head.cnt; i++)
- nft_set_elem_destroy(gcb->head.set, gcb->elems[i]);
+ nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true);
kfree(gcb);
}
EXPORT_SYMBOL_GPL(nft_set_gc_batch_release);
@@ -4030,7 +4038,7 @@ static void nf_tables_commit_release(struct nft_trans *trans)
break;
case NFT_MSG_DELSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),
- nft_trans_elem(trans).priv);
+ nft_trans_elem(trans).priv, true);
break;
}
kfree(trans);
@@ -4171,7 +4179,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
break;
case NFT_MSG_NEWSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),
- nft_trans_elem(trans).priv);
+ nft_trans_elem(trans).priv, true);
break;
}
kfree(trans);
@@ -4421,9 +4429,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
* Otherwise a 0 is returned and the attribute value is stored in the
* destination variable.
*/
-unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
+int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
{
- int val;
+ u32 val;
val = ntohl(nla_get_be32(attr));
if (val > max)
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index e3b83c31da2e..31ca94793aa9 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -44,18 +44,22 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
&regs->data[priv->sreg_key],
&regs->data[priv->sreg_data],
timeout, GFP_ATOMIC);
- if (elem == NULL) {
- if (set->size)
- atomic_dec(&set->nelems);
- return NULL;
- }
+ if (elem == NULL)
+ goto err1;
ext = nft_set_elem_ext(set, elem);
if (priv->expr != NULL &&
nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
- return NULL;
+ goto err2;
return elem;
+
+err2:
+ nft_set_elem_destroy(set, elem, false);
+err1:
+ if (set->size)
+ atomic_dec(&set->nelems);
+ return NULL;
}
static void nft_dynset_eval(const struct nft_expr *expr,
@@ -139,6 +143,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
return PTR_ERR(set);
}
+ if (set->ops->update == NULL)
+ return -EOPNOTSUPP;
+
if (set->flags & NFT_SET_CONSTANT)
return -EBUSY;
@@ -158,7 +165,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
- timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
+ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+ tb[NFTA_DYNSET_TIMEOUT])));
}
priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -246,7 +254,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
+ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
+ cpu_to_be64(jiffies_to_msecs(priv->timeout)),
NFTA_DYNSET_PAD))
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index a84cf3d66056..47beb3abcc9d 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,7 +59,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
- u32 offset, len, err;
+ u32 offset, len;
+ int err;
if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 09473b415b95..d5447a22275c 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -44,6 +44,7 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_LEN] = { .type = NLA_U32 },
[NFTA_HASH_MODULUS] = { .type = NLA_U32 },
[NFTA_HASH_SEED] = { .type = NLA_U32 },
+ [NFTA_HASH_OFFSET] = { .type = NLA_U32 },
};
static int nft_hash_init(const struct nft_ctx *ctx,
@@ -52,6 +53,7 @@ static int nft_hash_init(const struct nft_ctx *ctx,
{
struct nft_hash *priv = nft_expr_priv(expr);
u32 len;
+ int err;
if (!tb[NFTA_HASH_SREG] ||
!tb[NFTA_HASH_DREG] ||
@@ -66,8 +68,10 @@ static int nft_hash_init(const struct nft_ctx *ctx,
priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
- len = ntohl(nla_get_be32(tb[NFTA_HASH_LEN]));
- if (len == 0 || len > U8_MAX)
+ err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
+ if (len == 0)
return -ERANGE;
priv->len = len;
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index c6d5358482d1..8f0aaaea1376 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -28,22 +28,20 @@ static void nft_range_eval(const struct nft_expr *expr,
const struct nft_pktinfo *pkt)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
- bool mismatch;
int d1, d2;
d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
switch (priv->op) {
case NFT_RANGE_EQ:
- mismatch = (d1 < 0 || d2 > 0);
+ if (d1 < 0 || d2 > 0)
+ regs->verdict.code = NFT_BREAK;
break;
case NFT_RANGE_NEQ:
- mismatch = (d1 >= 0 && d2 <= 0);
+ if (d1 >= 0 && d2 <= 0)
+ regs->verdict.code = NFT_BREAK;
break;
}
-
- if (mismatch)
- regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
@@ -59,6 +57,13 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
struct nft_range_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc_from, desc_to;
int err;
+ u32 op;
+
+ if (!tb[NFTA_RANGE_SREG] ||
+ !tb[NFTA_RANGE_OP] ||
+ !tb[NFTA_RANGE_FROM_DATA] ||
+ !tb[NFTA_RANGE_TO_DATA])
+ return -EINVAL;
err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
&desc_from, tb[NFTA_RANGE_FROM_DATA]);
@@ -80,7 +85,20 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
if (err < 0)
goto err2;
- priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP]));
+ err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
+ if (err < 0)
+ goto err2;
+
+ switch (op) {
+ case NFT_RANGE_EQ:
+ case NFT_RANGE_NEQ:
+ break;
+ default:
+ err = -EINVAL;
+ goto err2;
+ }
+
+ priv->op = op;
priv->len = desc_from.len;
return 0;
err2:
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 3794cb2fc788..a3dface3e6e6 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -98,7 +98,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
const struct nft_set_ext **ext)
{
struct nft_hash *priv = nft_set_priv(set);
- struct nft_hash_elem *he;
+ struct nft_hash_elem *he, *prev;
struct nft_hash_cmp_arg arg = {
.genmask = NFT_GENMASK_ANY,
.set = set,
@@ -112,15 +112,24 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
he = new(set, expr, regs);
if (he == NULL)
goto err1;
- if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
- nft_hash_params))
+
+ prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
+ nft_hash_params);
+ if (IS_ERR(prev))
goto err2;
+
+ /* Another cpu may race to insert the element with the same key */
+ if (prev) {
+ nft_set_elem_destroy(set, he, true);
+ he = prev;
+ }
+
out:
*ext = &he->ext;
return true;
err2:
- nft_set_elem_destroy(set, he);
+ nft_set_elem_destroy(set, he, true);
err1:
return false;
}
@@ -332,7 +341,7 @@ static int nft_hash_init(const struct nft_set *set,
static void nft_hash_elem_destroy(void *ptr, void *arg)
{
- nft_set_elem_destroy((const struct nft_set *)arg, ptr);
+ nft_set_elem_destroy((const struct nft_set *)arg, ptr, true);
}
static void nft_hash_destroy(const struct nft_set *set)
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 38b5bda242f8..36493a7cae88 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -266,7 +266,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
while ((node = priv->root.rb_node) != NULL) {
rb_erase(node, &priv->root);
rbe = rb_entry(node, struct nft_rbtree_elem, node);
- nft_set_elem_destroy(set, rbe);
+ nft_set_elem_destroy(set, rbe, true);
}
}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index e0aa7c1d0224..fc4977456c30 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1513,7 +1513,7 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
if (!num_hooks)
return ERR_PTR(-EINVAL);
- ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
+ ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index 018eed7e1ff1..8668a5c18dc3 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
+ li.u.ulog.flags = 0;
if (info->flags & XT_NFLOG_F_COPY_LEN)
li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 69f78e96fdb4..b83e158e116a 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
u_int32_t newmark;
ct = nf_ct_get(skb, &ctinfo);
- if (ct == NULL)
+ if (ct == NULL || nf_ct_is_untracked(ct))
return XT_CONTINUE;
switch (info->mode) {
@@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
const struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
- if (ct == NULL)
+ if (ct == NULL || nf_ct_is_untracked(ct))
return false;
return ((ct->mark & info->mask) == info->mark) ^ info->invert;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 2fab0c65aa94..b89b688e9d01 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -431,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
-#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24))
+#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
* us the power of 2 below the theoretical max, so GCC simply does a
@@ -473,7 +473,7 @@ static u64 user2credits(u64 user, int revision)
return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
XT_HASHLIMIT_SCALE);
} else {
- if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+ if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
* HZ * CREDITS_PER_JIFFY;
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 89d53104c6b3..000e70377f85 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -26,6 +26,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
+MODULE_ALIAS("ipt_ipcomp");
+MODULE_ALIAS("ip6t_ipcomp");
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 62bea4591054..602e5ebe9db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -322,14 +322,11 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
-static void netlink_sock_destruct(struct sock *sk)
+static void __netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (nlk->cb_running) {
- if (nlk->cb.done)
- nlk->cb.done(&nlk->cb);
-
module_put(nlk->cb.module);
kfree_skb(nlk->cb.skb);
}
@@ -346,6 +343,28 @@ static void netlink_sock_destruct(struct sock *sk)
WARN_ON(nlk_sk(sk)->groups);
}
+static void netlink_sock_destruct_work(struct work_struct *work)
+{
+ struct netlink_sock *nlk = container_of(work, struct netlink_sock,
+ work);
+
+ nlk->cb.done(&nlk->cb);
+ __netlink_sock_destruct(&nlk->sk);
+}
+
+static void netlink_sock_destruct(struct sock *sk)
+{
+ struct netlink_sock *nlk = nlk_sk(sk);
+
+ if (nlk->cb_running && nlk->cb.done) {
+ INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+ schedule_work(&nlk->work);
+ return;
+ }
+
+ __netlink_sock_destruct(sk);
+}
+
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
* SMP. Look, when several writers sleep and reader wakes them up, all but one
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 3cfd6cc60504..4fdb38318977 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -3,6 +3,7 @@
#include <linux/rhashtable.h>
#include <linux/atomic.h>
+#include <linux/workqueue.h>
#include <net/sock.h>
#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -33,6 +34,7 @@ struct netlink_sock {
struct rhash_head node;
struct rcu_head rcu;
+ struct work_struct work;
};
static inline struct netlink_sock *nlk_sk(struct sock *sk)
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index b2f0e986a6f4..a5546249fb10 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -178,11 +178,8 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
}
cb->args[1] = i;
} else {
- if (req->sdiag_protocol >= MAX_LINKS) {
- read_unlock(&nl_table_lock);
- rcu_read_unlock();
+ if (req->sdiag_protocol >= MAX_LINKS)
return -ENOENT;
- }
err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 23cc12639ba7..49c28e8ef01b 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -404,7 +404,7 @@ int __genl_register_family(struct genl_family *family)
err = genl_validate_assign_mc_groups(family);
if (err)
- goto errout_locked;
+ goto errout_free;
list_add_tail(&family->family_list, genl_family_chain(family->id));
genl_unlock_all();
@@ -417,6 +417,8 @@ int __genl_register_family(struct genl_family *family)
return 0;
+errout_free:
+ kfree(family->attrbuf);
errout_locked:
genl_unlock_all();
errout:
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 31045ef44a82..fecefa2dc94e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -370,8 +370,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
skb_orphan(skb);
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
err = nf_ct_frag6_gather(net, skb, user);
- if (err)
+ if (err) {
+ if (err != -EINPROGRESS)
+ kfree_skb(skb);
return err;
+ }
key->ip.proto = ipv6_hdr(skb)->nexthdr;
ovs_cb.mru = IP6CB(skb)->frag_max_size;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 11db0d619c00..dd2332390c45 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
static int packet_direct_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
- netdev_features_t features;
+ struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
@@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
!netif_carrier_ok(dev)))
goto drop;
- features = netif_skb_features(skb);
- if (skb_needs_linearize(skb, features) &&
- __skb_linearize(skb))
+ skb = validate_xmit_skb_list(skb, dev);
+ if (skb != orig_skb)
goto drop;
txq = skb_get_tx_queue(dev, skb);
@@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
return ret;
drop:
atomic_long_inc(&dev->tx_dropped);
- kfree_skb(skb);
+ kfree_skb_list(skb);
return NET_XMIT_DROP;
}
@@ -3649,19 +3648,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
case TPACKET_V3:
- po->tp_version = val;
- return 0;
+ break;
default:
return -EINVAL;
}
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_version = val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_RESERVE:
{
@@ -4165,6 +4170,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
+ lock_sock(sk);
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
net_warn_ratelimited("Tx-ring is not supported.\n");
@@ -4246,7 +4252,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
goto out;
}
- lock_sock(sk);
/* Detach socket from network */
spin_lock(&po->bind_lock);
@@ -4295,11 +4300,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, rb_queue);
}
- release_sock(sk);
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
+ release_sock(sk);
return err;
}
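
The af_packet hunks above move the "no rings yet" check under the socket lock and take the lock before packet_set_ring does its work, so a racing setsockopt cannot change tp_version while a ring is being created. A minimal sketch of that check-and-set pattern, assuming hypothetical state (struct my_po and my_set_version stand in for the real packet socket fields):

#include <linux/errno.h>
#include <net/sock.h>

struct my_po {
        void *rx_ring;
        void *tx_ring;
        int tp_version;
};

static int my_set_version(struct sock *sk, struct my_po *po, int val)
{
        int ret;

        lock_sock(sk);
        if (po->rx_ring || po->tx_ring) {
                ret = -EBUSY;           /* a ring already exists: too late */
        } else {
                po->tp_version = val;   /* no ring can appear while locked */
                ret = 0;
        }
        release_sock(sk);
        return ret;
}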
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 0e72bec1529f..56c7d27eefee 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -13,5 +13,5 @@ obj-$(CONFIG_RDS_TCP) += rds_tcp.o
rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
tcp_send.o tcp_stats.o
-ccflags-$(CONFIG_RDS_DEBUG) := -DDEBUG
+ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG
diff --git a/net/rds/rds.h b/net/rds/rds.h
index fd0bccb2f9f9..67ba67c058b1 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -33,7 +33,7 @@
#define KERNEL_HAS_ATOMIC64
#endif
-#ifdef DEBUG
+#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index fcddacc92e01..20e2923dc827 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -659,6 +659,8 @@ out_recv:
out_pernet:
unregister_pernet_subsys(&rds_tcp_net_ops);
out_slab:
+ if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
+ pr_warn("could not unregister rds_tcp_dev_notifier\n");
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 4353a29f3b57..1ed18d8c9c9f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -276,7 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
goto error;
trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
- here, ERR_PTR(ret));
+ here, NULL);
spin_lock_bh(&call->conn->params.peer->lock);
hlist_add_head(&call->error_link,
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 941b724d523b..862eea6b266c 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -193,8 +193,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000);
dst = ip6_route_output(&init_net, NULL, fl6);
- if (IS_ERR(dst)) {
- _leave(" [route err %ld]", PTR_ERR(dst));
+ if (dst->error) {
+ _leave(" [route err %d]", dst->error);
return;
}
break;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a512b18c0088..f893d180da1c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1028,8 +1028,7 @@ static struct nlattr *find_dump_kind(const struct nlmsghdr *n)
if (tb[1] == NULL)
return NULL;
- if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
- nla_len(tb[1]), NULL) < 0)
+ if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0)
return NULL;
kind = tb2[TCA_ACT_KIND];
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 667dc382df82..6b07fba5770b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -207,8 +207,11 @@ out:
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
u64 lastuse)
{
- tcf_lastuse_update(&a->tcfa_tm);
+ struct tcf_mirred *m = to_mirred(a);
+ struct tcf_t *tm = &m->tcf_tm;
+
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+ tm->lastuse = lastuse;
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index b54d56d4959b..cf9b2fe8eac6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -108,6 +108,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind)
kfree(keys);
}
+static bool offset_valid(struct sk_buff *skb, int offset)
+{
+ if (offset > 0 && offset > skb->len)
+ return false;
+
+ if (offset < 0 && -offset > skb_headroom(skb))
+ return false;
+
+ return true;
+}
+
static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -134,6 +145,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
if (tkey->offmask) {
char *d, _d;
+ if (!offset_valid(skb, off + tkey->at)) {
+ pr_info("tc filter pedit 'at' offset %d out of bounds\n",
+ off + tkey->at);
+ goto bad;
+ }
d = skb_header_pointer(skb, off + tkey->at, 1,
&_d);
if (!d)
@@ -146,10 +162,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
" offset must be on 32 bit boundaries\n");
goto bad;
}
- if (offset > 0 && offset > skb->len) {
- pr_info("tc filter pedit"
- " offset %d can't exceed pkt length %d\n",
- offset, skb->len);
+
+ if (!offset_valid(skb, off + offset)) {
+ pr_info("tc filter pedit offset %d out of bounds\n",
+ offset);
goto bad;
}
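
The pedit hunks above factor the bounds test into offset_valid() and apply it to both the 'at' lookup and the final write offset: positive offsets must stay within the packet length, negative ones within the available headroom. A standalone sketch of the same check, with illustrative types (struct pkt is a stand-in for the skb):

#include <stdbool.h>

struct pkt {
        unsigned int len;
        unsigned int headroom;
};

static bool offset_valid(const struct pkt *p, int offset)
{
        if (offset > 0 && (unsigned int)offset > p->len)
                return false;           /* past the end of the packet */

        if (offset < 0 && (unsigned int)(-offset) > p->headroom)
                return false;           /* before the start of the headroom */

        return true;
}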
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2ee29a3375f6..b05d4a2155b0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -345,7 +345,8 @@ replay:
if (err == 0) {
struct tcf_proto *next = rtnl_dereference(tp->next);
- tfilter_notify(net, skb, n, tp, fh,
+ tfilter_notify(net, skb, n, tp,
+ t->tcm_handle,
RTM_DELTFILTER, false);
if (tcf_destroy(tp, false))
RCU_INIT_POINTER(*back, next);
@@ -429,7 +430,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
if (!skb)
return -ENOBUFS;
- if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
+ if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
+ n->nlmsg_flags, event) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index eb219b78cd49..5877f6061b57 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f;
- if (head == NULL)
- return 0UL;
-
list_for_each_entry(f, &head->flist, link) {
if (f->handle == handle) {
l = (unsigned long) f;
@@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force)
tcf_unbind_filter(tp, &f->res);
call_rcu(&f->rcu, basic_delete_filter);
}
- RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
return true;
}
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index bb1d5a487081..0a47ba5e6109 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -292,7 +292,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
call_rcu(&prog->rcu, __cls_bpf_delete_prog);
}
- RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
return true;
}
@@ -303,9 +302,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
struct cls_bpf_prog *prog;
unsigned long ret = 0UL;
- if (head == NULL)
- return 0UL;
-
list_for_each_entry(prog, &head->plist, link) {
if (prog->handle == handle) {
ret = (unsigned long) prog;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 85233c470035..c1f20077837f 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -137,11 +137,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
if (!force)
return false;
-
- if (head) {
- RCU_INIT_POINTER(tp->root, NULL);
+ /* Head can still be NULL due to cls_cgroup_init(). */
+ if (head)
call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
- }
+
return true;
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e39672394c7b..6575aba87630 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -596,7 +596,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force)
list_del_rcu(&f->list);
call_rcu(&f->rcu, flow_destroy_filter);
}
- RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
return true;
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index f6f40fba599b..904442421db3 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
+#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/in6.h>
@@ -64,7 +65,10 @@ struct cls_fl_head {
bool mask_assigned;
struct list_head filters;
struct rhashtable_params ht_params;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
struct cls_fl_filter {
@@ -269,6 +273,24 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}
+static void fl_destroy_sleepable(struct work_struct *work)
+{
+ struct cls_fl_head *head = container_of(work, struct cls_fl_head,
+ work);
+ if (head->mask_assigned)
+ rhashtable_destroy(&head->ht);
+ kfree(head);
+ module_put(THIS_MODULE);
+}
+
+static void fl_destroy_rcu(struct rcu_head *rcu)
+{
+ struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
+
+ INIT_WORK(&head->work, fl_destroy_sleepable);
+ schedule_work(&head->work);
+}
+
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -282,10 +304,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
list_del_rcu(&f->list);
call_rcu(&f->rcu, fl_destroy_filter);
}
- RCU_INIT_POINTER(tp->root, NULL);
- if (head->mask_assigned)
- rhashtable_destroy(&head->ht);
- kfree_rcu(head, rcu);
+
+ __module_get(THIS_MODULE);
+ call_rcu(&head->rcu, fl_destroy_rcu);
return true;
}
@@ -711,8 +732,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
goto errout;
if (fold) {
- rhashtable_remove_fast(&head->ht, &fold->ht_node,
- head->ht_params);
+ if (!tc_skip_sw(fold->flags))
+ rhashtable_remove_fast(&head->ht, &fold->ht_node,
+ head->ht_params);
fl_hw_destroy_filter(tp, (unsigned long)fold);
}
@@ -739,8 +761,9 @@ static int fl_delete(struct tcf_proto *tp, unsigned long arg)
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
- rhashtable_remove_fast(&head->ht, &f->ht_node,
- head->ht_params);
+ if (!tc_skip_sw(f->flags))
+ rhashtable_remove_fast(&head->ht, &f->ht_node,
+ head->ht_params);
list_del_rcu(&f->list);
fl_hw_destroy_filter(tp, (unsigned long)f);
tcf_unbind_filter(tp, &f->res);
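
The cls_flower destroy path above is a two-stage teardown: the RCU callback runs in a context that cannot sleep, so it only queues a work item, and the work handler performs the sleepable cleanup (rhashtable_destroy) and drops the module reference taken before call_rcu. A kernel-style sketch of the pattern, with illustrative names (struct head, destroy_sleepable, destroy_rcu are stand-ins, not the cls_flower symbols):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct head {
        union {
                struct work_struct work;
                struct rcu_head rcu;    /* reused once the grace period ends */
        };
};

static void destroy_sleepable(struct work_struct *work)
{
        struct head *h = container_of(work, struct head, work);

        /* sleepable cleanup (e.g. rhashtable_destroy) would go here */
        kfree(h);
        module_put(THIS_MODULE);        /* pairs with __module_get() below */
}

static void destroy_rcu(struct rcu_head *rcu)
{
        struct head *h = container_of(rcu, struct head, rcu);

        INIT_WORK(&h->work, destroy_sleepable);
        schedule_work(&h->work);        /* defer to process context */
}

static void destroy(struct head *h)
{
        __module_get(THIS_MODULE);      /* keep the module until the work runs */
        call_rcu(&h->rcu, destroy_rcu);
}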
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 25927b6c4436..f935429bd5ef 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -114,7 +114,6 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
call_rcu(&f->rcu, mall_destroy_filter);
}
- RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
return true;
}
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 4f05a19fb073..322438fb3ffc 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
return -1;
nhptr = ip_hdr(skb);
#endif
-
+ if (unlikely(!head))
+ return -1;
restart:
#if RSVP_DST_LEN == 4
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 96144bdf30db..0751245a6ace 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -543,7 +543,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force)
walker.fn = tcindex_destroy_element;
tcindex_walk(tp, &walker);
- RCU_INIT_POINTER(tp->root, NULL);
call_rcu(&p->rcu, __tcindex_destroy);
return true;
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a2ea1d1cc06a..a01a56ec8b8c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -181,9 +181,10 @@ int sctp_rcv(struct sk_buff *skb)
* bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
- if (asoc) {
- sctp_association_put(asoc);
+ if (transport) {
+ sctp_transport_put(transport);
asoc = NULL;
+ transport = NULL;
} else {
sctp_endpoint_put(ep);
ep = NULL;
@@ -269,8 +270,8 @@ int sctp_rcv(struct sk_buff *skb)
bh_unlock_sock(sk);
/* Release the asoc/ep ref we took in the lookup calls. */
- if (asoc)
- sctp_association_put(asoc);
+ if (transport)
+ sctp_transport_put(transport);
else
sctp_endpoint_put(ep);
@@ -283,8 +284,8 @@ discard_it:
discard_release:
/* Release the asoc/ep ref we took in the lookup calls. */
- if (asoc)
- sctp_association_put(asoc);
+ if (transport)
+ sctp_transport_put(transport);
else
sctp_endpoint_put(ep);
@@ -300,6 +301,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
+ struct sctp_transport *t = chunk->transport;
struct sctp_ep_common *rcvr = NULL;
int backloged = 0;
@@ -351,7 +353,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
done:
/* Release the refs we took in sctp_add_backlog */
if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
- sctp_association_put(sctp_assoc(rcvr));
+ sctp_transport_put(t);
else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
sctp_endpoint_put(sctp_ep(rcvr));
else
@@ -363,6 +365,7 @@ done:
static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+ struct sctp_transport *t = chunk->transport;
struct sctp_ep_common *rcvr = chunk->rcvr;
int ret;
@@ -373,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
* from us
*/
if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
- sctp_association_hold(sctp_assoc(rcvr));
+ sctp_transport_hold(t);
else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
sctp_endpoint_hold(sctp_ep(rcvr));
else
@@ -537,15 +540,15 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
return sk;
out:
- sctp_association_put(asoc);
+ sctp_transport_put(transport);
return NULL;
}
/* Common cleanup code for icmp/icmpv6 error handler. */
-void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
+void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
{
bh_unlock_sock(sk);
- sctp_association_put(asoc);
+ sctp_transport_put(t);
}
/*
@@ -641,7 +644,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
}
out_unlock:
- sctp_err_finish(sk, asoc);
+ sctp_err_finish(sk, transport);
}
/*
@@ -952,11 +955,8 @@ static struct sctp_association *__sctp_lookup_association(
goto out;
asoc = t->asoc;
- sctp_association_hold(asoc);
*pt = t;
- sctp_transport_put(t);
-
out:
return asoc;
}
@@ -986,7 +986,7 @@ int sctp_has_association(struct net *net,
struct sctp_transport *transport;
if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
- sctp_association_put(asoc);
+ sctp_transport_put(transport);
return 1;
}
@@ -1021,7 +1021,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
struct sctphdr *sh = sctp_hdr(skb);
union sctp_params params;
sctp_init_chunk_t *init;
- struct sctp_transport *transport;
struct sctp_af *af;
/*
@@ -1052,7 +1051,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
af->from_addr_param(paddr, params.addr, sh->source, 0);
- asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
+ asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
if (asoc)
return asoc;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f473779e8b1c..176af3080a2b 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -198,7 +198,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
out_unlock:
- sctp_err_finish(sk, asoc);
+ sctp_err_finish(sk, transport);
out:
if (likely(idev != NULL))
in6_dev_put(idev);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 2a5c1896d18f..6cb0df859195 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -418,6 +418,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
__u8 has_data = 0;
int gso = 0;
int pktcount = 0;
+ int auth_len = 0;
struct dst_entry *dst;
unsigned char *auth = NULL; /* pointer to auth in skb data */
@@ -510,7 +511,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
list_for_each_entry(chunk, &packet->chunk_list, list) {
int padded = SCTP_PAD4(chunk->skb->len);
- if (pkt_size + padded > tp->pathmtu)
+ if (chunk == packet->auth)
+ auth_len = padded;
+ else if (auth_len + padded + packet->overhead >
+ tp->pathmtu)
+ goto nomem;
+ else if (pkt_size + padded > tp->pathmtu)
break;
pkt_size += padded;
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 026e3bca4a94..8ec20a64a3f8 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
+ /* Report violation if chunk len overflows */
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
@@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
}
}
- /* Report violation if chunk len overflows */
- ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
- commands);
-
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fb02c7033307..f23ad913dc7a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1214,9 +1214,12 @@ static int __sctp_connect(struct sock *sk,
timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
- err = sctp_wait_for_connect(asoc, &timeo);
- if ((err == 0 || err == -EINPROGRESS) && assoc_id)
+ if (assoc_id)
*assoc_id = asoc->assoc_id;
+ err = sctp_wait_for_connect(asoc, &timeo);
+ /* Note: the asoc may be freed after the return of
+ * sctp_wait_for_connect.
+ */
/* Don't free association on exit. */
asoc = NULL;
@@ -4282,19 +4285,18 @@ static void sctp_shutdown(struct sock *sk, int how)
{
struct net *net = sock_net(sk);
struct sctp_endpoint *ep;
- struct sctp_association *asoc;
if (!sctp_style(sk, TCP))
return;
- if (how & SEND_SHUTDOWN) {
+ ep = sctp_sk(sk)->ep;
+ if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
+ struct sctp_association *asoc;
+
sk->sk_state = SCTP_SS_CLOSING;
- ep = sctp_sk(sk)->ep;
- if (!list_empty(&ep->asocs)) {
- asoc = list_entry(ep->asocs.next,
- struct sctp_association, asocs);
- sctp_primitive_SHUTDOWN(net, asoc, NULL);
- }
+ asoc = list_entry(ep->asocs.next,
+ struct sctp_association, asocs);
+ sctp_primitive_SHUTDOWN(net, asoc, NULL);
}
}
@@ -4480,12 +4482,9 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
if (!transport || !sctp_transport_hold(transport))
goto out;
- sctp_association_hold(transport->asoc);
- sctp_transport_put(transport);
-
rcu_read_unlock();
err = cb(transport, p);
- sctp_association_put(transport->asoc);
+ sctp_transport_put(transport);
out:
return err;
@@ -4687,7 +4686,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
- if (len <= 0)
+ if (len == 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
@@ -6430,6 +6429,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
lock_sock(sk);
switch (optname) {
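
The sctp_getsockopt hunk above rejects a negative user-supplied length up front, before the per-option handlers compare or truncate it against structure sizes where a negative int would wrap to a huge unsigned value. A small standalone sketch of that validation, with illustrative names (get_option is hypothetical):

#include <errno.h>
#include <string.h>

static int get_option(void *dst, const void *src, size_t optsize, int len)
{
        if (len < 0)
                return -EINVAL;         /* would wrap in the comparisons below */

        if ((size_t)len > optsize)
                len = (int)optsize;     /* truncate to what the option provides */

        memcpy(dst, src, (size_t)len);
        return len;
}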
diff --git a/net/socket.c b/net/socket.c
index 5a9bf5ee2464..73dc69f9681e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -341,8 +341,23 @@ static const struct xattr_handler sockfs_xattr_handler = {
.get = sockfs_xattr_get,
};
+static int sockfs_security_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *suffix, const void *value,
+ size_t size, int flags)
+{
+ /* Handled by LSM. */
+ return -EAGAIN;
+}
+
+static const struct xattr_handler sockfs_security_xattr_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .set = sockfs_security_xattr_set,
+};
+
static const struct xattr_handler *sockfs_xattr_handlers[] = {
&sockfs_xattr_handler,
+ &sockfs_security_xattr_handler,
NULL
};
@@ -2038,6 +2053,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
if (err)
break;
++datagrams;
+ if (msg_data_left(&msg_sys))
+ break;
cond_resched();
}
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index d8bd97a5a7c9..3dfd769dc5b5 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1616,7 +1616,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
- __be32 seq;
+ __be32 *seq = NULL;
struct kvec iov;
struct xdr_buf verf_buf;
struct xdr_netobj mic;
@@ -1631,9 +1631,12 @@ gss_validate(struct rpc_task *task, __be32 *p)
goto out_bad;
if (flav != RPC_AUTH_GSS)
goto out_bad;
- seq = htonl(task->tk_rqstp->rq_seqno);
- iov.iov_base = &seq;
- iov.iov_len = sizeof(seq);
+ seq = kmalloc(4, GFP_NOFS);
+ if (!seq)
+ goto out_bad;
+ *seq = htonl(task->tk_rqstp->rq_seqno);
+ iov.iov_base = seq;
+ iov.iov_len = 4;
xdr_buf_from_iov(&iov, &verf_buf);
mic.data = (u8 *)p;
mic.len = len;
@@ -1653,11 +1656,13 @@ gss_validate(struct rpc_task *task, __be32 *p)
gss_put_ctx(ctx);
dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
task->tk_pid, __func__);
+ kfree(seq);
return p + XDR_QUADLEN(len);
out_bad:
gss_put_ctx(ctx);
dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
PTR_ERR(ret));
+ kfree(seq);
return ret;
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 244245bcbbd2..90115ceefd49 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -166,8 +166,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct scatterlist sg[1];
- int err;
- u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ int err = -1;
+ u8 *checksumdata;
u8 rc4salt[4];
struct crypto_ahash *md5;
struct crypto_ahash *hmac_md5;
@@ -187,23 +187,22 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
return GSS_S_FAILURE;
}
+ checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+ if (!checksumdata)
+ return GSS_S_FAILURE;
+
md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(md5))
- return GSS_S_FAILURE;
+ goto out_free_cksum;
hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
CRYPTO_ALG_ASYNC);
- if (IS_ERR(hmac_md5)) {
- crypto_free_ahash(md5);
- return GSS_S_FAILURE;
- }
+ if (IS_ERR(hmac_md5))
+ goto out_free_md5;
req = ahash_request_alloc(md5, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(hmac_md5);
- crypto_free_ahash(md5);
- return GSS_S_FAILURE;
- }
+ if (!req)
+ goto out_free_hmac_md5;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -232,11 +231,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
ahash_request_free(req);
req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(hmac_md5);
- crypto_free_ahash(md5);
- return GSS_S_FAILURE;
- }
+ if (!req)
+ goto out_free_hmac_md5;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -258,8 +254,12 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
cksumout->len = kctx->gk5e->cksumlength;
out:
ahash_request_free(req);
- crypto_free_ahash(md5);
+out_free_hmac_md5:
crypto_free_ahash(hmac_md5);
+out_free_md5:
+ crypto_free_ahash(md5);
+out_free_cksum:
+ kfree(checksumdata);
return err ? GSS_S_FAILURE : 0;
}
@@ -276,8 +276,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
struct crypto_ahash *tfm;
struct ahash_request *req;
struct scatterlist sg[1];
- int err;
- u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ int err = -1;
+ u8 *checksumdata;
unsigned int checksumlen;
if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
@@ -291,15 +291,17 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
return GSS_S_FAILURE;
}
+ checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+ if (checksumdata == NULL)
+ return GSS_S_FAILURE;
+
tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
- return GSS_S_FAILURE;
+ goto out_free_cksum;
req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(tfm);
- return GSS_S_FAILURE;
- }
+ if (!req)
+ goto out_free_ahash;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -349,7 +351,10 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
cksumout->len = kctx->gk5e->cksumlength;
out:
ahash_request_free(req);
+out_free_ahash:
crypto_free_ahash(tfm);
+out_free_cksum:
+ kfree(checksumdata);
return err ? GSS_S_FAILURE : 0;
}
@@ -368,8 +373,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
struct crypto_ahash *tfm;
struct ahash_request *req;
struct scatterlist sg[1];
- int err;
- u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ int err = -1;
+ u8 *checksumdata;
unsigned int checksumlen;
if (kctx->gk5e->keyed_cksum == 0) {
@@ -383,16 +388,18 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
return GSS_S_FAILURE;
}
+ checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+ if (!checksumdata)
+ return GSS_S_FAILURE;
+
tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
- return GSS_S_FAILURE;
+ goto out_free_cksum;
checksumlen = crypto_ahash_digestsize(tfm);
req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(tfm);
- return GSS_S_FAILURE;
- }
+ if (!req)
+ goto out_free_ahash;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -433,7 +440,10 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
}
out:
ahash_request_free(req);
+out_free_ahash:
crypto_free_ahash(tfm);
+out_free_cksum:
+ kfree(checksumdata);
return err ? GSS_S_FAILURE : 0;
}
@@ -666,14 +676,17 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
u32 ret;
struct scatterlist sg[1];
SKCIPHER_REQUEST_ON_STACK(req, cipher);
- u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
+ u8 *data;
struct page **save_pages;
u32 len = buf->len - offset;
- if (len > ARRAY_SIZE(data)) {
+ if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
WARN_ON(0);
return -ENOMEM;
}
+ data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
/*
* For encryption, we want to read from the cleartext
@@ -708,6 +721,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
ret = write_bytes_to_xdr_buf(buf, offset, data, len);
out:
+ kfree(data);
return ret;
}
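
The auth_gss and krb5 hunks above replace small on-stack scratch buffers with kmalloc'd ones, presumably because these buffers end up referenced by scatterlists and a virtually mapped stack cannot be used that way, and they restructure the error exits into a goto ladder that frees exactly what has been set up. A kernel-style sketch of the shape (checksum_sketch and the 64-byte size are illustrative; the crypto calls mirror the ones in the hunk):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int checksum_sketch(void)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        u8 *checksumdata;
        int err = -1;

        checksumdata = kmalloc(64, GFP_NOFS);   /* heap, not the stack */
        if (!checksumdata)
                return -1;

        tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                goto out_free_cksum;            /* tfm is an error pointer */

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                goto out_free_ahash;

        /* ... build a scatterlist over checksumdata and digest into it ... */
        err = 0;

        ahash_request_free(req);
out_free_ahash:
        crypto_free_ahash(tfm);
out_free_cksum:
        kfree(checksumdata);
        return err;
}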
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d67f7e1bc82d..45662d7f0943 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -718,30 +718,37 @@ gss_write_null_verf(struct svc_rqst *rqstp)
static int
gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
{
- __be32 xdr_seq;
+ __be32 *xdr_seq;
u32 maj_stat;
struct xdr_buf verf_data;
struct xdr_netobj mic;
__be32 *p;
struct kvec iov;
+ int err = -1;
svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
- xdr_seq = htonl(seq);
+ xdr_seq = kmalloc(4, GFP_KERNEL);
+ if (!xdr_seq)
+ return -1;
+ *xdr_seq = htonl(seq);
- iov.iov_base = &xdr_seq;
- iov.iov_len = sizeof(xdr_seq);
+ iov.iov_base = xdr_seq;
+ iov.iov_len = 4;
xdr_buf_from_iov(&iov, &verf_data);
p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
mic.data = (u8 *)(p + 1);
maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
if (maj_stat != GSS_S_COMPLETE)
- return -1;
+ goto out;
*p++ = htonl(mic.len);
memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
p += XDR_QUADLEN(mic.len);
if (!xdr_ressize_check(rqstp, p))
- return -1;
- return 0;
+ goto out;
+ err = 0;
+out:
+ kfree(xdr_seq);
+ return err;
}
struct gss_domain {
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 34dd7b26ee5f..62a482790937 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2753,14 +2753,18 @@ EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
{
+ rcu_read_lock();
xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
+ rcu_read_lock();
rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
xprt);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);
@@ -2770,9 +2774,8 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
struct rpc_xprt_switch *xps;
bool ret;
- xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
-
rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
ret = rpc_xprt_switch_has_addr(xps, sap);
rcu_read_unlock();
return ret;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c3f652395a80..3bc1d61694cb 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure)
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
struct svc_xprt *xprt;
- struct svc_sock *svsk;
- struct socket *sock;
struct list_head *le, *next;
LIST_HEAD(to_be_closed);
- struct linger no_linger = {
- .l_onoff = 1,
- .l_linger = 0,
- };
spin_lock_bh(&serv->sv_lock);
list_for_each_safe(le, next, &serv->sv_tempsocks) {
@@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
list_del_init(le);
xprt = list_entry(le, struct svc_xprt, xpt_list);
dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
- svsk = container_of(xprt, struct svc_sock, sk_xprt);
- sock = svsk->sk_sock;
- kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
- (char *)&no_linger, sizeof(no_linger));
+ xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
svc_close_xprt(xprt);
}
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 57625f64efd5..a4bc98265d88 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
}
+static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
+{
+ struct svc_sock *svsk;
+ struct socket *sock;
+ struct linger no_linger = {
+ .l_onoff = 1,
+ .l_linger = 0,
+ };
+
+ svsk = container_of(xprt, struct svc_sock, sk_xprt);
+ sock = svsk->sk_sock;
+ kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
+ (char *)&no_linger, sizeof(no_linger));
+}
+
/*
* See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
*/
@@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
return NULL;
}
+static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
+{
+}
+
static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
struct net *net,
struct sockaddr *sa, int salen,
@@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = {
.xpo_has_wspace = svc_udp_has_wspace,
.xpo_accept = svc_udp_accept,
.xpo_secure_port = svc_sock_secure_port,
+ .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
};
static struct svc_xprt_class svc_udp_class = {
@@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
.xpo_has_wspace = svc_tcp_has_wspace,
.xpo_accept = svc_tcp_accept,
.xpo_secure_port = svc_sock_secure_port,
+ .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
};
static struct svc_xprt_class svc_tcp_class = {
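
The svc_xprt/svcsock hunks above replace open-coded socket surgery in generic code with a per-transport xpo_kill_temp_xprt hook: TCP sets SO_LINGER(0) so the close aborts, while UDP (and RDMA, below) provide empty stubs. A standalone sketch of that ops-table indirection, with illustrative types (struct xprt, struct xprt_ops and the functions are stand-ins, not the sunrpc definitions):

struct xprt;

struct xprt_ops {
        void (*kill_temp_xprt)(struct xprt *xprt);
};

static void tcp_kill_temp_xprt(struct xprt *xprt)
{
        /* TCP: arrange for close() to abort instead of lingering */
}

static void udp_kill_temp_xprt(struct xprt *xprt)
{
        /* UDP: nothing transport-specific to tear down */
}

static const struct xprt_ops tcp_ops = { .kill_temp_xprt = tcp_kill_temp_xprt };
static const struct xprt_ops udp_ops = { .kill_temp_xprt = udp_kill_temp_xprt };

static void close_temp(struct xprt *xprt, const struct xprt_ops *ops)
{
        ops->kill_temp_xprt(xprt);      /* transport-specific part first */
        /* ... generic close path continues here ... */
}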
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 210949562786..26b26beef2d4 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -44,18 +44,20 @@
* being done.
*
* When the underlying transport disconnects, MRs are left in one of
- * three states:
+ * four states:
*
* INVALID: The MR was not in use before the QP entered ERROR state.
- * (Or, the LOCAL_INV WR has not completed or flushed yet).
- *
- * STALE: The MR was being registered or unregistered when the QP
- * entered ERROR state, and the pending WR was flushed.
*
* VALID: The MR was registered before the QP entered ERROR state.
*
- * When frwr_op_map encounters STALE and VALID MRs, they are recovered
- * with ib_dereg_mr and then are re-initialized. Beause MR recovery
+ * FLUSHED_FR: The MR was being registered when the QP entered ERROR
+ * state, and the pending WR was flushed.
+ *
+ * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
+ * state, and the pending WR was flushed.
+ *
+ * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
+ * with ib_dereg_mr and then are re-initialized. Because MR recovery
* allocates fresh resources, it is deferred to a workqueue, and the
* recovered MRs are placed back on the rb_mws list when recovery is
* complete. frwr_op_map allocates another MR for the current RPC while
@@ -177,12 +179,15 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
+ enum rpcrdma_frmr_state state = mw->frmr.fr_state;
struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
int rc;
rc = __frwr_reset_mr(ia, mw);
- ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ if (state != FRMR_FLUSHED_LI)
+ ib_dma_unmap_sg(ia->ri_device,
+ mw->mw_sg, mw->mw_nents, mw->mw_dir);
if (rc)
goto out_release;
@@ -262,10 +267,8 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
}
static void
-__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
- const char *wr)
+__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
- frmr->fr_state = FRMR_IS_STALE;
if (wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
wr, ib_wc_status_msg(wc->status),
@@ -288,7 +291,8 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
if (wc->status != IB_WC_SUCCESS) {
cqe = wc->wr_cqe;
frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- __frwr_sendcompletion_flush(wc, frmr, "fastreg");
+ frmr->fr_state = FRMR_FLUSHED_FR;
+ __frwr_sendcompletion_flush(wc, "fastreg");
}
}
@@ -308,7 +312,8 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
if (wc->status != IB_WC_SUCCESS) {
cqe = wc->wr_cqe;
frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- __frwr_sendcompletion_flush(wc, frmr, "localinv");
+ frmr->fr_state = FRMR_FLUSHED_LI;
+ __frwr_sendcompletion_flush(wc, "localinv");
}
}
@@ -328,8 +333,10 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wr_cqe and status are reliable at this point */
cqe = wc->wr_cqe;
frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- if (wc->status != IB_WC_SUCCESS)
- __frwr_sendcompletion_flush(wc, frmr, "localinv");
+ if (wc->status != IB_WC_SUCCESS) {
+ frmr->fr_state = FRMR_FLUSHED_LI;
+ __frwr_sendcompletion_flush(wc, "localinv");
+ }
complete(&frmr->fr_linv_done);
}
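
The frwr_ops hunks above split the old STALE state so recovery knows which work request was flushed: a flushed FASTREG leaves the buffers DMA-mapped, a flushed LOCAL_INV means they were already unmapped, so the unmap is skipped only in the latter case. A short sketch of that state-driven decision (the enum mirrors the header change; needs_dma_unmap is an illustrative helper):

#include <stdbool.h>

enum frmr_state {
        FRMR_IS_INVALID,        /* ready to be used */
        FRMR_IS_VALID,          /* in use */
        FRMR_FLUSHED_FR,        /* flushed FASTREG WR: still DMA-mapped */
        FRMR_FLUSHED_LI,        /* flushed LOCAL_INV WR: already unmapped */
};

static bool needs_dma_unmap(enum frmr_state state)
{
        /* only a flushed LOCAL_INV may skip the unmap during recovery */
        return state != FRMR_FLUSHED_LI;
}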
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 2d8545c34095..20027f8de129 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -177,18 +177,26 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
return -EINVAL;
}
+ /* svc_rdma_sendto releases this page */
page = alloc_page(RPCRDMA_DEF_GFP);
if (!page)
return -ENOMEM;
-
rqst->rq_buffer = page_address(page);
+
+ rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
+ if (!rqst->rq_rbuffer) {
+ put_page(page);
+ return -ENOMEM;
+ }
return 0;
}
static void
xprt_rdma_bc_free(struct rpc_task *task)
{
- /* No-op: ctxt and page have already been freed. */
+ struct rpc_rqst *rqst = task->tk_rqstp;
+
+ kfree(rqst->rq_rbuffer);
}
static int
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6864fb967038..1334de2715c2 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
+static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
static struct svc_xprt_ops svc_rdma_ops = {
.xpo_create = svc_rdma_create,
@@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = {
.xpo_has_wspace = svc_rdma_has_wspace,
.xpo_accept = svc_rdma_accept,
.xpo_secure_port = svc_rdma_secure_port,
+ .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};
struct svc_xprt_class svc_rdma_class = {
@@ -1317,6 +1319,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
return 1;
}
+static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
+{
+}
+
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
struct ib_send_wr *bad_wr, *n_wr;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0d35b761c883..6e1bba358203 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -216,7 +216,8 @@ struct rpcrdma_rep {
enum rpcrdma_frmr_state {
FRMR_IS_INVALID, /* ready to be used */
FRMR_IS_VALID, /* in use */
- FRMR_IS_STALE, /* failed completion */
+ FRMR_FLUSHED_FR, /* flushed FASTREG WR */
+ FRMR_FLUSHED_LI, /* flushed LOCALINV WR */
};
struct rpcrdma_frmr {
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 0137af1c0916..e01c825bc683 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2563,6 +2563,7 @@ static int bc_malloc(struct rpc_task *task)
buf->len = PAGE_SIZE;
rqst->rq_buffer = buf->data;
+ rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
return 0;
}
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 02beb35f577f..3b95fe980fa2 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -771,6 +771,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
int err;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
err = switchdev_port_attr_get(dev, &attr);
if (err && err != -EOPNOTSUPP)
return err;
@@ -926,6 +929,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
struct nlattr *afspec;
int err = 0;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_PROTINFO);
if (protinfo) {
@@ -959,6 +965,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
{
struct nlattr *afspec;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_AF_SPEC);
if (afspec)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 753f774cb46f..aa1babbea385 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -247,11 +247,17 @@ int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
*
* RCU is locked, no other locks set
*/
-void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr)
{
struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ u16 acked = msg_bcast_ack(hdr);
struct sk_buff_head xmitq;
+ /* Ignore bc acks sent by peer before bcast synch point was received */
+ if (msg_bc_ack_invalid(hdr))
+ return;
+
__skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
@@ -279,11 +285,11 @@ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
__skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
- if (msg_type(hdr) == STATE_MSG) {
+ if (msg_type(hdr) != STATE_MSG) {
+ tipc_link_bc_init_rcv(l, hdr);
+ } else if (!msg_bc_ack_invalid(hdr)) {
tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
- } else {
- tipc_link_bc_init_rcv(l, hdr);
}
tipc_bcast_unlock(net);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5ffe34472ccd..855d53c64ab3 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -55,7 +55,8 @@ void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
int tipc_bcast_get_mtu(struct net *net);
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
-void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr);
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
struct tipc_msg *hdr);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 975dbeb60ab0..52d74760fb68 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -421,6 +421,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
dev = dev_get_by_name(net, driver_name);
if (!dev)
return -ENODEV;
+ if (tipc_mtu_bad(dev, 0)) {
+ dev_put(dev);
+ return -EINVAL;
+ }
/* Associate TIPC bearer with L2 bearer */
rcu_assign_pointer(b->media_ptr, dev);
@@ -610,8 +614,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
if (!b)
return NOTIFY_DONE;
- b->mtu = dev->mtu;
-
switch (evt) {
case NETDEV_CHANGE:
if (netif_carrier_ok(dev))
@@ -624,6 +626,11 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
tipc_reset_bearer(net, b);
break;
case NETDEV_CHANGEMTU:
+ if (tipc_mtu_bad(dev, 0)) {
+ bearer_disable(net, b);
+ break;
+ }
+ b->mtu = dev->mtu;
tipc_reset_bearer(net, b);
break;
case NETDEV_CHANGEADDR:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 78892e2f53e3..278ff7f616f9 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,6 +39,7 @@
#include "netlink.h"
#include "core.h"
+#include "msg.h"
#include <net/genetlink.h>
#define MAX_MEDIA 3
@@ -59,6 +60,9 @@
#define TIPC_MEDIA_TYPE_IB 2
#define TIPC_MEDIA_TYPE_UDP 3
+/* minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
+
/**
* struct tipc_media_addr - destination address used by TIPC bearers
* @value: address info (format defined by media)
@@ -215,4 +219,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq);
+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+{
+ if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+ return false;
+ netdev_warn(dev, "MTU too low for tipc bearer\n");
+ return true;
+}
+
#endif /* _TIPC_BEARER_H */
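
The bearer hunks above use the new tipc_mtu_bad() helper in two places: a bearer is refused at enable time if the device MTU cannot carry a TIPC header, and disabled again if NETDEV_CHANGEMTU later drops the MTU below that floor. A standalone sketch of that flow, with illustrative names and values (MIN_BEARER_MTU here is a placeholder, not the real TIPC constant):

#include <stdbool.h>

#define MIN_BEARER_MTU 100              /* illustrative floor, not the real value */

struct dev {
        unsigned int mtu;
};

static bool mtu_bad(const struct dev *dev, unsigned int reserve)
{
        return dev->mtu < MIN_BEARER_MTU + reserve;
}

static int enable_bearer(const struct dev *dev)
{
        if (mtu_bad(dev, 0))
                return -1;              /* refuse to attach the bearer */
        return 0;
}

static void on_change_mtu(const struct dev *dev, void (*disable)(void))
{
        if (mtu_bad(dev, 0))
                disable();              /* device can no longer carry TIPC */
}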
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b36e16cdc945..bda89bf9f4ff 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -47,8 +47,8 @@
#include <linux/pkt_sched.h>
struct tipc_stats {
- u32 sent_info; /* used in counting # sent packets */
- u32 recv_info; /* used in counting # recv'd packets */
+ u32 sent_pkts;
+ u32 recv_pkts;
u32 sent_states;
u32 recv_states;
u32 sent_probes;
@@ -857,7 +857,6 @@ void tipc_link_reset(struct tipc_link *l)
l->acked = 0;
l->silent_intv_cnt = 0;
l->rst_cnt = 0;
- l->stats.recv_info = 0;
l->stale_count = 0;
l->bc_peer_is_up = false;
memset(&l->mon_state, 0, sizeof(l->mon_state));
@@ -888,6 +887,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *transmq = &l->transmq;
struct sk_buff_head *backlogq = &l->backlogq;
struct sk_buff *skb, *_skb, *bskb;
+ int pkt_cnt = skb_queue_len(list);
/* Match msg importance against this and all higher backlog limits: */
if (!skb_queue_empty(backlogq)) {
@@ -901,6 +901,11 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
return -EMSGSIZE;
}
+ if (pkt_cnt > 1) {
+ l->stats.sent_fragmented++;
+ l->stats.sent_fragments += pkt_cnt;
+ }
+
/* Prepare each packet for sending, and add to relevant queue: */
while (skb_queue_len(list)) {
skb = skb_peek(list);
@@ -920,6 +925,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
l->rcv_unacked = 0;
+ l->stats.sent_pkts++;
seqno++;
continue;
}
@@ -968,6 +974,7 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
msg_set_ack(hdr, ack);
msg_set_bcast_ack(hdr, bc_ack);
l->rcv_unacked = 0;
+ l->stats.sent_pkts++;
seqno++;
}
l->snd_nxt = seqno;
@@ -1260,7 +1267,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
/* Deliver packet */
l->rcv_nxt++;
- l->stats.recv_info++;
+ l->stats.recv_pkts++;
if (!tipc_data_input(l, skb, l->inputq))
rc |= tipc_link_input(l, skb, l->inputq);
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
@@ -1312,6 +1319,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_next_sent(hdr, l->snd_nxt);
msg_set_ack(hdr, l->rcv_nxt - 1);
msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
+ msg_set_bc_ack_invalid(hdr, !node_up);
msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
msg_set_link_tolerance(hdr, tolerance);
msg_set_linkprio(hdr, priority);
@@ -1491,8 +1499,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
l->tolerance = peers_tol;
- if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
- TIPC_MAX_LINK_PRI)) {
+ /* Update own prio if peer indicates a different value */
+ if ((peers_prio != l->priority) &&
+ in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
l->priority = peers_prio;
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
@@ -1574,6 +1583,7 @@ static void tipc_link_build_bc_init_msg(struct tipc_link *l,
__skb_queue_head_init(&list);
if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
return;
+ msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
tipc_link_xmit(l, &list, xmitq);
}
@@ -1797,10 +1807,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
void tipc_link_reset_stats(struct tipc_link *l)
{
memset(&l->stats, 0, sizeof(l->stats));
- if (!link_is_bc_sndlink(l)) {
- l->stats.sent_info = l->snd_nxt;
- l->stats.recv_info = l->rcv_nxt;
- }
}
static void link_print(struct tipc_link *l, const char *str)
@@ -1864,12 +1870,12 @@ static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
};
struct nla_map map[] = {
- {TIPC_NLA_STATS_RX_INFO, s->recv_info},
+ {TIPC_NLA_STATS_RX_INFO, 0},
{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
- {TIPC_NLA_STATS_TX_INFO, s->sent_info},
+ {TIPC_NLA_STATS_TX_INFO, 0},
{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
@@ -1944,9 +1950,9 @@ int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
goto attr_msg_full;
if (tipc_link_is_up(link))
@@ -2001,12 +2007,12 @@ static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
};
struct nla_map map[] = {
- {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
+ {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
- {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
+ {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
@@ -2073,9 +2079,9 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
goto attr_msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
goto attr_msg_full;
prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index ed97a5876ebe..9e109bb1a207 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -455,14 +455,14 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
int i, applied_bef;
state->probing = false;
- if (!dlen)
- return;
/* Sanity check received domain record */
- if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) {
- pr_warn_ratelimited("Received illegal domain record\n");
+ if (dlen < dom_rec_len(arrv_dom, 0))
+ return;
+ if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
+ return;
+ if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen)
return;
- }
/* Synch generation numbers with peer if link just came up */
if (!state->synched) {
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index c3832cdf2278..50a739860d37 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -714,6 +714,23 @@ static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
msg_set_bits(m, 5, 13, 0x1, s);
}
+static inline bool msg_bc_ack_invalid(struct tipc_msg *m)
+{
+ switch (msg_user(m)) {
+ case BCAST_PROTOCOL:
+ case NAME_DISTRIBUTOR:
+ case LINK_PROTOCOL:
+ return msg_bits(m, 5, 14, 0x1);
+ default:
+ return false;
+ }
+}
+
+static inline void msg_set_bc_ack_invalid(struct tipc_msg *m, bool invalid)
+{
+ msg_set_bits(m, 5, 14, 0x1, invalid);
+}
+
static inline char *msg_media_addr(struct tipc_msg *m)
{
return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index a04fe9be1c60..c1cfd92de17a 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -156,6 +156,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
pr_warn("Bulk publication failure\n");
return;
}
+ msg_set_bc_ack_invalid(buf_msg(skb), true);
item = (struct distr_item *)msg_data(buf_msg(skb));
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 7ef14e2d2356..9d2f4c2b08ab 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1535,7 +1535,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
if (unlikely(usr == LINK_PROTOCOL))
tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
- tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
+ tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
/* Receive packet directly if conditions permit */
tipc_node_read_lock(n);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f9f5f3c3dab5..41f013888f07 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,7 +1,7 @@
/*
* net/tipc/socket.c: TIPC socket API
*
- * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
* Copyright (c) 2004-2008, 2010-2013, Wind River Systems
* All rights reserved.
*
@@ -129,54 +129,8 @@ static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
-
static const struct rhashtable_params tsk_rht_params;
-/*
- * Revised TIPC socket locking policy:
- *
- * Most socket operations take the standard socket lock when they start
- * and hold it until they finish (or until they need to sleep). Acquiring
- * this lock grants the owner exclusive access to the fields of the socket
- * data structures, with the exception of the backlog queue. A few socket
- * operations can be done without taking the socket lock because they only
- * read socket information that never changes during the life of the socket.
- *
- * Socket operations may acquire the lock for the associated TIPC port if they
- * need to perform an operation on the port. If any routine needs to acquire
- * both the socket lock and the port lock it must take the socket lock first
- * to avoid the risk of deadlock.
- *
- * The dispatcher handling incoming messages cannot grab the socket lock in
- * the standard fashion, since invoked it runs at the BH level and cannot block.
- * Instead, it checks to see if the socket lock is currently owned by someone,
- * and either handles the message itself or adds it to the socket's backlog
- * queue; in the latter case the queued message is processed once the process
- * owning the socket lock releases it.
- *
- * NOTE: Releasing the socket lock while an operation is sleeping overcomes
- * the problem of a blocked socket operation preventing any other operations
- * from occurring. However, applications must be careful if they have
- * multiple threads trying to send (or receive) on the same socket, as these
- * operations might interfere with each other. For example, doing a connect
- * and a receive at the same time might allow the receive to consume the
- * ACK message meant for the connect. While additional work could be done
- * to try and overcome this, it doesn't seem to be worthwhile at the present.
- *
- * NOTE: Releasing the socket lock while an operation is sleeping also ensures
- * that another operation that must be performed in a non-blocking manner is
- * not delayed for very long because the lock has already been taken.
- *
- * NOTE: This code assumes that certain fields of a port/socket pair are
- * constant over its lifetime; such fields can be examined without taking
- * the socket lock and/or port lock, and do not need to be re-read even
- * after resuming processing after waiting. These fields include:
- * - socket type
- * - pointer to socket sk structure (aka tipc_sock structure)
- * - pointer to port structure
- * - port reference
- */
-
static u32 tsk_own_node(struct tipc_sock *tsk)
{
return msg_prevnode(&tsk->phdr);
@@ -232,7 +186,7 @@ static struct tipc_sock *tipc_sk(const struct sock *sk)
static bool tsk_conn_cong(struct tipc_sock *tsk)
{
- return tsk->snt_unacked >= tsk->snd_win;
+ return tsk->snt_unacked > tsk->snd_win;
}
/* tsk_blocks(): translate a buffer size in bytes to number of
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 78cab9c5a445..b58dc95f3d35 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -697,6 +697,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
+ if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
+ sizeof(struct udphdr))) {
+ err = -EINVAL;
+ goto err;
+ }
b->mtu = dev->mtu - sizeof(struct iphdr)
- sizeof(struct udphdr);
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 145082e2ba36..2358f2690ec5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2199,7 +2199,8 @@ out:
* Sleep until more data has arrived. But check for races..
*/
static long unix_stream_data_wait(struct sock *sk, long timeo,
- struct sk_buff *last, unsigned int last_len)
+ struct sk_buff *last, unsigned int last_len,
+ bool freezable)
{
struct sk_buff *tail;
DEFINE_WAIT(wait);
@@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
unix_state_unlock(sk);
- timeo = freezable_schedule_timeout(timeo);
+ if (freezable)
+ timeo = freezable_schedule_timeout(timeo);
+ else
+ timeo = schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
@@ -2250,7 +2254,8 @@ struct unix_stream_read_state {
unsigned int splice_flags;
};
-static int unix_stream_read_generic(struct unix_stream_read_state *state)
+static int unix_stream_read_generic(struct unix_stream_read_state *state,
+ bool freezable)
{
struct scm_cookie scm;
struct socket *sock = state->socket;
@@ -2330,7 +2335,7 @@ again:
mutex_unlock(&u->iolock);
timeo = unix_stream_data_wait(sk, timeo, last,
- last_len);
+ last_len, freezable);
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
@@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
.flags = flags
};
- return unix_stream_read_generic(&state);
+ return unix_stream_read_generic(&state, true);
}
static int unix_stream_splice_actor(struct sk_buff *skb,
@@ -2503,7 +2508,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
flags & SPLICE_F_NONBLOCK)
state.flags = MSG_DONTWAIT;
- return unix_stream_read_generic(&state);
+ return unix_stream_read_generic(&state, false);
}
static int unix_shutdown(struct socket *sock, int mode)
@@ -2812,7 +2817,8 @@ static int unix_seq_show(struct seq_file *seq, void *v)
i++;
}
for ( ; i < len; i++)
- seq_putc(seq, u->addr->name->sun_path[i]);
+ seq_putc(seq, u->addr->name->sun_path[i] ?:
+ '@');
}
unix_state_unlock(s);
seq_putc(seq, '\n');
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 08d2e948c9ad..f0c0c8a48c92 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -71,6 +71,7 @@ struct cfg80211_registered_device {
struct list_head bss_list;
struct rb_root bss_tree;
u32 bss_generation;
+ u32 bss_entries;
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
struct sk_buff *scan_msg;
struct cfg80211_sched_scan_request __rcu *sched_scan_req;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b5bd58d0f731..35ad69fd0838 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -57,6 +57,19 @@
* also linked into the probe response struct.
*/
+/*
+ * Limit the number of BSS entries stored in cfg80211. Each one is
+ * a bit over 4k at most, so this limits the cache to roughly 4-5M of
+ * memory. If somebody really wants to attack this, they'd likely use
+ * small beacons of only one frame type, keeping each entry much
+ * smaller (so that more entries are generated in total and the
+ * per-entry overhead is bigger).
+ */
+static int bss_entries_limit = 1000;
+module_param(bss_entries_limit, int, 0644);
+MODULE_PARM_DESC(bss_entries_limit,
+ "limit to number of scan BSS entries (per wiphy, default 1000)");
+
#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
static void bss_free(struct cfg80211_internal_bss *bss)
@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
list_del_init(&bss->list);
rb_erase(&bss->rbn, &rdev->bss_tree);
+ rdev->bss_entries--;
+ WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
+ "rdev bss entries[%d]/list[empty:%d] corruption\n",
+ rdev->bss_entries, list_empty(&rdev->bss_list));
bss_ref_put(rdev, bss);
return true;
}
@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
rdev->bss_generation++;
}
+static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_internal_bss *bss, *oldest = NULL;
+ bool ret;
+
+ lockdep_assert_held(&rdev->bss_lock);
+
+ list_for_each_entry(bss, &rdev->bss_list, list) {
+ if (atomic_read(&bss->hold))
+ continue;
+
+ if (!list_empty(&bss->hidden_list) &&
+ !bss->pub.hidden_beacon_bss)
+ continue;
+
+ if (oldest && time_before(oldest->ts, bss->ts))
+ continue;
+ oldest = bss;
+ }
+
+ if (WARN_ON(!oldest))
+ return false;
+
+ /*
+ * The callers make sure to increase rdev->bss_generation if anything
+ * gets removed (and a new entry added), so there's no need to also do
+ * it here.
+ */
+
+ ret = __cfg80211_unlink_bss(rdev, oldest);
+ WARN_ON(!ret);
+ return ret;
+}
+
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
bool send_message)
{
@@ -689,6 +740,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
const u8 *ie;
int i, ssidlen;
u8 fold = 0;
+ u32 n_entries = 0;
ies = rcu_access_pointer(new->pub.beacon_ies);
if (WARN_ON(!ies))
@@ -712,6 +764,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
/* This is the bad part ... */
list_for_each_entry(bss, &rdev->bss_list, list) {
+ /*
+ * we're iterating all the entries anyway, so take the
+ * opportunity to validate the list length accounting
+ */
+ n_entries++;
+
if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
continue;
if (bss->pub.channel != new->pub.channel)
@@ -740,6 +798,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
new->pub.beacon_ies);
}
+ WARN_ONCE(n_entries != rdev->bss_entries,
+ "rdev bss entries[%d]/list[len:%d] corruption\n",
+ rdev->bss_entries, n_entries);
+
return true;
}
@@ -894,7 +956,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
}
}
+ if (rdev->bss_entries >= bss_entries_limit &&
+ !cfg80211_bss_expire_oldest(rdev)) {
+ kfree(new);
+ goto drop;
+ }
+
list_add_tail(&new->list, &rdev->bss_list);
+ rdev->bss_entries++;
rb_insert_bss(rdev, new);
found = new;
}
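
Since bss_entries_limit is declared with module_param(..., 0644), it can be tuned without rebuilding. A minimal sketch, assuming cfg80211 is built as a module (the value 500 is only an example):

    # pick a smaller cache at module load time
    modprobe cfg80211 bss_entries_limit=500
    # or adjust it on a running system via the parameter exposed in sysfs
    echo 500 > /sys/module/cfg80211/parameters/bss_entries_limit
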
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 0082f4b01795..14b3f007826d 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -104,13 +104,16 @@ static int wiphy_suspend(struct device *dev)
rtnl_lock();
if (rdev->wiphy.registered) {
- if (!rdev->wiphy.wowlan_config)
+ if (!rdev->wiphy.wowlan_config) {
cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+ }
if (rdev->ops->suspend)
ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
if (ret == 1) {
/* Driver refuse to configure wowlan */
cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
ret = rdev_suspend(rdev, NULL);
}
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8edce22d1b93..659b507b347d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -420,8 +420,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
}
EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
-static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
- const u8 *addr, enum nl80211_iftype iftype)
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ const u8 *addr, enum nl80211_iftype iftype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct {
@@ -525,13 +525,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
return 0;
}
-
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype)
-{
- return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
-}
-EXPORT_SYMBOL(ieee80211_data_to_8023);
+EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr);
int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype,
@@ -746,24 +740,18 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- bool has_80211_header)
+ const u8 *check_da, const u8 *check_sa)
{
unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
u16 ethertype;
u8 *payload;
- int offset = 0, remaining, err;
+ int offset = 0, remaining;
struct ethhdr eth;
bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
bool reuse_skb = false;
bool last = false;
- if (has_80211_header) {
- err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
- if (err)
- goto out;
- }
-
while (!last) {
unsigned int subframe_len;
int len;
@@ -780,8 +768,17 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
goto purge;
offset += sizeof(struct ethhdr);
- /* reuse skb for the last subframe */
last = remaining <= subframe_len + padding;
+
+ /* FIXME: should we really accept multicast DA? */
+ if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
+ !ether_addr_equal(check_da, eth.h_dest)) ||
+ (check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
+ offset += len + padding;
+ continue;
+ }
+
+ /* reuse skb for the last subframe */
if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
skb_pull(skb, offset);
frame = skb;
@@ -819,7 +816,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
purge:
__skb_queue_purge(list);
- out:
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
@@ -1162,7 +1158,8 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
58500000,
65000000,
78000000,
- 0,
+ /* not in the spec, but some devices use this: */
+ 86500000,
},
{ 13500000,
27000000,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index fd6986634e6f..5bf7e1bfeac7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1268,12 +1268,14 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
err = security_xfrm_policy_lookup(pol->security,
fl->flowi_secid,
policy_to_flow_dir(dir));
- if (!err && !xfrm_pol_hold_rcu(pol))
- goto again;
- else if (err == -ESRCH)
+ if (!err) {
+ if (!xfrm_pol_hold_rcu(pol))
+ goto again;
+ } else if (err == -ESRCH) {
pol = NULL;
- else
+ } else {
pol = ERR_PTR(err);
+ }
} else
pol = NULL;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 08892091cfe3..671a1d0333f0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2450,7 +2450,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
#endif
type = nlh->nlmsg_type;
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 12b7304d55dc..72c58675973e 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -27,6 +27,7 @@ hostprogs-y += xdp2
hostprogs-y += test_current_task_under_cgroup
hostprogs-y += trace_event
hostprogs-y += sampleip
+hostprogs-y += tc_l2_redirect
test_verifier-objs := test_verifier.o libbpf.o
test_maps-objs := test_maps.o libbpf.o
@@ -56,6 +57,7 @@ test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \
test_current_task_under_cgroup_user.o
trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
+tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -72,6 +74,7 @@ always += test_probe_write_user_kern.o
always += trace_output_kern.o
always += tcbpf1_kern.o
always += tcbpf2_kern.o
+always += tc_l2_redirect_kern.o
always += lathist_kern.o
always += offwaketime_kern.o
always += spintest_kern.o
@@ -111,6 +114,7 @@ HOSTLOADLIBES_xdp2 += -lelf
HOSTLOADLIBES_test_current_task_under_cgroup += -lelf
HOSTLOADLIBES_trace_event += -lelf
HOSTLOADLIBES_sampleip += -lelf
+HOSTLOADLIBES_tc_l2_redirect += -l elf
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
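
With the Makefile entries above, the new sample builds together with the other BPF samples. A rough build sketch, assuming a clang/LLVM toolchain with a BPF backend is available (the LLC/CLANG paths are placeholders, as in the comment above):

    make samples/bpf/
    # or point at a specific LLVM build:
    make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
    # results: samples/bpf/tc_l2_redirect (loader) and samples/bpf/tc_l2_redirect_kern.o (BPF object)
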
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 90f44bd2045e..dadd5161bd91 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -113,7 +113,7 @@ static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
-#define PT_REGS_IP(x) ((x)->ip)
+#define PT_REGS_IP(x) ((x)->psw.addr)
#elif defined(__aarch64__)
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c
index d17550198d06..6db6b21fdc6d 100644
--- a/samples/bpf/parse_ldabs.c
+++ b/samples/bpf/parse_ldabs.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c
index cf2511c33905..10af53d33cc2 100644
--- a/samples/bpf/parse_simple.c
+++ b/samples/bpf/parse_simple.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
index edab34dce79b..95c16324760c 100644
--- a/samples/bpf/parse_varlen.c
+++ b/samples/bpf/parse_varlen.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
index 774a681f374a..ceabf31079cf 100644
--- a/samples/bpf/sampleip_kern.c
+++ b/samples/bpf/sampleip_kern.c
@@ -25,7 +25,7 @@ int do_sample(struct bpf_perf_event_data *ctx)
u64 ip;
u32 *value, init_val = 1;
- ip = ctx->regs.ip;
+ ip = PT_REGS_IP(&ctx->regs);
value = bpf_map_lookup_elem(&ip_map, &ip);
if (value)
*value += 1;
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh
new file mode 100755
index 000000000000..80a05591a140
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect.sh
@@ -0,0 +1,173 @@
+#!/bin/bash
+
+[[ -z $TC ]] && TC='tc'
+[[ -z $IP ]] && IP='ip'
+
+REDIRECT_USER='./tc_l2_redirect'
+REDIRECT_BPF='./tc_l2_redirect_kern.o'
+
+RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter)
+IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding)
+
+function config_common {
+ local tun_type=$1
+
+ $IP netns add ns1
+ $IP netns add ns2
+ $IP link add ve1 type veth peer name vens1
+ $IP link add ve2 type veth peer name vens2
+ $IP link set dev ve1 up
+ $IP link set dev ve2 up
+ $IP link set dev ve1 mtu 1500
+ $IP link set dev ve2 mtu 1500
+ $IP link set dev vens1 netns ns1
+ $IP link set dev vens2 netns ns2
+
+ $IP -n ns1 link set dev lo up
+ $IP -n ns1 link set dev vens1 up
+ $IP -n ns1 addr add 10.1.1.101/24 dev vens1
+ $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad
+ $IP -n ns1 route add default via 10.1.1.1 dev vens1
+ $IP -n ns1 route add default via 2401:db01::1 dev vens1
+
+ $IP -n ns2 link set dev lo up
+ $IP -n ns2 link set dev vens2 up
+ $IP -n ns2 addr add 10.2.1.102/24 dev vens2
+ $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad
+ $IP -n ns2 addr add 10.10.1.102 dev lo
+ $IP -n ns2 addr add 2401:face::66/64 dev lo nodad
+ $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1
+ $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1
+ $IP -n ns2 link set dev ipt2 up
+ $IP -n ns2 link set dev ip6t2 up
+ $IP netns exec ns2 $TC qdisc add dev vens2 clsact
+ $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip
+ if [[ $tun_type == "ipip" ]]; then
+ $IP -n ns2 route add 10.1.1.0/24 dev ipt2
+ $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0
+ else
+ $IP -n ns2 route add 10.1.1.0/24 dev ip6t2
+ $IP -n ns2 route add 2401:db01::/64 dev ip6t2
+ $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0
+ fi
+
+ $IP addr add 10.1.1.1/24 dev ve1
+ $IP addr add 2401:db01::1/64 dev ve1 nodad
+ $IP addr add 10.2.1.1/24 dev ve2
+ $IP addr add 2401:db02::1/64 dev ve2 nodad
+
+ $TC qdisc add dev ve2 clsact
+ $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward
+
+ sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ sysctl -q -w net.ipv6.conf.all.forwarding=1
+}
+
+function cleanup {
+ set +e
+ [[ -z $DEBUG ]] || set +x
+ $IP netns delete ns1 >& /dev/null
+ $IP netns delete ns2 >& /dev/null
+ $IP link del ve1 >& /dev/null
+ $IP link del ve2 >& /dev/null
+ $IP link del ipt >& /dev/null
+ $IP link del ip6t >& /dev/null
+ sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER
+ sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING
+ rm -f /sys/fs/bpf/tc/globals/tun_iface
+ [[ -z $DEBUG ]] || set -x
+ set -e
+}
+
+function l2_to_ipip {
+ echo -n "l2_to_ipip $1: "
+
+ local dir=$1
+
+ config_common ipip
+
+ $IP link add ipt type ipip external
+ $IP link set dev ipt up
+ sysctl -q -w net.ipv4.conf.ipt.rp_filter=0
+ sysctl -q -w net.ipv4.conf.ipt.forwarding=1
+
+ if [[ $dir == "egress" ]]; then
+ $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
+ $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
+ sysctl -q -w net.ipv4.conf.ve1.forwarding=1
+ else
+ $TC qdisc add dev ve1 clsact
+ $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
+ fi
+
+ $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex)
+
+ $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
+
+ if [[ $dir == "egress" ]]; then
+ # test direct egress to ve2 (i.e. not forwarding from
+ # ve1 to ve2).
+ ping -c1 10.10.1.102 >& /dev/null
+ fi
+
+ cleanup
+
+ echo "OK"
+}
+
+function l2_to_ip6tnl {
+ echo -n "l2_to_ip6tnl $1: "
+
+ local dir=$1
+
+ config_common ip6tnl
+
+ $IP link add ip6t type ip6tnl mode any external
+ $IP link set dev ip6t up
+ sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0
+ sysctl -q -w net.ipv4.conf.ip6t.forwarding=1
+
+ if [[ $dir == "egress" ]]; then
+ $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
+ $IP route add 2401:face::/64 via 2401:db02::66 dev ve2
+ $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
+ sysctl -q -w net.ipv4.conf.ve1.forwarding=1
+ else
+ $TC qdisc add dev ve1 clsact
+ $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
+ fi
+
+ $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex)
+
+ $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
+ $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null
+
+ if [[ $dir == "egress" ]]; then
+ # test direct egress to ve2 (i.e. not forwarding from
+ # ve1 to ve2).
+ ping -c1 10.10.1.102 >& /dev/null
+ ping -6 -c1 2401:face::66 >& /dev/null
+ fi
+
+ cleanup
+
+ echo "OK"
+}
+
+cleanup
+test_names="l2_to_ipip l2_to_ip6tnl"
+test_dirs="ingress egress"
+if [[ $# -ge 2 ]]; then
+ test_names=$1
+ test_dirs=$2
+elif [[ $# -ge 1 ]]; then
+ test_names=$1
+fi
+
+for t in $test_names; do
+ for d in $test_dirs; do
+ $t $d
+ done
+done
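
Given the argument handling above, the script runs every test/direction combination by default, or a single one when told so. A usage sketch, assuming it is run as root from samples/bpf/ with a BPF filesystem available on /sys/fs/bpf (tc will typically mount it on demand):

    ./tc_l2_redirect.sh                     # all tests, both directions
    ./tc_l2_redirect.sh l2_to_ipip egress   # one test, one direction
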
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
new file mode 100644
index 000000000000..92a44729dbe4
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -0,0 +1,236 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+#include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
+#include "bpf_helpers.h"
+
+#define _htonl __builtin_bswap32
+
+#define PIN_GLOBAL_NS 2
+struct bpf_elf_map {
+ __u32 type;
+ __u32 size_key;
+ __u32 size_value;
+ __u32 max_elem;
+ __u32 flags;
+ __u32 id;
+ __u32 pinning;
+};
+
+/* copy of 'struct ethhdr' without __packed */
+struct eth_hdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ unsigned short h_proto;
+};
+
+struct bpf_elf_map SEC("maps") tun_iface = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .size_key = sizeof(int),
+ .size_value = sizeof(int),
+ .pinning = PIN_GLOBAL_NS,
+ .max_elem = 1,
+};
+
+static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr)
+{
+ if (eth_proto == htons(ETH_P_IP))
+ return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100);
+ else if (eth_proto == htons(ETH_P_IPV6))
+ return (daddr == _htonl(0x2401face));
+
+ return false;
+}
+
+SEC("l2_to_iptun_ingress_forward")
+int _l2_to_iptun_ingress_forward(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key tkey = {};
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+ int key = 0, *ifindex;
+
+ int ret;
+
+ if (data + sizeof(*eth) > data_end)
+ return TC_ACT_OK;
+
+ ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+ if (!ifindex)
+ return TC_ACT_OK;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n";
+ struct iphdr *iph = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+ return TC_ACT_OK;
+
+ if (iph->protocol != IPPROTO_IPIP)
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex,
+ _htonl(iph->daddr));
+ return bpf_redirect(*ifindex, BPF_F_INGRESS);
+ } else if (eth->h_proto == htons(ETH_P_IPV6)) {
+ char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n";
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+ return TC_ACT_OK;
+
+ if (ip6h->nexthdr != IPPROTO_IPIP &&
+ ip6h->nexthdr != IPPROTO_IPV6)
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex,
+ _htonl(ip6h->daddr.s6_addr32[0]),
+ _htonl(ip6h->daddr.s6_addr32[3]));
+ return bpf_redirect(*ifindex, BPF_F_INGRESS);
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("l2_to_iptun_ingress_redirect")
+int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key tkey = {};
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+ int key = 0, *ifindex;
+
+ int ret;
+
+ if (data + sizeof(*eth) > data_end)
+ return TC_ACT_OK;
+
+ ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+ if (!ifindex)
+ return TC_ACT_OK;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
+ struct iphdr *iph = data + sizeof(*eth);
+ __be32 daddr = iph->daddr;
+
+ if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+ return TC_ACT_OK;
+
+ if (!is_vip_addr(eth->h_proto, daddr))
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex);
+ } else {
+ return TC_ACT_OK;
+ }
+
+ tkey.tunnel_id = 10000;
+ tkey.tunnel_ttl = 64;
+ tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */
+ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0);
+ return bpf_redirect(*ifindex, 0);
+}
+
+SEC("l2_to_ip6tun_ingress_redirect")
+int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key tkey = {};
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+ int key = 0, *ifindex;
+
+ if (data + sizeof(*eth) > data_end)
+ return TC_ACT_OK;
+
+ ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+ if (!ifindex)
+ return TC_ACT_OK;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
+ struct iphdr *iph = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+ return TC_ACT_OK;
+
+ if (!is_vip_addr(eth->h_proto, iph->daddr))
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr),
+ *ifindex);
+ } else if (eth->h_proto == htons(ETH_P_IPV6)) {
+ char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n";
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+ return TC_ACT_OK;
+
+ if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt6, sizeof(fmt6),
+ _htonl(ip6h->daddr.s6_addr32[0]), *ifindex);
+ } else {
+ return TC_ACT_OK;
+ }
+
+ tkey.tunnel_id = 10000;
+ tkey.tunnel_ttl = 64;
+ /* 2401:db02:0:0:0:0:0:66 */
+ tkey.remote_ipv6[0] = _htonl(0x2401db02);
+ tkey.remote_ipv6[1] = 0;
+ tkey.remote_ipv6[2] = 0;
+ tkey.remote_ipv6[3] = _htonl(0x00000066);
+ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6);
+ return bpf_redirect(*ifindex, 0);
+}
+
+SEC("drop_non_tun_vip")
+int _drop_non_tun_vip(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key tkey = {};
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+
+ if (data + sizeof(*eth) > data_end)
+ return TC_ACT_OK;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+ return TC_ACT_OK;
+
+ if (is_vip_addr(eth->h_proto, iph->daddr))
+ return TC_ACT_SHOT;
+ } else if (eth->h_proto == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+ return TC_ACT_OK;
+
+ if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
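
The pinning = PIN_GLOBAL_NS field follows iproute2's bpf_elf_map convention: when tc loads this object, it pins the tun_iface array under /sys/fs/bpf/tc/globals so the user-space helper can update it by path. A rough sketch of the manual steps the test script automates, assuming the ve1 and ipt devices from the script already exist:

    tc qdisc add dev ve1 clsact
    tc filter add dev ve1 ingress bpf da obj tc_l2_redirect_kern.o sec l2_to_iptun_ingress_redirect
    ./tc_l2_redirect -U /sys/fs/bpf/tc/globals/tun_iface -i $(cat /sys/class/net/ipt/ifindex)
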
diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c
new file mode 100644
index 000000000000..4013c5337b91
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_user.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/unistd.h>
+#include <linux/bpf.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+#include "libbpf.h"
+
+static void usage(void)
+{
+ printf("Usage: tc_l2_ipip_redirect [...]\n");
+ printf(" -U <file> Update an already pinned BPF array\n");
+ printf(" -i <ifindex> Interface index\n");
+ printf(" -h Display this help\n");
+}
+
+int main(int argc, char **argv)
+{
+ const char *pinned_file = NULL;
+ int ifindex = -1;
+ int array_key = 0;
+ int array_fd = -1;
+ int ret = -1;
+ int opt;
+
+ while ((opt = getopt(argc, argv, "F:U:i:")) != -1) {
+ switch (opt) {
+ /* General args */
+ case 'U':
+ pinned_file = optarg;
+ break;
+ case 'i':
+ ifindex = atoi(optarg);
+ break;
+ default:
+ usage();
+ goto out;
+ }
+ }
+
+ if (ifindex < 0 || !pinned_file) {
+ usage();
+ goto out;
+ }
+
+ array_fd = bpf_obj_get(pinned_file);
+ if (array_fd < 0) {
+ fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
+ pinned_file, strerror(errno), errno);
+ goto out;
+ }
+
+	/* bpf_tunnel_key.remote_ipv4 expects host byte order */
+ ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0);
+ if (ret) {
+ perror("bpf_update_elem");
+ goto out;
+ }
+
+out:
+ if (array_fd != -1)
+ close(array_fd);
+ return ret;
+}
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
index fa051b3d53ee..274c884c87fe 100644
--- a/samples/bpf/tcbpf1_kern.c
+++ b/samples/bpf/tcbpf1_kern.c
@@ -1,3 +1,4 @@
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
index 3303bb85593b..9c823a609e75 100644
--- a/samples/bpf/tcbpf2_kern.c
+++ b/samples/bpf/tcbpf2_kern.c
@@ -5,6 +5,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c
index 10ff73404e3a..1547b36a7b7b 100644
--- a/samples/bpf/test_cgrp2_tc_kern.c
+++ b/samples/bpf/test_cgrp2_tc_kern.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/if_ether.h>
#include <uapi/linux/in6.h>
#include <uapi/linux/ipv6.h>
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
index 71a8ed32823e..41b6115a32eb 100644
--- a/samples/bpf/trace_event_kern.c
+++ b/samples/bpf/trace_event_kern.c
@@ -50,7 +50,7 @@ int bpf_prog1(struct bpf_perf_event_data *ctx)
key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);
if ((int)key.kernstack < 0 && (int)key.userstack < 0) {
bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
- ctx->regs.ip);
+ PT_REGS_IP(&ctx->regs));
return 0;
}
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index de46ab03f063..7675d11ee65e 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -159,7 +159,8 @@ cmd_cpp_i_c = $(CPP) $(c_flags) -o $@ $<
$(obj)/%.i: $(src)/%.c FORCE
$(call if_changed_dep,cpp_i_c)
-cmd_gensymtypes = \
+# These mirror gensymtypes_S and friends below; keep them in sync.
+cmd_gensymtypes_c = \
$(CPP) -D__GENKSYMS__ $(c_flags) $< | \
$(GENKSYMS) $(if $(1), -T $(2)) \
$(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
@@ -169,7 +170,7 @@ cmd_gensymtypes = \
quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
cmd_cc_symtypes_c = \
set -e; \
- $(call cmd_gensymtypes,true,$@) >/dev/null; \
+ $(call cmd_gensymtypes_c,true,$@) >/dev/null; \
test -s $@ || rm -f $@
$(obj)/%.symtypes : $(src)/%.c FORCE
@@ -198,9 +199,10 @@ else
# the actual value of the checksum generated by genksyms
cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
-cmd_modversions = \
+
+cmd_modversions_c = \
if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
- $(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
+ $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
> $(@D)/.tmp_$(@F:.o=.ver); \
\
$(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
@@ -268,13 +270,14 @@ endif # CONFIG_STACK_VALIDATION
define rule_cc_o_c
$(call echo-cmd,checksrc) $(cmd_checksrc) \
$(call cmd_and_fixdep,cc_o_c) \
- $(cmd_modversions) \
+ $(cmd_modversions_c) \
$(cmd_objtool) \
$(call echo-cmd,record_mcount) $(cmd_record_mcount)
endef
define rule_as_o_S
$(call cmd_and_fixdep,as_o_S) \
+ $(cmd_modversions_S) \
$(cmd_objtool)
endef
@@ -314,6 +317,39 @@ modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL)
$(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
$(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
+# .S file exports must have their C prototypes defined in asm/asm-prototypes.h
+# or a file that it includes, in order to get versioned symbols. We build a
+# dummy C file that includes asm-prototypes.h and the EXPORT_SYMBOL lines from
+# the .S file (with a trailing ';'), and run genksyms on that to extract the
+# symbol versions.
+#
+# This is convoluted. The .S file must first be preprocessed so that include
+# guards are handled and macros expanded; the resulting exports are then
+# rewritten into plain EXPORT_SYMBOL(symbol); statements to build our dummy C
+# file, and that in turn is preprocessed to make the genksyms input.
+#
+# These mirror gensymtypes_c and friends above; keep them in sync.
+cmd_gensymtypes_S = \
+ (echo "\#include <linux/kernel.h>" ; \
+ echo "\#include <asm/asm-prototypes.h>" ; \
+ $(CPP) $(a_flags) $< | \
+ grep "\<___EXPORT_SYMBOL\>" | \
+ sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \
+ $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
+ $(GENKSYMS) $(if $(1), -T $(2)) \
+ $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
+ $(if $(KBUILD_PRESERVE),-p) \
+ -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
+
+quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@
+cmd_cc_symtypes_S = \
+ set -e; \
+ $(call cmd_gensymtypes_S,true,$@) >/dev/null; \
+ test -s $@ || rm -f $@
+
+$(obj)/%.symtypes : $(src)/%.S FORCE
+ $(call cmd,cc_symtypes_S)
+
+
quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@
cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $<
@@ -321,7 +357,37 @@ $(obj)/%.s: $(src)/%.S FORCE
$(call if_changed_dep,cpp_s_S)
quiet_cmd_as_o_S = AS $(quiet_modtag) $@
-cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+ifndef CONFIG_MODVERSIONS
+cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+else
+
+ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h)
+
+ifeq ($(ASM_PROTOTYPES),)
+cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+else
+
+# Versioning matches the C process described above, with the difference that
+# we parse the asm-prototypes.h C header to get the function declarations.
+
+cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $<
+
+cmd_modversions_S = \
+ if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
+ $(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
+ > $(@D)/.tmp_$(@F:.o=.ver); \
+ \
+ $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
+ -T $(@D)/.tmp_$(@F:.o=.ver); \
+ rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \
+ else \
+ mv -f $(@D)/.tmp_$(@F) $@; \
+ fi;
+endif
+endif
$(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE
$(call if_changed_rule,as_o_S)
@@ -430,6 +496,9 @@ cmd_export_list = $(OBJDUMP) -h $< | \
$(obj)/lib-ksyms.o: $(lib-target) FORCE
$(call if_changed,export_list)
+
+targets += $(obj)/lib-ksyms.o
+
endif
#
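
With the new %.symtypes rule for .S files, versioned assembly exports can be inspected the same way as C ones. A hypothetical invocation, assuming CONFIG_MODVERSIONS=y and an architecture that provides asm/asm-prototypes.h (memcpy_64 is only an example of an assembly file with exports):

    make arch/x86/lib/memcpy_64.symtypes
    cat arch/x86/lib/memcpy_64.symtypes
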
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 53449a6ff6aa..7c321a603b07 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -36,6 +36,7 @@ warning-2 += -Wshadow
warning-2 += $(call cc-option, -Wlogical-op)
warning-2 += $(call cc-option, -Wmissing-field-initializers)
warning-2 += $(call cc-option, -Wsign-compare)
+warning-2 += $(call cc-option, -Wmaybe-uninitialized)
warning-3 := -Wbad-function-cast
warning-3 += -Wcast-qual
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index dd779c40c8e6..3b1b13818d59 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -17,4 +17,8 @@ endif
ifdef CONFIG_UBSAN_NULL
CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
endif
+
+  # -fsanitize=* options make GCC less smart than usual and
+  # increase the number of 'maybe-uninitialized' false positives
+ CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
endif
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 19f5adfd877d..d9ff038c1b28 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -8,6 +8,9 @@
# of the GNU General Public License, incorporated herein by reference.
import sys, os, re
+from signal import signal, SIGPIPE, SIG_DFL
+
+signal(SIGPIPE, SIG_DFL)
if len(sys.argv) != 3:
sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0])
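
Resetting SIGPIPE to SIG_DFL means the script no longer prints a Python traceback when its output is truncated by the reader. For example, piping a comparison of two builds into head (vmlinux.old and vmlinux.new are placeholder file names):

    ./scripts/bloat-o-meter vmlinux.old vmlinux.new | head -20
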
diff --git a/scripts/gcc-plugins/cyc_complexity_plugin.c b/scripts/gcc-plugins/cyc_complexity_plugin.c
index 34df974c6ba3..8af7db06122d 100644
--- a/scripts/gcc-plugins/cyc_complexity_plugin.c
+++ b/scripts/gcc-plugins/cyc_complexity_plugin.c
@@ -20,7 +20,7 @@
#include "gcc-common.h"
-int plugin_is_GPL_compatible;
+__visible int plugin_is_GPL_compatible;
static struct plugin_info cyc_complexity_plugin_info = {
.version = "20160225",
@@ -49,7 +49,7 @@ static unsigned int cyc_complexity_execute(void)
#include "gcc-generate-gimple-pass.h"
-int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
{
const char * const plugin_name = plugin_info->base_name;
struct register_pass_info cyc_complexity_pass_info;
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 172850bcd0d9..950fd2e64bb7 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -130,6 +130,7 @@ extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
#endif
#define __unused __attribute__((__unused__))
+#define __visible __attribute__((visibility("default")))
#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index ff1939b804ae..8160f1c1b56e 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -77,7 +77,7 @@
#include "gcc-common.h"
-int plugin_is_GPL_compatible;
+__visible int plugin_is_GPL_compatible;
static GTY(()) tree latent_entropy_decl;
@@ -340,7 +340,7 @@ static enum tree_code get_op(tree *rhs)
break;
}
if (rhs)
- *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
+ *rhs = build_int_cstu(long_unsigned_type_node, random_const);
return op;
}
@@ -372,7 +372,7 @@ static void __perturb_latent_entropy(gimple_stmt_iterator *gsi,
enum tree_code op;
/* 1. create temporary copy of latent_entropy */
- temp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+ temp = create_var(long_unsigned_type_node, "temp_latent_entropy");
/* 2. read... */
add_referenced_var(latent_entropy_decl);
@@ -459,13 +459,13 @@ static void init_local_entropy(basic_block bb, tree local_entropy)
gsi_insert_before(&gsi, call, GSI_NEW_STMT);
update_stmt(call);
- udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr);
+ udi_frame_addr = fold_convert(long_unsigned_type_node, frame_addr);
assign = gimple_build_assign(local_entropy, udi_frame_addr);
gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
update_stmt(assign);
/* 3. create temporary copy of latent_entropy */
- tmp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+ tmp = create_var(long_unsigned_type_node, "temp_latent_entropy");
/* 4. read the global entropy variable into local entropy */
add_referenced_var(latent_entropy_decl);
@@ -480,7 +480,7 @@ static void init_local_entropy(basic_block bb, tree local_entropy)
update_stmt(assign);
rand_cst = get_random_const();
- rand_const = build_int_cstu(unsigned_intDI_type_node, rand_cst);
+ rand_const = build_int_cstu(long_unsigned_type_node, rand_cst);
op = get_op(NULL);
assign = create_assign(op, local_entropy, local_entropy, rand_const);
gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
@@ -529,7 +529,7 @@ static unsigned int latent_entropy_execute(void)
}
/* 1. create the local entropy variable */
- local_entropy = create_var(unsigned_intDI_type_node, "local_entropy");
+ local_entropy = create_var(long_unsigned_type_node, "local_entropy");
/* 2. initialize the local entropy variable */
init_local_entropy(bb, local_entropy);
@@ -561,10 +561,9 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
if (in_lto_p)
return;
- /* extern volatile u64 latent_entropy */
- gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
- quals = TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
- type = build_qualified_type(long_long_unsigned_type_node, quals);
+ /* extern volatile unsigned long latent_entropy */
+ quals = TYPE_QUALS(long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
+ type = build_qualified_type(long_unsigned_type_node, quals);
id = get_identifier("latent_entropy");
latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type);
@@ -584,8 +583,8 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
| TODO_update_ssa
#include "gcc-generate-gimple-pass.h"
-int plugin_init(struct plugin_name_args *plugin_info,
- struct plugin_gcc_version *version)
+__visible int plugin_init(struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version)
{
bool enabled = true;
const char * const plugin_name = plugin_info->base_name;
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
index aedd6113cb73..7ea0b3f50739 100644
--- a/scripts/gcc-plugins/sancov_plugin.c
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -21,7 +21,7 @@
#include "gcc-common.h"
-int plugin_is_GPL_compatible;
+__visible int plugin_is_GPL_compatible;
tree sancov_fndecl;
@@ -86,7 +86,7 @@ static void sancov_start_unit(void __unused *gcc_data, void __unused *user_data)
#endif
}
-int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
{
int i;
struct register_pass_info sancov_plugin_pass_info;
diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
index 973e8c141567..17867e723a51 100755
--- a/scripts/gcc-x86_64-has-stack-protector.sh
+++ b/scripts/gcc-x86_64-has-stack-protector.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
if [ "$?" -eq "0" ] ; then
echo y
else
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ebced77deb9c..90a091b6ae4d 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -35,6 +35,8 @@ nconfig: $(obj)/nconf
silentoldconfig: $(obj)/conf
$(Q)mkdir -p include/config include/generated
+ $(Q)test -e include/generated/autoksyms.h || \
+ touch include/generated/autoksyms.h
$< $(silent) --$@ $(Kconfig)
localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index fc3036b34e51..a4d90aa1045a 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
/* released below */
cred = get_current_cred();
cxt = cred_cxt(cred);
- profile = aa_cred_profile(cred);
- previous_profile = cxt->previous;
+ profile = aa_get_newest_profile(aa_cred_profile(cred));
+ previous_profile = aa_get_newest_profile(cxt->previous);
if (unconfined(profile)) {
info = "unconfined";
@@ -718,6 +718,8 @@ audit:
out:
aa_put_profile(hat);
kfree(name);
+ aa_put_profile(profile);
+ aa_put_profile(previous_profile);
put_cred(cred);
return error;
diff --git a/sound/core/info.c b/sound/core/info.c
index 895362a696c9..8ab72e0f5932 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -325,10 +325,15 @@ static ssize_t snd_info_text_entry_write(struct file *file,
size_t next;
int err = 0;
+ if (!entry->c.text.write)
+ return -EIO;
pos = *offset;
if (!valid_pos(pos, count))
return -EIO;
next = pos + count;
+ /* don't handle too large text inputs */
+ if (next > 16 * 1024)
+ return -EIO;
mutex_lock(&entry->access);
buf = data->wbuffer;
if (!buf) {
@@ -366,7 +371,9 @@ static int snd_info_seq_show(struct seq_file *seq, void *p)
struct snd_info_private_data *data = seq->private;
struct snd_info_entry *entry = data->entry;
- if (entry->c.text.read) {
+ if (!entry->c.text.read) {
+ return -EIO;
+ } else {
data->rbuffer->buffer = (char *)seq; /* XXX hack! */
entry->c.text.read(entry, data->rbuffer);
}
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index c9af022676c2..0659bf389489 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -193,6 +193,7 @@ static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid)
* snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate
* @codec: HDA codec
* @nid: the pin widget NID
+ * @dev_id: device identifier
* @rate: the sample rate to set
*
* This function is supposed to be used only by a HD-audio controller
@@ -201,18 +202,20 @@ static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid)
* This function sets N/CTS value based on the given sample rate.
* Returns zero for success, or a negative error code.
*/
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate)
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate)
{
struct hdac_bus *bus = codec->bus;
struct i915_audio_component *acomp = bus->audio_component;
- int port;
+ int port, pipe;
if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate)
return -ENODEV;
port = pin2port(codec, nid);
if (port < 0)
return -EINVAL;
- return acomp->ops->sync_audio_rate(acomp->dev, port, rate);
+ pipe = dev_id;
+ return acomp->ops->sync_audio_rate(acomp->dev, port, pipe, rate);
}
EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
@@ -220,6 +223,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
* snd_hdac_acomp_get_eld - Get the audio state and ELD via component
* @codec: HDA codec
* @nid: the pin widget NID
+ * @dev_id: device identifier
* @audio_enabled: the pointer to store the current audio state
* @buffer: the buffer pointer to store ELD bytes
* @max_bytes: the max bytes to be stored on @buffer
@@ -236,12 +240,12 @@ EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
* thus it may be over @max_bytes. If it's over @max_bytes, it implies
* that only a part of ELD bytes have been fetched.
*/
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
bool *audio_enabled, char *buffer, int max_bytes)
{
struct hdac_bus *bus = codec->bus;
struct i915_audio_component *acomp = bus->audio_component;
- int port;
+ int port, pipe;
if (!acomp || !acomp->ops || !acomp->ops->get_eld)
return -ENODEV;
@@ -249,7 +253,9 @@ int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
port = pin2port(codec, nid);
if (port < 0)
return -EINVAL;
- return acomp->ops->get_eld(acomp->dev, port, audio_enabled,
+
+ pipe = dev_id;
+ return acomp->ops->get_eld(acomp->dev, port, pipe, audio_enabled,
buffer, max_bytes);
}
EXPORT_SYMBOL_GPL(snd_hdac_acomp_get_eld);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 56e5204ac9c1..cf9bc042fe96 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1485,7 +1485,7 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
mutex_lock(&per_pin->lock);
eld->monitor_present = false;
- size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
+ size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid, -1,
&eld->monitor_present, eld->eld_buffer,
ELD_MAX_SIZE);
if (size > 0) {
@@ -1744,7 +1744,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
/* Todo: add DP1.2 MST audio support later */
if (codec_has_acomp(codec))
- snd_hdac_sync_audio_rate(&codec->core, pin_nid, runtime->rate);
+ snd_hdac_sync_audio_rate(&codec->core, pin_nid, -1,
+ runtime->rate);
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
mutex_lock(&per_pin->lock);
@@ -2290,7 +2291,7 @@ static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
snd_hda_codec_set_power_to_all(codec, fg, power_state);
}
-static void intel_pin_eld_notify(void *audio_ptr, int port)
+static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
{
struct hda_codec *codec = audio_ptr;
int pin_nid;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2f909dd8b7b8..ea81c08ddc7a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6907,8 +6907,6 @@ static const struct hda_fixup alc662_fixups[] = {
.v.pins = (const struct hda_pintbl[]) {
{ 0x15, 0x40f000f0 }, /* disabled */
{ 0x16, 0x40f000f0 }, /* disabled */
- { 0x18, 0x01014011 }, /* LO */
- { 0x1a, 0x01014012 }, /* LO */
{ }
}
},
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 6a23302297c9..4d9d320a7971 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int);
static bool is_thinkpad(struct hda_codec *codec)
{
return (codec->core.subsystem_id >> 16 == 0x17aa) &&
- (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068"));
+ (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") ||
+ acpi_dev_found("IBM0068"));
}
static void update_tpacpi_mute_led(void *private_data, int enabled)
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 18baea2f7d65..84f86745c30e 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"),
};
static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
- { "Capture", NULL, "AINA" },
- { "Capture", NULL, "AINB" },
+ { "Capture", NULL, "AINL" },
+ { "Capture", NULL, "AINR" },
- { "AOUTA", NULL, "Playback" },
- { "AOUTB", NULL, "Playback" },
+ { "AOUTL", NULL, "Playback" },
+ { "AOUTR", NULL, "Playback" },
};
/**
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 1152aa5e7c39..cf37936bfe3a 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -880,7 +880,8 @@ static const struct snd_soc_dapm_widget da7219_dapm_widgets[] = {
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
/* DAI */
- SND_SOC_DAPM_AIF_OUT("DAIOUT", "Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("DAIOUT", "Capture", 0, DA7219_DAI_TDM_CTRL,
+ DA7219_DAI_OE_SHIFT, DA7219_NO_INVERT),
SND_SOC_DAPM_AIF_IN("DAIIN", "Playback", 0, SND_SOC_NOPM, 0, 0),
/* Output Muxes */
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index c602c4960924..0c6228a0bf95 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1368,7 +1368,7 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
return hdac_hdmi_init_dai_map(edev);
}
-static void hdac_hdmi_eld_notify_cb(void *aptr, int port)
+static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
{
struct hdac_ext_device *edev = aptr;
struct hdac_hdmi_priv *hdmi = edev->private_data;
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index b904492d7744..90b5948e0ff3 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -364,7 +364,12 @@ static int hdmi_of_xlate_dai_name(struct snd_soc_component *component,
struct of_phandle_args *args,
const char **dai_name)
{
- int id = args->args[0];
+ int id;
+
+ if (args->args_count)
+ id = args->args[0];
+ else
+ id = 0;
if (id < ARRAY_SIZE(hdmi_dai_name)) {
*dai_name = hdmi_dai_name[id];
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 55558643166f..2db8179047ae 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -249,6 +249,11 @@ static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic)
snd_soc_dapm_force_enable_pin(dapm, "LDO1");
snd_soc_dapm_sync(dapm);
+ regmap_update_bits(rt298->regmap,
+ RT298_POWER_CTRL1, 0x1001, 0);
+ regmap_update_bits(rt298->regmap,
+ RT298_POWER_CTRL2, 0x4, 0x4);
+
regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24);
msleep(50);
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 01a18d88f1eb..00ff2788879e 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -1547,11 +1547,11 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
msleep(sleep_time[i]);
val = snd_soc_read(codec, RT5663_EM_JACK_TYPE_2) &
0x0003;
+ dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n",
+ __func__, val, sleep_time[i]);
i++;
if (val == 0x1 || val == 0x2 || val == 0x3)
break;
- dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n",
- __func__, val, sleep_time[i]);
}
dev_dbg(codec->dev, "%s val = %d\n", __func__, val);
switch (val) {
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index 7b31ee9b82bc..d6e00c77edcd 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -424,7 +424,7 @@ static const struct snd_soc_dai_ops stih407_dac_ops = {
static const struct regmap_config stih407_sas_regmap = {
.reg_bits = 32,
.val_bits = 32,
-
+ .fast_io = true,
.max_register = STIH407_AUDIO_DAC_CTRL,
.reg_defaults = stih407_sas_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults),
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index df5e5cb33baa..810369f687d7 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -341,20 +341,9 @@ static int tas571x_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
}
-
- gpiod_set_value(priv->pdn_gpio, 0);
- usleep_range(5000, 6000);
-
- regcache_cache_only(priv->regmap, false);
- ret = regcache_sync(priv->regmap);
- if (ret)
- return ret;
}
break;
case SND_SOC_BIAS_OFF:
- regcache_cache_only(priv->regmap, true);
- gpiod_set_value(priv->pdn_gpio, 1);
-
if (!IS_ERR(priv->mclk))
clk_disable_unprepare(priv->mclk);
break;
@@ -401,16 +390,6 @@ static const struct snd_kcontrol_new tas5711_controls[] = {
TAS571X_SOFT_MUTE_REG,
TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
1, 1),
-
- SOC_DOUBLE_R_RANGE("CH1 Mixer Volume",
- TAS5717_CH1_LEFT_CH_MIX_REG,
- TAS5717_CH1_RIGHT_CH_MIX_REG,
- 16, 0, 0x80, 0),
-
- SOC_DOUBLE_R_RANGE("CH2 Mixer Volume",
- TAS5717_CH2_LEFT_CH_MIX_REG,
- TAS5717_CH2_RIGHT_CH_MIX_REG,
- 16, 0, 0x80, 0),
};
static const struct regmap_range tas571x_readonly_regs_range[] = {
@@ -488,6 +467,16 @@ static const struct snd_kcontrol_new tas5717_controls[] = {
TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
1, 1),
+ SOC_DOUBLE_R_RANGE("CH1 Mixer Volume",
+ TAS5717_CH1_LEFT_CH_MIX_REG,
+ TAS5717_CH1_RIGHT_CH_MIX_REG,
+ 16, 0, 0x80, 0),
+
+ SOC_DOUBLE_R_RANGE("CH2 Mixer Volume",
+ TAS5717_CH2_LEFT_CH_MIX_REG,
+ TAS5717_CH2_RIGHT_CH_MIX_REG,
+ 16, 0, 0x80, 0),
+
/*
* The biquads are named according to the register names.
* Please note that TI's TAS57xx Graphical Development Environment
@@ -747,13 +736,14 @@ static int tas571x_i2c_probe(struct i2c_client *client,
/* pulse the active low reset line for ~100us */
usleep_range(100, 200);
gpiod_set_value(priv->reset_gpio, 0);
- usleep_range(12000, 20000);
+ usleep_range(13500, 20000);
}
ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0);
if (ret)
return ret;
+ usleep_range(50000, 60000);
memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver));
priv->codec_driver.component_driver.controls = priv->chip->controls;
@@ -770,9 +760,6 @@ static int tas571x_i2c_probe(struct i2c_client *client,
return ret;
}
- regcache_cache_only(priv->regmap, true);
- gpiod_set_value(priv->pdn_gpio, 1);
-
return snd_soc_register_codec(&client->dev, &priv->codec_driver,
&tas571x_dai, 1);
}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 26eb5a0a5575..fd5d1e091038 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -47,6 +47,7 @@ config SND_SOC_INTEL_SST_MATCH
config SND_SOC_INTEL_HASWELL
tristate
+ select SND_SOC_INTEL_SST_FIRMWARE
config SND_SOC_INTEL_BAYTRAIL
tristate
@@ -56,7 +57,6 @@ config SND_SOC_INTEL_HASWELL_MACH
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
@@ -138,7 +138,6 @@ config SND_SOC_INTEL_BROADWELL_MACH
I2C_DESIGNWARE_PLATFORM
depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT286
help
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index ba5c0d71720a..0a88537ca58a 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -416,6 +416,7 @@ static const struct dmi_system_id cht_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
},
},
+ { }
};
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 6532b8f0ab2f..865a21e557cc 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -130,8 +130,8 @@ static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
*/
ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3, &broxton_headset,
- NULL, 0);
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &broxton_headset, NULL, 0);
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 2989c164dafe..06fa5e85dd0e 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -674,7 +674,7 @@ static int skl_probe(struct pci_dev *pci,
if (skl->nhlt == NULL) {
err = -ENODEV;
- goto out_free;
+ goto out_display_power_off;
}
skl_nhlt_update_topology_bin(skl);
@@ -746,6 +746,9 @@ out_mach_free:
skl_machine_device_unregister(skl);
out_nhlt_free:
skl_nhlt_free(skl->nhlt);
+out_display_power_off:
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ snd_hdac_display_power(bus, false);
out_free:
skl->init_failed = 1;
skl_free(ebus);
@@ -785,8 +788,7 @@ static void skl_remove(struct pci_dev *pci)
release_firmware(skl->tplg);
- if (pci_dev_run_wake(pci))
- pm_runtime_get_noresume(&pci->dev);
+ pm_runtime_get_noresume(&pci->dev);
/* codec removal, invoke bus_device_remove */
snd_hdac_ext_bus_device_remove(ebus);
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index f2bf8661dd21..823b5a236d8d 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -208,7 +208,7 @@ config SND_PXA2XX_SOC_IMOTE2
config SND_MMP_SOC_BROWNSTONE
tristate "SoC Audio support for Marvell Brownstone"
- depends on SND_MMP_SOC && MACH_BROWNSTONE
+ depends on SND_MMP_SOC && MACH_BROWNSTONE && I2C
select SND_MMP_SOC_SSPA
select MFD_WM8994
select SND_SOC_WM8994
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 3cde9fb977fa..eff3f9a8b685 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -586,3 +586,6 @@ int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
return 0;
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);
+
+MODULE_DESCRIPTION("QTi LPASS CPU Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index e2ff538a8aa5..b392e51de94d 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -61,7 +61,41 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- int ret;
+ struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_variant *v = drvdata->variant;
+ int ret, dma_ch, dir = substream->stream;
+ struct lpass_pcm_data *data;
+
+ data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->i2s_port = cpu_dai->driver->id;
+ runtime->private_data = data;
+
+ dma_ch = 0;
+ if (v->alloc_dma_channel)
+ dma_ch = v->alloc_dma_channel(drvdata, dir);
+ if (dma_ch < 0)
+ return dma_ch;
+
+ drvdata->substream[dma_ch] = substream;
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_DMACTL_REG(v, dma_ch, dir), 0);
+ if (ret) {
+ dev_err(soc_runtime->dev,
+ "%s() error writing to rdmactl reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ data->rdma_ch = dma_ch;
+ else
+ data->wrdma_ch = dma_ch;
snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
@@ -80,13 +114,40 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
return 0;
}
+static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_variant *v = drvdata->variant;
+ struct lpass_pcm_data *data;
+ int dma_ch, dir = substream->stream;
+
+ data = runtime->private_data;
+ v = drvdata->variant;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ dma_ch = data->rdma_ch;
+ else
+ dma_ch = data->wrdma_ch;
+
+ drvdata->substream[dma_ch] = NULL;
+
+ if (v->free_dma_channel)
+ v->free_dma_channel(drvdata, dma_ch);
+
+ return 0;
+}
+
static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
snd_pcm_format_t format = params_format(params);
unsigned int channels = params_channels(params);
@@ -179,7 +240,8 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
unsigned int reg;
int ret;
@@ -203,7 +265,8 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
int ret, ch, dir = substream->stream;
@@ -257,7 +320,8 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
int ret, ch, dir = substream->stream;
@@ -333,7 +397,8 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_pcm_data *pcm_data = drvdata->private_data;
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
struct lpass_variant *v = drvdata->variant;
unsigned int base_addr, curr_addr;
int ret, ch, dir = substream->stream;
@@ -374,6 +439,7 @@ static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
static const struct snd_pcm_ops lpass_platform_pcm_ops = {
.open = lpass_platform_pcmops_open,
+ .close = lpass_platform_pcmops_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = lpass_platform_pcmops_hw_params,
.hw_free = lpass_platform_pcmops_hw_free,
@@ -470,117 +536,45 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
{
struct snd_pcm *pcm = soc_runtime->pcm;
struct snd_pcm_substream *psubstream, *csubstream;
- struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
- struct lpass_data *drvdata =
- snd_soc_platform_get_drvdata(soc_runtime->platform);
- struct lpass_variant *v = drvdata->variant;
int ret = -EINVAL;
- struct lpass_pcm_data *data;
size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->i2s_port = cpu_dai->driver->id;
- drvdata->private_data = data;
-
psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (psubstream) {
- if (v->alloc_dma_channel)
- data->rdma_ch = v->alloc_dma_channel(drvdata,
- SNDRV_PCM_STREAM_PLAYBACK);
-
- if (data->rdma_ch < 0)
- return data->rdma_ch;
-
- drvdata->substream[data->rdma_ch] = psubstream;
-
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
soc_runtime->platform->dev,
size, &psubstream->dma_buffer);
- if (ret)
- goto playback_alloc_err;
-
- ret = regmap_write(drvdata->lpaif_map,
- LPAIF_RDMACTL_REG(v, data->rdma_ch), 0);
if (ret) {
- dev_err(soc_runtime->dev,
- "%s() error writing to rdmactl reg: %d\n",
- __func__, ret);
- goto capture_alloc_err;
+ dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
+ return ret;
}
}
csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
if (csubstream) {
- if (v->alloc_dma_channel)
- data->wrdma_ch = v->alloc_dma_channel(drvdata,
- SNDRV_PCM_STREAM_CAPTURE);
-
- if (data->wrdma_ch < 0) {
- ret = data->wrdma_ch;
- goto capture_alloc_err;
- }
-
- drvdata->substream[data->wrdma_ch] = csubstream;
-
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
soc_runtime->platform->dev,
size, &csubstream->dma_buffer);
- if (ret)
- goto capture_alloc_err;
-
- ret = regmap_write(drvdata->lpaif_map,
- LPAIF_WRDMACTL_REG(v, data->wrdma_ch), 0);
if (ret) {
- dev_err(soc_runtime->dev,
- "%s() error writing to wrdmactl reg: %d\n",
- __func__, ret);
- goto capture_reg_err;
+ dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
+ if (psubstream)
+ snd_dma_free_pages(&psubstream->dma_buffer);
+ return ret;
}
+
}
return 0;
-
-capture_reg_err:
- if (csubstream)
- snd_dma_free_pages(&csubstream->dma_buffer);
-
-capture_alloc_err:
- if (psubstream)
- snd_dma_free_pages(&psubstream->dma_buffer);
-
- playback_alloc_err:
- dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n");
-
- return ret;
}
static void lpass_platform_pcm_free(struct snd_pcm *pcm)
{
- struct snd_soc_pcm_runtime *rt;
- struct lpass_data *drvdata;
- struct lpass_pcm_data *data;
- struct lpass_variant *v;
struct snd_pcm_substream *substream;
- int ch, i;
+ int i;
for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
substream = pcm->streams[i].substream;
if (substream) {
- rt = substream->private_data;
- drvdata = snd_soc_platform_get_drvdata(rt->platform);
- data = drvdata->private_data;
-
- ch = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- ? data->rdma_ch
- : data->wrdma_ch;
- v = drvdata->variant;
- drvdata->substream[ch] = NULL;
- if (v->free_dma_channel)
- v->free_dma_channel(drvdata, ch);
-
snd_dma_free_pages(&substream->dma_buffer);
substream->dma_buffer.area = NULL;
substream->dma_buffer.addr = 0;
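
The lpass-platform rework above moves per-stream bookkeeping out of a single pointer in driver data and into state allocated in .open and hung off the PCM runtime, so concurrent playback and capture opens each carry their own DMA channel record. A reduced sketch of that pattern, with hypothetical names (this is the shape of the change, not the driver code itself):

#include <linux/slab.h>
#include <sound/pcm.h>

struct example_pcm_data {
	int dma_ch;			/* hypothetical per-stream channel */
};

static int example_pcm_open(struct snd_pcm_substream *substream)
{
	struct example_pcm_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	/* every open gets its own record */
	substream->runtime->private_data = data;
	return 0;
}

static int example_pcm_close(struct snd_pcm_substream *substream)
{
	kfree(substream->runtime->private_data);
	return 0;
}

static int example_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct example_pcm_data *data = substream->runtime->private_data;

	/* later ops read the per-stream fields back from the runtime */
	return data->dma_ch >= 0 ? 0 : -EINVAL;
}
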
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 35b3cea8207d..924971b6ded5 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -59,7 +59,6 @@ struct lpass_data {
struct clk *pcnoc_mport_clk;
struct clk *pcnoc_sway_clk;
- void *private_data;
};
/* Variant data per each SOC */
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 97d6700b1009..cbc0023c2bc8 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -383,11 +383,6 @@ static int s3c_ac97_probe(struct platform_device *pdev)
goto err4;
}
- ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component,
- s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
- if (ret)
- goto err5;
-
ret = samsung_asoc_dma_platform_register(&pdev->dev,
ac97_pdata->dma_filter,
NULL, NULL);
@@ -396,6 +391,11 @@ static int s3c_ac97_probe(struct platform_device *pdev)
goto err5;
}
+ ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component,
+ s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
+ if (ret)
+ goto err5;
+
return 0;
err5:
free_irq(irq_res->start, NULL);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 7e32cf4581f8..7825bff45ae3 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1237,14 +1237,14 @@ static int samsung_i2s_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unable to get drvdata\n");
return -EFAULT;
}
- ret = devm_snd_soc_register_component(&sec_dai->pdev->dev,
- &samsung_i2s_component,
- &sec_dai->i2s_dai_drv, 1);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev,
+ sec_dai->filter, "tx-sec", NULL);
if (ret != 0)
return ret;
- return samsung_asoc_dma_platform_register(&pdev->dev,
- sec_dai->filter, "tx-sec", NULL);
+ return devm_snd_soc_register_component(&sec_dai->pdev->dev,
+ &samsung_i2s_component,
+ &sec_dai->i2s_dai_drv, 1);
}
pri_dai = i2s_alloc_dai(pdev, false);
@@ -1314,6 +1314,11 @@ static int samsung_i2s_probe(struct platform_device *pdev)
if (quirks & QUIRK_PRI_6CHAN)
pri_dai->i2s_dai_drv.playback.channels_max = 6;
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
+ NULL, NULL);
+ if (ret < 0)
+ goto err_disable_clk;
+
if (quirks & QUIRK_SEC_DAI) {
sec_dai = i2s_alloc_dai(pdev, true);
if (!sec_dai) {
@@ -1353,10 +1358,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
if (ret < 0)
goto err_free_dai;
- ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
- NULL, NULL);
- if (ret < 0)
- goto err_free_dai;
pm_runtime_enable(&pdev->dev);
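
The same reordering recurs across the Samsung CPU DAI drivers in this series: the dmaengine platform is registered before the DAI component. As far as the diffs show, the point is that the component comes last, so everything the machine card needs already exists when the card is able to bind. The resulting probe shape, with placeholder descriptors (samsung_asoc_dma_platform_register() is the driver's real helper, declared in its local dma.h; the rest is illustrative):

#include <linux/platform_device.h>
#include <sound/soc.h>

static const struct snd_soc_component_driver example_component = {
	.name = "example-i2s",
};
static struct snd_soc_dai_driver example_dai = {
	.name = "example-dai",
};

static int example_cpu_dai_probe(struct platform_device *pdev)
{
	int ret;

	/* DMA (platform) side first ... */
	ret = samsung_asoc_dma_platform_register(&pdev->dev, NULL, NULL, NULL);
	if (ret)
		return ret;

	/* ... DAI component last, so the card can bind once it appears */
	return devm_snd_soc_register_component(&pdev->dev, &example_component,
					       &example_dai, 1);
}
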
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 43e367a9acc3..c484985812ed 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -565,24 +565,25 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
+ goto err5;
+ }
+
pm_runtime_enable(&pdev->dev);
ret = devm_snd_soc_register_component(&pdev->dev, &s3c_pcm_component,
&s3c_pcm_dai[pdev->id], 1);
if (ret != 0) {
dev_err(&pdev->dev, "failed to get register DAI: %d\n", ret);
- goto err5;
- }
-
- ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
- goto err5;
+ goto err6;
}
return 0;
-
+err6:
+ pm_runtime_disable(&pdev->dev);
err5:
clk_disable_unprepare(pcm->pclk);
err4:
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 3e89fbc0c51d..0a4718207e6e 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -168,19 +168,19 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
s3c2412_i2s_pcm_stereo_in.addr = res->start + S3C2412_IISRXD;
s3c2412_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
- ret = s3c_i2sv2_register_component(&pdev->dev, -1,
- &s3c2412_i2s_component,
- &s3c2412_i2s_dai);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev,
+ pdata->dma_filter,
+ NULL, NULL);
if (ret) {
- pr_err("failed to register the dai\n");
+ pr_err("failed to register the DMA: %d\n", ret);
return ret;
}
- ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter,
- NULL, NULL);
+ ret = s3c_i2sv2_register_component(&pdev->dev, -1,
+ &s3c2412_i2s_component,
+ &s3c2412_i2s_dai);
if (ret)
- pr_err("failed to register the DMA: %d\n", ret);
+ pr_err("failed to register the dai\n");
return ret;
}
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index c78a936a3099..9052f6a7073e 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -474,18 +474,18 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO;
s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
- ret = devm_snd_soc_register_component(&pdev->dev,
- &s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev,
+ pdata->dma_filter,
+ NULL, NULL);
if (ret) {
- pr_err("failed to register the dai\n");
+ pr_err("failed to register the dma: %d\n", ret);
return ret;
}
- ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter,
- NULL, NULL);
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1);
if (ret)
- pr_err("failed to register the dma: %d\n", ret);
+ pr_err("failed to register the dai\n");
return ret;
}
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 26c1fbed4d35..779504f54bc0 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -416,15 +416,6 @@ static int spdif_probe(struct platform_device *pdev)
goto err3;
}
- dev_set_drvdata(&pdev->dev, spdif);
-
- ret = devm_snd_soc_register_component(&pdev->dev,
- &samsung_spdif_component, &samsung_spdif_dai, 1);
- if (ret != 0) {
- dev_err(&pdev->dev, "fail to register dai\n");
- goto err4;
- }
-
spdif_stereo_out.addr_width = 2;
spdif_stereo_out.addr = mem_res->start + DATA_OUTBUF;
filter = NULL;
@@ -432,7 +423,6 @@ static int spdif_probe(struct platform_device *pdev)
spdif_stereo_out.filter_data = spdif_pdata->dma_playback;
filter = spdif_pdata->dma_filter;
}
-
spdif->dma_playback = &spdif_stereo_out;
ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
@@ -442,6 +432,15 @@ static int spdif_probe(struct platform_device *pdev)
goto err4;
}
+ dev_set_drvdata(&pdev->dev, spdif);
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &samsung_spdif_component, &samsung_spdif_dai, 1);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "fail to register dai\n");
+ goto err4;
+ }
+
return 0;
err4:
iounmap(spdif->regs);
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 1bc8ebc2528e..ad54d4cf58ad 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -614,7 +614,11 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
iec958->status[3] = ucontrol->value.iec958.status[3];
mutex_unlock(&player->ctrl_lock);
- uni_player_set_channel_status(player, NULL);
+ if (player->substream && player->substream->runtime)
+ uni_player_set_channel_status(player,
+ player->substream->runtime);
+ else
+ uni_player_set_channel_status(player, NULL);
return 0;
}
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index e047ec06d538..56ed9472e89f 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -765,11 +765,11 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
if (!card)
- return NULL;
+ return ERR_PTR(-ENOMEM);
card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
if (!card->dai_link)
- return NULL;
+ return ERR_PTR(-ENOMEM);
card->dev = dev;
card->name = "sun4i-codec";
@@ -829,12 +829,6 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return PTR_ERR(scodec->clk_module);
}
- /* Enable the bus clock */
- if (clk_prepare_enable(scodec->clk_apb)) {
- dev_err(&pdev->dev, "Failed to enable the APB clock\n");
- return -EINVAL;
- }
-
scodec->gpio_pa = devm_gpiod_get_optional(&pdev->dev, "allwinner,pa",
GPIOD_OUT_LOW);
if (IS_ERR(scodec->gpio_pa)) {
@@ -844,6 +838,12 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return ret;
}
+ /* Enable the bus clock */
+ if (clk_prepare_enable(scodec->clk_apb)) {
+ dev_err(&pdev->dev, "Failed to enable the APB clock\n");
+ return -EINVAL;
+ }
+
/* DMA configuration for TX FIFO */
scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA;
scodec->playback_dma_data.maxburst = 4;
@@ -876,7 +876,8 @@ static int sun4i_codec_probe(struct platform_device *pdev)
}
card = sun4i_codec_create_card(&pdev->dev);
- if (!card) {
+ if (IS_ERR(card)) {
+ ret = PTR_ERR(card);
dev_err(&pdev->dev, "Failed to create our card\n");
goto err_unregister_codec;
}
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 0190cb6332f2..3fe4468ea2c5 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -304,7 +304,7 @@ struct snd_dbri {
spinlock_t lock;
struct dbri_dma *dma; /* Pointer to our DMA block */
- u32 dma_dvma; /* DBRI visible DMA address */
+ dma_addr_t dma_dvma; /* DBRI visible DMA address */
void __iomem *regs; /* dbri HW regs */
int dbri_irqp; /* intr queue pointer */
@@ -657,12 +657,14 @@ static void dbri_cmdwait(struct snd_dbri *dbri)
*/
static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len)
{
+ u32 dvma_addr = (u32)dbri->dma_dvma;
+
/* Space for 2 WAIT cmds (replaced later by 1 JUMP cmd) */
len += 2;
spin_lock(&dbri->cmdlock);
if (dbri->cmdptr - dbri->dma->cmd + len < DBRI_NO_CMDS - 2)
return dbri->cmdptr + 2;
- else if (len < sbus_readl(dbri->regs + REG8) - dbri->dma_dvma)
+ else if (len < sbus_readl(dbri->regs + REG8) - dvma_addr)
return dbri->dma->cmd;
else
printk(KERN_ERR "DBRI: no space for commands.");
@@ -680,6 +682,7 @@ static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len)
*/
static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len)
{
+ u32 dvma_addr = (u32)dbri->dma_dvma;
s32 tmp, addr;
static int wait_id = 0;
@@ -689,7 +692,7 @@ static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len)
*(cmd+1) = DBRI_CMD(D_WAIT, 1, wait_id);
/* Replace the last command with JUMP */
- addr = dbri->dma_dvma + (cmd - len - dbri->dma->cmd) * sizeof(s32);
+ addr = dvma_addr + (cmd - len - dbri->dma->cmd) * sizeof(s32);
*(dbri->cmdptr+1) = addr;
*(dbri->cmdptr) = DBRI_CMD(D_JUMP, 0, 0);
@@ -747,6 +750,7 @@ static void dbri_reset(struct snd_dbri *dbri)
/* Lock must not be held before calling this */
static void dbri_initialize(struct snd_dbri *dbri)
{
+ u32 dvma_addr = (u32)dbri->dma_dvma;
s32 *cmd;
u32 dma_addr;
unsigned long flags;
@@ -764,7 +768,7 @@ static void dbri_initialize(struct snd_dbri *dbri)
/*
* Initialize the interrupt ring buffer.
*/
- dma_addr = dbri->dma_dvma + dbri_dma_off(intr, 0);
+ dma_addr = dvma_addr + dbri_dma_off(intr, 0);
dbri->dma->intr[0] = dma_addr;
dbri->dbri_irqp = 1;
/*
@@ -778,7 +782,7 @@ static void dbri_initialize(struct snd_dbri *dbri)
dbri->cmdptr = cmd;
*(cmd++) = DBRI_CMD(D_WAIT, 1, 0);
*(cmd++) = DBRI_CMD(D_WAIT, 1, 0);
- dma_addr = dbri->dma_dvma + dbri_dma_off(cmd, 0);
+ dma_addr = dvma_addr + dbri_dma_off(cmd, 0);
sbus_writel(dma_addr, dbri->regs + REG8);
spin_unlock(&dbri->cmdlock);
@@ -1077,6 +1081,7 @@ static void recv_fixed(struct snd_dbri *dbri, int pipe, volatile __u32 *ptr)
static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period)
{
struct dbri_streaminfo *info = &dbri->stream_info[streamno];
+ u32 dvma_addr = (u32)dbri->dma_dvma;
__u32 dvma_buffer;
int desc;
int len;
@@ -1177,7 +1182,7 @@ static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period)
else {
dbri->next_desc[last_desc] = desc;
dbri->dma->desc[last_desc].nda =
- dbri->dma_dvma + dbri_dma_off(desc, desc);
+ dvma_addr + dbri_dma_off(desc, desc);
}
last_desc = desc;
@@ -1192,7 +1197,7 @@ static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period)
}
dbri->dma->desc[last_desc].nda =
- dbri->dma_dvma + dbri_dma_off(desc, first_desc);
+ dvma_addr + dbri_dma_off(desc, first_desc);
dbri->next_desc[last_desc] = first_desc;
dbri->pipes[info->pipe].first_desc = first_desc;
dbri->pipes[info->pipe].desc = first_desc;
@@ -1697,6 +1702,7 @@ interrupts are disabled.
static void xmit_descs(struct snd_dbri *dbri)
{
struct dbri_streaminfo *info;
+ u32 dvma_addr = (u32)dbri->dma_dvma;
s32 *cmd;
unsigned long flags;
int first_td;
@@ -1718,7 +1724,7 @@ static void xmit_descs(struct snd_dbri *dbri)
*(cmd++) = DBRI_CMD(D_SDP, 0,
dbri->pipes[info->pipe].sdp
| D_SDP_P | D_SDP_EVERY | D_SDP_C);
- *(cmd++) = dbri->dma_dvma +
+ *(cmd++) = dvma_addr +
dbri_dma_off(desc, first_td);
dbri_cmdsend(dbri, cmd, 2);
@@ -1740,7 +1746,7 @@ static void xmit_descs(struct snd_dbri *dbri)
*(cmd++) = DBRI_CMD(D_SDP, 0,
dbri->pipes[info->pipe].sdp
| D_SDP_P | D_SDP_EVERY | D_SDP_C);
- *(cmd++) = dbri->dma_dvma +
+ *(cmd++) = dvma_addr +
dbri_dma_off(desc, first_td);
dbri_cmdsend(dbri, cmd, 2);
@@ -2539,7 +2545,7 @@ static int snd_dbri_create(struct snd_card *card,
if (!dbri->dma)
return -ENOMEM;
- dprintk(D_GEN, "DMA Cmd Block 0x%p (0x%08x)\n",
+ dprintk(D_GEN, "DMA Cmd Block 0x%p (%pad)\n",
dbri->dma, dbri->dma_dvma);
/* Map the registers into memory. */
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 9e5276d6dda0..2ddc034673a8 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip)
snd_usb_endpoint_free(ep);
mutex_destroy(&chip->mutex);
- dev_set_drvdata(&chip->dev->dev, NULL);
+ if (!atomic_read(&chip->shutdown))
+ dev_set_drvdata(&chip->dev->dev, NULL);
kfree(chip);
return 0;
}
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 4ffff7be9299..a53fef0c673b 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1337,8 +1337,8 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
}
if (first) {
- ui_browser__printf(&browser->b, "%c", folded_sign);
- width--;
+ ui_browser__printf(&browser->b, "%c ", folded_sign);
+ width -= 2;
first = false;
} else {
ui_browser__printf(&browser->b, " ");
@@ -1361,8 +1361,10 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
width -= hpp.buf - s;
}
- ui_browser__write_nstring(&browser->b, "", hierarchy_indent);
- width -= hierarchy_indent;
+ if (!first) {
+ ui_browser__write_nstring(&browser->b, "", hierarchy_indent);
+ width -= hierarchy_indent;
+ }
if (column >= browser->b.horiz_scroll) {
char s[2048];
@@ -1381,7 +1383,13 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
}
perf_hpp_list__for_each_format(entry->hpp_list, fmt) {
- ui_browser__write_nstring(&browser->b, "", 2);
+ if (first) {
+ ui_browser__printf(&browser->b, "%c ", folded_sign);
+ first = false;
+ } else {
+ ui_browser__write_nstring(&browser->b, "", 2);
+ }
+
width -= 2;
/*
@@ -1555,10 +1563,11 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
int indent = hists->nr_hpp_node - 2;
bool first_node, first_col;
- ret = scnprintf(buf, size, " ");
+ ret = scnprintf(buf, size, " ");
if (advance_hpp_check(&dummy_hpp, ret))
return ret;
+ first_node = true;
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
@@ -1573,12 +1582,16 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " ");
if (advance_hpp_check(&dummy_hpp, ret))
break;
+
+ first_node = false;
}
- ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
- indent * HIERARCHY_INDENT, "");
- if (advance_hpp_check(&dummy_hpp, ret))
- return ret;
+ if (!first_node) {
+ ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
+ indent * HIERARCHY_INDENT, "");
+ if (advance_hpp_check(&dummy_hpp, ret))
+ return ret;
+ }
first_node = true;
list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
@@ -2076,8 +2089,21 @@ void hist_browser__init(struct hist_browser *browser,
browser->b.use_navkeypressed = true;
browser->show_headers = symbol_conf.show_hist_headers;
- hists__for_each_format(hists, fmt)
+ if (symbol_conf.report_hierarchy) {
+ struct perf_hpp_list_node *fmt_node;
+
+ /* count overhead columns (in the first node) */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
+ ++browser->b.columns;
+
+ /* add a single column for whole hierarchy sort keys*/
+ /* add a single column for whole hierarchy sort keys */
++browser->b.columns;
+ } else {
+ hists__for_each_format(hists, fmt)
+ ++browser->b.columns;
+ }
hists__reset_column_width(hists);
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index b02992efb513..a69f027368ef 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1600,18 +1600,18 @@ static void hists__hierarchy_output_resort(struct hists *hists,
if (prog)
ui_progress__update(prog, 1);
+ hists->nr_entries++;
+ if (!he->filtered) {
+ hists->nr_non_filtered_entries++;
+ hists__calc_col_len(hists, he);
+ }
+
if (!he->leaf) {
hists__hierarchy_output_resort(hists, prog,
&he->hroot_in,
&he->hroot_out,
min_callchain_hits,
use_callchain);
- hists->nr_entries++;
- if (!he->filtered) {
- hists->nr_non_filtered_entries++;
- hists__calc_col_len(hists, he);
- }
-
continue;
}
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index a538ff44b108..a1883bbb0144 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -8,18 +8,19 @@
# as published by the Free Software Foundation; version 2
# of the License.
-include ../../../../scripts/Makefile.include
-
-OUTPUT=./
-ifeq ("$(origin O)", "command line")
- OUTPUT := $(O)/
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
endif
-ifneq ($(OUTPUT),)
-# check that the output directory actually exists
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
-$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
+include $(srctree)/../../scripts/Makefile.include
+
+OUTPUT=$(srctree)/
+ifeq ("$(origin O)", "command line")
+ OUTPUT := $(O)/power/acpi/
endif
+#$(info Determined 'OUTPUT' to be $(OUTPUT))
# --- CONFIGURATION BEGIN ---
@@ -70,8 +71,8 @@ WARNINGS := -Wall
WARNINGS += $(call cc-supports,-Wstrict-prototypes)
WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
-KERNEL_INCLUDE := ../../../include
-ACPICA_INCLUDE := ../../../drivers/acpi/acpica
+KERNEL_INCLUDE := $(OUTPUT)include
+ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica
CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
CFLAGS += $(WARNINGS)
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
index ec87a9e562c0..373738338f51 100644
--- a/tools/power/acpi/Makefile.rules
+++ b/tools/power/acpi/Makefile.rules
@@ -8,28 +8,42 @@
# as published by the Free Software Foundation; version 2
# of the License.
-$(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE
- $(ECHO) " LD " $@
- $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@
+objdir := $(OUTPUT)tools/$(TOOL)/
+toolobjs := $(addprefix $(objdir),$(TOOL_OBJS))
+$(OUTPUT)$(TOOL): $(toolobjs) FORCE
+ $(ECHO) " LD " $(subst $(OUTPUT),,$@)
+ $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(toolobjs) -L$(OUTPUT) -o $@
+ $(ECHO) " STRIP " $(subst $(OUTPUT),,$@)
$(QUIET) $(STRIPCMD) $@
-$(OUTPUT)%.o: %.c
- $(ECHO) " CC " $@
+$(KERNEL_INCLUDE):
+ $(ECHO) " MKDIR " $(subst $(OUTPUT),,$@)
+ $(QUIET) mkdir -p $(KERNEL_INCLUDE)
+ $(ECHO) " CP " $(subst $(OUTPUT),,$@)
+ $(QUIET) cp -rf $(srctree)/../../../include/acpi $(KERNEL_INCLUDE)/
+
+$(objdir)%.o: %.c $(KERNEL_INCLUDE)
+ $(ECHO) " CC " $(subst $(OUTPUT),,$@)
$(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
all: $(OUTPUT)$(TOOL)
clean:
- -find $(OUTPUT) \( -not -type d \) \
- -and \( -name '*~' -o -name '*.[oas]' \) \
- -type f -print \
- | xargs rm -f
- -rm -f $(OUTPUT)$(TOOL)
+ $(ECHO) " RMOBJ " $(subst $(OUTPUT),,$(objdir))
+ $(QUIET) find $(objdir) \( -not -type d \)\
+ -and \( -name '*~' -o -name '*.[oas]' \)\
+ -type f -print | xargs rm -f
+ $(ECHO) " RM " $(TOOL)
+ $(QUIET) rm -f $(OUTPUT)$(TOOL)
+ $(ECHO) " RMINC " $(subst $(OUTPUT),,$(KERNEL_INCLUDE))
+ $(QUIET) rm -rf $(KERNEL_INCLUDE)
install-tools:
- $(INSTALL) -d $(DESTDIR)${sbindir}
- $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir}
+ $(ECHO) " INST " $(TOOL)
+ $(QUIET) $(INSTALL) -d $(DESTDIR)$(sbindir)
+ $(QUIET) $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)$(sbindir)
uninstall-tools:
- - rm -f $(DESTDIR)${sbindir}/$(TOOL)
+ $(ECHO) " UNINST " $(TOOL)
+ $(QUIET) rm -f $(DESTDIR)$(sbindir)/$(TOOL)
install: all install-tools $(EXTRA_INSTALL)
uninstall: uninstall-tools $(EXTRA_UNINSTALL)
diff --git a/tools/power/acpi/tools/acpidbg/Makefile b/tools/power/acpi/tools/acpidbg/Makefile
index 352df4b41ae9..f2d06e773eb4 100644
--- a/tools/power/acpi/tools/acpidbg/Makefile
+++ b/tools/power/acpi/tools/acpidbg/Makefile
@@ -17,9 +17,7 @@ vpath %.c \
../../os_specific/service_layers\
.
CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\
- -I.\
- -I../../../../../drivers/acpi/acpica\
- -I../../../../../include
+ -I.
LDFLAGS += -lpthread
TOOL_OBJS = \
acpidbg.o
diff --git a/tools/power/acpi/tools/acpidbg/acpidbg.c b/tools/power/acpi/tools/acpidbg/acpidbg.c
index a88ac45b7756..4308362d7068 100644
--- a/tools/power/acpi/tools/acpidbg/acpidbg.c
+++ b/tools/power/acpi/tools/acpidbg/acpidbg.c
@@ -12,10 +12,16 @@
#include <acpi/acpi.h>
/* Headers not included by include/acpi/platform/aclinux.h */
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <error.h>
#include <stdbool.h>
#include <fcntl.h>
#include <assert.h>
-#include <linux/circ_buf.h>
+#include <sys/select.h>
+#include "../../../../../include/linux/circ_buf.h"
#define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg"
#define ACPI_AML_SEC_TICK 1
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
index 04b5db7c7c0b..f7c7af1f9258 100644
--- a/tools/power/acpi/tools/acpidump/Makefile
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -19,9 +19,7 @@ vpath %.c \
./\
../../common\
../../os_specific/service_layers
-CFLAGS += -DACPI_DUMP_APP -I.\
- -I../../../../../drivers/acpi/acpica\
- -I../../../../../include
+CFLAGS += -DACPI_DUMP_APP -I.
TOOL_OBJS = \
apdump.o\
apfiles.o\
@@ -49,7 +47,9 @@ TOOL_OBJS = \
include ../../Makefile.rules
-install-man: ../../man/acpidump.8
- $(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8
+install-man: $(srctree)/man/acpidump.8
+ $(ECHO) " INST " acpidump.8
+ $(QUIET) $(INSTALL_DATA) -D $< $(DESTDIR)$(mandir)/man8/acpidump.8
uninstall-man:
- - rm -f $(DESTDIR)${mandir}/man8/acpidump.8
+ $(ECHO) " UNINST " acpidump.8
+ $(QUIET) rm -f $(DESTDIR)$(mandir)/man8/acpidump.8
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index b4bf76971dc9..1eef0aed6423 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -296,7 +296,7 @@ int cmd_freq_set(int argc, char **argv)
struct cpufreq_affected_cpus *cpus;
if (!bitmask_isbitset(cpus_chosen, cpu) ||
- cpupower_is_cpu_online(cpu))
+ cpupower_is_cpu_online(cpu) != 1)
continue;
cpus = cpufreq_get_related_cpus(cpu);
@@ -316,10 +316,7 @@ int cmd_freq_set(int argc, char **argv)
cpu <= bitmask_last(cpus_chosen); cpu++) {
if (!bitmask_isbitset(cpus_chosen, cpu) ||
- cpupower_is_cpu_online(cpu))
- continue;
-
- if (cpupower_is_cpu_online(cpu) != 1)
+ cpupower_is_cpu_online(cpu) != 1)
continue;
printf(_("Setting cpu: %d\n"), cpu);
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index 877a8a4721b6..c012edbdb13b 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -3,8 +3,8 @@ all:
all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
CFLAGS += -Wall
-CFLAGS += -pthread -O2 -ggdb
-LDFLAGS += -pthread -O2 -ggdb
+CFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
+LDFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
main.o: main.c main.h
ring.o: ring.c main.h
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
index 147abb452a6c..f31353fac541 100644
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -96,7 +96,13 @@ void set_affinity(const char *arg)
assert(!ret);
}
-static void run_guest(void)
+void poll_used(void)
+{
+ while (used_empty())
+ busy_wait();
+}
+
+static void __attribute__((__flatten__)) run_guest(void)
{
int completed_before;
int completed = 0;
@@ -141,7 +147,7 @@ static void run_guest(void)
assert(completed <= bufs);
assert(started <= bufs);
if (do_sleep) {
- if (enable_call())
+ if (used_empty() && enable_call())
wait_for_call();
} else {
poll_used();
@@ -149,7 +155,13 @@ static void run_guest(void)
}
}
-static void run_host(void)
+void poll_avail(void)
+{
+ while (avail_empty())
+ busy_wait();
+}
+
+static void __attribute__((__flatten__)) run_host(void)
{
int completed_before;
int completed = 0;
@@ -160,7 +172,7 @@ static void run_host(void)
for (;;) {
if (do_sleep) {
- if (enable_kick())
+ if (avail_empty() && enable_kick())
wait_for_kick();
} else {
poll_avail();
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 16917acb0ade..34e63cc4c572 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -56,15 +56,15 @@ void alloc_ring(void);
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call();
+bool used_empty();
bool enable_call();
void kick_available();
-void poll_used();
/* host side */
void disable_kick();
+bool avail_empty();
bool enable_kick();
bool use_buf(unsigned *, void **);
void call_used();
-void poll_avail();
/* implemented by main */
extern bool do_sleep;
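
The header change above is the visible half of a refactor running through all the ringtest backends below: poll_used()/poll_avail() move out of the backends, which now only export emptiness predicates, and the harness in main.c builds both the busy-poll loop and the sleep path on top of them. A skeleton of that division of labour (declarations mirror main.h; the wrapper function is illustrative only):

#include <stdbool.h>

/* per-backend predicates, as in main.h */
bool used_empty(void);
bool enable_call(void);		/* arms the call event, then re-checks used_empty() */

/* harness-side helpers, as in main.c */
void busy_wait(void);
void wait_for_call(void);

static void guest_wait_for_completion(bool do_sleep)
{
	if (do_sleep) {
		/* only sleep if the ring looked empty before and after arming */
		if (used_empty() && enable_call())
			wait_for_call();
	} else {
		while (used_empty())
			busy_wait();
	}
}
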
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
index eda2f4824130..b8d1c1daac7c 100644
--- a/tools/virtio/ringtest/noring.c
+++ b/tools/virtio/ringtest/noring.c
@@ -24,8 +24,9 @@ void *get_buf(unsigned *lenp, void **bufp)
return "Buffer";
}
-void poll_used(void)
+bool used_empty()
{
+ return false;
}
void disable_call()
@@ -54,8 +55,9 @@ bool enable_kick()
assert(0);
}
-void poll_avail(void)
+bool avail_empty()
{
+ return false;
}
bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index bd2ad1d3b7a9..635b07b4fdd3 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -133,18 +133,9 @@ void *get_buf(unsigned *lenp, void **bufp)
return datap;
}
-void poll_used(void)
+bool used_empty()
{
- void *b;
-
- do {
- if (tailcnt == headcnt || __ptr_ring_full(&array)) {
- b = NULL;
- barrier();
- } else {
- b = "Buffer\n";
- }
- } while (!b);
+ return (tailcnt == headcnt || __ptr_ring_full(&array));
}
void disable_call()
@@ -173,14 +164,9 @@ bool enable_kick()
assert(0);
}
-void poll_avail(void)
+bool avail_empty()
{
- void *b;
-
- do {
- barrier();
- b = __ptr_ring_peek(&array);
- } while (!b);
+ return !__ptr_ring_peek(&array);
}
bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
index c25c8d248b6b..747c5dd47be8 100644
--- a/tools/virtio/ringtest/ring.c
+++ b/tools/virtio/ringtest/ring.c
@@ -163,12 +163,11 @@ void *get_buf(unsigned *lenp, void **bufp)
return datap;
}
-void poll_used(void)
+bool used_empty()
{
unsigned head = (ring_size - 1) & guest.last_used_idx;
- while (ring[head].flags & DESC_HW)
- busy_wait();
+ return (ring[head].flags & DESC_HW);
}
void disable_call()
@@ -180,13 +179,11 @@ void disable_call()
bool enable_call()
{
- unsigned head = (ring_size - 1) & guest.last_used_idx;
-
event->call_index = guest.last_used_idx;
/* Flush call index write */
/* Barrier D (for pairing) */
smp_mb();
- return ring[head].flags & DESC_HW;
+ return used_empty();
}
void kick_available(void)
@@ -213,20 +210,17 @@ void disable_kick()
bool enable_kick()
{
- unsigned head = (ring_size - 1) & host.used_idx;
-
event->kick_index = host.used_idx;
/* Barrier C (for pairing) */
smp_mb();
- return !(ring[head].flags & DESC_HW);
+ return avail_empty();
}
-void poll_avail(void)
+bool avail_empty()
{
unsigned head = (ring_size - 1) & host.used_idx;
- while (!(ring[head].flags & DESC_HW))
- busy_wait();
+ return !(ring[head].flags & DESC_HW);
}
bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
index 761866212aac..bbc3043b2fb1 100644
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -194,24 +194,16 @@ void *get_buf(unsigned *lenp, void **bufp)
return datap;
}
-void poll_used(void)
+bool used_empty()
{
+ unsigned short last_used_idx = guest.last_used_idx;
#ifdef RING_POLL
- unsigned head = (ring_size - 1) & guest.last_used_idx;
+ unsigned short head = last_used_idx & (ring_size - 1);
+ unsigned index = ring.used->ring[head].id;
- for (;;) {
- unsigned index = ring.used->ring[head].id;
-
- if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
- busy_wait();
- else
- break;
- }
+ return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
#else
- unsigned head = guest.last_used_idx;
-
- while (ring.used->idx == head)
- busy_wait();
+ return ring.used->idx == last_used_idx;
#endif
}
@@ -224,22 +216,11 @@ void disable_call()
bool enable_call()
{
- unsigned short last_used_idx;
-
- vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
+ vring_used_event(&ring) = guest.last_used_idx;
/* Flush call index write */
/* Barrier D (for pairing) */
smp_mb();
-#ifdef RING_POLL
- {
- unsigned short head = last_used_idx & (ring_size - 1);
- unsigned index = ring.used->ring[head].id;
-
- return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
- }
-#else
- return ring.used->idx == last_used_idx;
-#endif
+ return used_empty();
}
void kick_available(void)
@@ -266,36 +247,21 @@ void disable_kick()
bool enable_kick()
{
- unsigned head = host.used_idx;
-
- vring_avail_event(&ring) = head;
+ vring_avail_event(&ring) = host.used_idx;
/* Barrier C (for pairing) */
smp_mb();
-#ifdef RING_POLL
- {
- unsigned index = ring.avail->ring[head & (ring_size - 1)];
-
- return (index ^ head ^ 0x8000) & ~(ring_size - 1);
- }
-#else
- return head == ring.avail->idx;
-#endif
+ return avail_empty();
}
-void poll_avail(void)
+bool avail_empty()
{
unsigned head = host.used_idx;
#ifdef RING_POLL
- for (;;) {
- unsigned index = ring.avail->ring[head & (ring_size - 1)];
- if ((index ^ head ^ 0x8000) & ~(ring_size - 1))
- busy_wait();
- else
- break;
- }
+ unsigned index = ring.avail->ring[head & (ring_size - 1)];
+
+ return ((index ^ head ^ 0x8000) & ~(ring_size - 1));
#else
- while (ring.avail->idx == head)
- busy_wait();
+ return head == ring.avail->idx;
#endif
}
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 6e9c40eea208..69ccce308458 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
continue;
type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
& ARMV8_PMU_EVTYPE_EVENT;
- if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+ if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
&& (enable & BIT(i))) {
reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
reg = lower_32_bits(reg);
@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
/* Software increment event doesn't need to be backed by a perf event */
- if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+ if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
+ select_idx != ARMV8_PMU_CYCLE_IDX)
return;
memset(&attr, 0, sizeof(struct perf_event_attr));
@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
attr.exclude_hv = 1; /* Don't count EL2 events */
attr.exclude_host = 1; /* Don't count host events */
- attr.config = eventsel;
+ attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
+ ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
counter = kvm_pmu_get_counter_value(vcpu, select_idx);
/* The initial sample period (overflow count) of an event. */
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index e18b30ddcdce..ebe1b9fa3c4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
return container_of(dev, struct vgic_io_device, dev);
}
-static bool check_region(const struct vgic_register_region *region,
+static bool check_region(const struct kvm *kvm,
+ const struct vgic_register_region *region,
gpa_t addr, int len)
{
- if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
- return true;
- if ((region->access_flags & VGIC_ACCESS_32bit) &&
- len == sizeof(u32) && !(addr & 3))
- return true;
- if ((region->access_flags & VGIC_ACCESS_64bit) &&
- len == sizeof(u64) && !(addr & 7))
- return true;
+ int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+
+ switch (len) {
+ case sizeof(u8):
+ flags = VGIC_ACCESS_8bit;
+ break;
+ case sizeof(u32):
+ flags = VGIC_ACCESS_32bit;
+ break;
+ case sizeof(u64):
+ flags = VGIC_ACCESS_64bit;
+ break;
+ default:
+ return false;
+ }
+
+ if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
+ if (!region->bits_per_irq)
+ return true;
+
+ /* Do we access a non-allocated IRQ? */
+ return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
+ }
return false;
}
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
addr - iodev->base_addr);
- if (!region || !check_region(region, addr, len)) {
+ if (!region || !check_region(vcpu->kvm, region, addr, len)) {
memset(val, 0, len);
return 0;
}
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
addr - iodev->base_addr);
- if (!region)
- return 0;
-
- if (!check_region(region, addr, len))
+ if (!region || !check_region(vcpu->kvm, region, addr, len))
return 0;
switch (iodev->iodev_type) {
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 4c34d39d44a0..84961b4e4422 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
/*
- * (addr & mask) gives us the byte offset for the INT ID, so we want to
- * divide this with 'bytes per irq' to get the INT ID, which is given
- * by '(bits) / 8'. But we do this with fixed-point-arithmetic and
- * take advantage of the fact that division by a fraction equals
- * multiplication with the inverted fraction, and scale up both the
- * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ * (addr & mask) gives us the _byte_ offset for the INT ID.
+ * We multiply this by 8 the get the _bit_ offset, then divide this by
+ * the number of bits to learn the actual INT ID.
+ * But instead of a division (which requires a "long long div" implementation),
+ * we shift by the binary logarithm of <bits>.
+ * This assumes that <bits> is a power of two.
*/
#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
- 64 / (bits) / 8)
+ 8 >> ilog2(bits))
/*
* Some VGIC registers store per-IRQ information, with a different number
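
Two concrete values may help check the reworked macro; both agree with the old "64 / (bits) / 8" fixed-point form, under the macro's stated assumption that <bits> is a power of two:

/*
 * 1 bit per IRQ (e.g. an enable register), byte offset 0x10:
 *	mask = 1 * 1024 / 8 - 1 = 127
 *	(0x10 & 127) * 8 >> ilog2(1) = 16 * 8 >> 0 = 128	-> INTID 128
 * 8 bits per IRQ (e.g. a priority register), byte offset 0x10:
 *	mask = 8 * 1024 / 8 - 1 = 1023
 *	(0x10 & 1023) * 8 >> ilog2(8) = 128 >> 3 = 16		-> INTID 16
 */
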
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 0a063af40565..9bab86757fa4 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -50,8 +50,10 @@ void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
- kvm_notify_acked_irq(vcpu->kvm, 0,
- intid - VGIC_NR_PRIVATE_IRQS);
+ /* Only SPIs require notification */
+ if (vgic_valid_spi(vcpu->kvm, intid))
+ kvm_notify_acked_irq(vcpu->kvm, 0,
+ intid - VGIC_NR_PRIVATE_IRQS);
}
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9f0dae397d9c..5c9f9745e6ca 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -41,8 +41,10 @@ void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
- kvm_notify_acked_irq(vcpu->kvm, 0,
- intid - VGIC_NR_PRIVATE_IRQS);
+ /* Only SPIs require notification */
+ if (vgic_valid_spi(vcpu->kvm, intid))
+ kvm_notify_acked_irq(vcpu->kvm, 0,
+ intid - VGIC_NR_PRIVATE_IRQS);
}
/*
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 2893d5ba523a..6440b56ec90e 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -273,6 +273,18 @@ retry:
* no more work for us to do.
*/
spin_unlock(&irq->irq_lock);
+
+ /*
+ * We have to kick the VCPU here, because we could be
+ * queueing an edge-triggered interrupt for which we
+ * get no EOI maintenance interrupt. In that case,
+ * while the IRQ is already on the VCPU's AP list, the
+ * VCPU could have EOI'ed the original interrupt and
+ * won't see this one until it exits for some other
+ * reason.
+ */
+ if (vcpu)
+ kvm_vcpu_kick(vcpu);
return false;
}
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8035cc1eb955..efeceb0a222d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -91,6 +91,7 @@ static void async_pf_execute(struct work_struct *work)
spin_lock(&vcpu->async_pf.lock);
list_add_tail(&apf->link, &vcpu->async_pf.done);
+ apf->vcpu = NULL;
spin_unlock(&vcpu->async_pf.lock);
/*
@@ -113,6 +114,8 @@ static void async_pf_execute(struct work_struct *work)
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
+ spin_lock(&vcpu->async_pf.lock);
+
/* cancel outstanding work queue item */
while (!list_empty(&vcpu->async_pf.queue)) {
struct kvm_async_pf *work =
@@ -120,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
typeof(*work), queue);
list_del(&work->queue);
+ /*
+ * We know it's present in vcpu->async_pf.done, do
+ * nothing here.
+ */
+ if (!work->vcpu)
+ continue;
+
+ spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
flush_work(&work->work);
#else
@@ -129,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
kmem_cache_free(async_pf_cache, work);
}
#endif
+ spin_lock(&vcpu->async_pf.lock);
}
- spin_lock(&vcpu->async_pf.lock);
while (!list_empty(&vcpu->async_pf.done)) {
struct kvm_async_pf *work =
list_first_entry(&vcpu->async_pf.done,
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f397e9b20370..a29786dd9522 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,6 +42,7 @@
#ifdef CONFIG_HAVE_KVM_IRQFD
+static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
@@ -167,7 +168,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
list_del_init(&irqfd->list);
- schedule_work(&irqfd->shutdown);
+ queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -554,7 +555,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
* so that we guarantee there will not be any more interrupts on this
* gsi once this deassign function returns.
*/
- flush_work(&irqfd->shutdown);
+ flush_workqueue(irqfd_cleanup_wq);
return 0;
}
@@ -591,7 +592,7 @@ kvm_irqfd_release(struct kvm *kvm)
* Block until we know all outstanding shutdown jobs have completed
* since we do not take a kvm* reference.
*/
- flush_work(&irqfd->shutdown);
+ flush_workqueue(irqfd_cleanup_wq);
}
@@ -621,8 +622,23 @@ void kvm_irq_routing_update(struct kvm *kvm)
spin_unlock_irq(&kvm->irqfds.lock);
}
+/*
+ * create a host-wide workqueue for issuing deferred shutdown requests
+ * aggregated from all vm* instances. We need our own isolated
+ * queue to ease flushing work items when a VM exits.
+ */
+int kvm_irqfd_init(void)
+{
+ irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
+ if (!irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
void kvm_irqfd_exit(void)
{
+ destroy_workqueue(irqfd_cleanup_wq);
}
#endif
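
The eventfd change above gives irqfd shutdown work its own workqueue, so that flushing on VM teardown waits only for that class of work rather than for whatever happens to be on a shared queue. The bare pattern, with a hypothetical work item and handler:

#include <linux/workqueue.h>

static struct workqueue_struct *cleanup_wq;

static void example_shutdown_fn(struct work_struct *work)
{
	/* teardown for one object would happen here */
}
static DECLARE_WORK(example_shutdown, example_shutdown_fn);

static int example_init(void)
{
	cleanup_wq = alloc_workqueue("example-cleanup", 0, 0);
	return cleanup_wq ? 0 : -ENOMEM;
}

static void example_release(void)
{
	queue_work(cleanup_wq, &example_shutdown);
	/* waits only for items on this queue, i.e. our own shutdown work */
	flush_workqueue(cleanup_wq);
	destroy_workqueue(cleanup_wq);
}
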
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2907b7b78654..7f9ee2929cfe 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2889,10 +2889,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
- ops->destroy(dev);
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
mutex_unlock(&kvm->lock);
+ ops->destroy(dev);
return ret;
}
@@ -3844,7 +3844,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
* kvm_arch_init makes sure there's at most one caller
* for architectures that support multiple implementations,
* like intel and amd on x86.
+ * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
+ * conflicts in case kvm is already setup for another implementation.
*/
+ r = kvm_irqfd_init();
+ if (r)
+ goto out_irqfd;
if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
r = -ENOMEM;
@@ -3926,6 +3931,7 @@ out_free_0a:
free_cpumask_var(cpus_hardware_enabled);
out_free_0:
kvm_irqfd_exit();
+out_irqfd:
kvm_arch_exit();
out_fail:
return r;