363 files changed, 10986 insertions, 5223 deletions
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt index 284e2b14cfbe..26649b4c4dd8 100644 --- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt +++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt @@ -74,6 +74,12 @@ Required properties for DSI: The 3 clocks output from the DSI analog PHY: dsi[01]_byte, dsi[01]_ddr2, and dsi[01]_ddr +Required properties for the TXP (writeback) block: +- compatible: Should be "brcm,bcm2835-txp" +- reg: Physical base address and length of the TXP block's registers +- interrupts: The interrupt number + See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt + [1] Documentation/devicetree/bindings/media/video-interfaces.txt Example: diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt index 383183a89164..8469de510001 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt @@ -40,7 +40,7 @@ Required properties (all function blocks): "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt "mediatek,<chip>-disp-mutex" - display mutex "mediatek,<chip>-disp-od" - overdrive - the supported chips are mt2701 and mt8173. + the supported chips are mt2701, mt2712 and mt8173. - reg: Physical base address and length of the function block register space - interrupts: The interrupt signal from the function block (required, except for merge and split function blocks). diff --git a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt new file mode 100644 index 000000000000..55183d360032 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt @@ -0,0 +1,28 @@ +BOE HV070WSA-100 7.01" WSVGA TFT LCD panel + +Required properties: +- compatible: should be "boe,hv070wsa-100" +- power-supply: regulator to provide the VCC supply voltage (3.3 volts) +- enable-gpios: GPIO pin to enable and disable panel (active high) + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. + +The device node can contain one 'port' child node with one child +'endpoint' node, according to the bindings defined in [1]. This +node should describe panel's video bus. + +[1]: Documentation/devicetree/bindings/media/video-interfaces.txt + +Example: + + panel: panel { + compatible = "boe,hv070wsa-100"; + power-supply = <&vcc_3v3_reg>; + enable-gpios = <&gpd1 3 GPIO_ACTIVE_HIGH>; + port { + panel_ep: endpoint { + remote-endpoint = <&bridge_out_ep>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt new file mode 100644 index 000000000000..897085ee3cd4 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt @@ -0,0 +1,8 @@ +DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface. + +Required properties: +- compatible: should be "dataimage,scf0700c48ggu18" +- power-supply: as specified in the base binding + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. 
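The dataimage binding above carries no example of its own; since it follows the simple-panel binding, a minimal sketch of a matching node looks like the following (the regulator label is a hypothetical placeholder, not part of the binding):

Example:

	panel {
		compatible = "dataimage,scf0700c48ggu18";
		/* hypothetical regulator providing the supply voltage */
		power-supply = <&reg_3v3>;
	};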
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt new file mode 100644 index 000000000000..bf06bb025b08 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt @@ -0,0 +1,13 @@ +DLC Display Co. DLC0700YZG-1 7.0" WSVGA TFT LCD panel + +Required properties: +- compatible: should be "dlc,dlc0700yzg-1" +- power-supply: See simple-panel.txt + +Optional properties: +- reset-gpios: See panel-common.txt +- enable-gpios: See simple-panel.txt +- backlight: See simple-panel.txt + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt new file mode 100644 index 000000000000..f56b99ebd9be --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt @@ -0,0 +1,39 @@ +Emerging Display Technology Corp. Displays +========================================== + + +Display bindings for EDT Display Technology Corp. displays which are +compatible with the simple-panel binding, which is specified in +simple-panel.txt + + +5.7" VGA TFT Panels +-------------------- + ++-----------------+---------------------+-------------------------------------+ +| Identifier | compatible | description | ++=================+=====================+=====================================+ +| ET057090DHU | edt,et057090dhu | 5.7" VGA TFT LCD panel | ++-----------------+---------------------+-------------------------------------+ + + +7.0" WVGA TFT Panels +-------------------- + ++-----------------+---------------------+-------------------------------------+ +| Identifier | compatible | description | ++=================+=====================+=====================================+ +| ETM0700G0DH6 | edt,etm0700g0dh6 | WVGA TFT display with capacitive | +| | | touchscreen | ++-----------------+---------------------+-------------------------------------+ +| ETM0700G0BDH6 | edt,etm0700g0bdh6 | Same as ETM0700G0DH6 but with | +| | | inverted pixel clock. | ++-----------------+---------------------+-------------------------------------+ +| ETM0700G0EDH6 | edt,etm0700g0edh6 | Same display as the ETM0700G0BDH6, | +| | | but with changed hardware for the | +| | | backlight and the touch interface | ++-----------------+---------------------+-------------------------------------+ +| ET070080DH6 | edt,et070080dh6 | Same timings as the ETM0700G0DH6, | +| | | but with resistive touch. | ++-----------------+---------------------+-------------------------------------+ + diff --git a/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt b/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt deleted file mode 100644 index 20cb38e836e4..000000000000 --- a/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt +++ /dev/null @@ -1,10 +0,0 @@ -Emerging Display Technology Corp. ET070080DH6 7.0" WVGA TFT LCD panel - -Required properties: -- compatible: should be "edt,et070080dh6" - -This panel is the same as ETM0700G0DH6 except for the touchscreen. -ET070080DH6 is the model with resistive touch. - -This binding is compatible with the simple-panel binding, which is specified -in simple-panel.txt in this directory.
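For orientation, here is a hypothetical device tree node for one of the panels in the et-series table above, using the compatible string carried over from the deleted per-panel binding; the phandle labels are invented, and the optional properties come from the common simple-panel binding:

Example:

	panel {
		compatible = "edt,etm0700g0dh6";
		/* hypothetical phandles; see simple-panel.txt */
		power-supply = <&reg_3v3>;
		backlight = <&backlight>;
	};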
diff --git a/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt b/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt deleted file mode 100644 index ee4b18053e40..000000000000 --- a/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt +++ /dev/null @@ -1,10 +0,0 @@ -Emerging Display Technology Corp. ETM0700G0DH6 7.0" WVGA TFT LCD panel - -Required properties: -- compatible: should be "edt,etm0700g0dh6" - -This panel is the same as ET070080DH6 except for the touchscreen. -ETM0700G0DH6 is the model with capacitive multitouch. - -This binding is compatible with the simple-panel binding, which is specified -in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt new file mode 100644 index 000000000000..7c234cf68e11 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt @@ -0,0 +1,12 @@ +Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel + +Required properties: +- compatible: should be "innolux,g070y2-l01" +- power-supply: as specified in the base binding + +Optional properties: +- backlight: as specified in the base binding +- enable-gpios: as specified in the base binding + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt new file mode 100644 index 000000000000..595d9dfeffd3 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt @@ -0,0 +1,24 @@ +Innolux P097PFG 9.7" 1536x2048 TFT LCD panel + +Required properties: +- compatible: should be "innolux,p097pfg" +- reg: DSI virtual channel of the peripheral +- avdd-supply: phandle of the regulator that provides positive voltage +- avee-supply: phandle of the regulator that provides negative voltage +- enable-gpios: panel enable gpio + +Optional properties: +- backlight: phandle of the backlight device attached to the panel + +Example: + + &mipi_dsi { + panel { + compatible = "innolux,p097pfg"; + reg = <0>; + avdd-supply = <...>; + avee-supply = <...>; + backlight = <&backlight>; + enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt new file mode 100644 index 000000000000..164a5fa236da --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt @@ -0,0 +1,22 @@ +Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel + +Required properties: +- compatible: should be "kingdisplay,kd097d04" +- reg: DSI virtual channel of the peripheral +- power-supply: phandle of the regulator that provides the supply voltage +- enable-gpios: panel enable gpio + +Optional properties: +- backlight: phandle of the backlight device attached to the panel + +Example: + + &mipi_dsi { + panel { + compatible = "kingdisplay,kd097d04"; + reg = <0>; + power-supply = <...>; + backlight = <&backlight>; + enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt index 4903d7b1d947..e78292b1a131 100644 ---
a/Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt +++ b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt @@ -1,7 +1,7 @@ -Emerging Display Technology Corp. 5.7" VGA TFT LCD panel +Newhaven Display International 480 x 272 TFT LCD panel Required properties: -- compatible: should be "edt,et057090dhu" +- compatible: should be "newhaven,nhd-4.3-480272ef-atxl" This binding is compatible with the simple-panel binding, which is specified in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt new file mode 100644 index 000000000000..eb1fb9f8d1f4 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt @@ -0,0 +1,25 @@ +Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. + +Required properties: +- compatible: should be "rocktech,rk070er9427" + +Optional properties: +- backlight: phandle of the backlight device attached to the panel + +Optional nodes: +- Video port for LCD panel input. + +Example: + panel { + compatible = "rocktech,rk070er9427"; + backlight = <&backlight_lcd>; + + port { + lcd_panel_in: endpoint { + remote-endpoint = <&lcd_display_out>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt new file mode 100644 index 000000000000..0753f6967279 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt @@ -0,0 +1,12 @@ +Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel + +Required properties: +- compatible: should be "sharp,lq035q7db03" +- power-supply: phandle of the regulator that provides the supply voltage + +Optional properties: +- enable-gpios: GPIO pin to enable or disable the panel +- backlight: phandle of the backlight device attached to the panel + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. 
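The Sharp binding above lists its properties without a usage example; a hypothetical node exercising all of them follows (the GPIO controller and phandle labels are assumptions, not part of the binding):

Example:

	panel {
		compatible = "sharp,lq035q7db03";
		power-supply = <&reg_lcd_3v3>;
		enable-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
		backlight = <&backlight>;
	};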
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt index 5a9319ad8861..f8773ecb7525 100644 --- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt +++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt @@ -101,9 +101,9 @@ DWC HDMI PHY Required properties: - compatible: value must be one of: - * allwinner,sun50i-a64-hdmi-phy * allwinner,sun8i-a83t-hdmi-phy * allwinner,sun8i-h3-hdmi-phy + * allwinner,sun50i-a64-hdmi-phy - reg: base address and size of memory-mapped region - clocks: phandles to the clocks feeding the HDMI PHY * bus: the HDMI PHY interface clock @@ -147,6 +147,7 @@ Required properties: * allwinner,sun8i-a33-tcon * allwinner,sun8i-a83t-tcon-lcd * allwinner,sun8i-a83t-tcon-tv + * allwinner,sun8i-r40-tcon-tv * allwinner,sun8i-v3s-tcon * allwinner,sun9i-a80-tcon-lcd * allwinner,sun9i-a80-tcon-tv @@ -181,7 +182,7 @@ For TCONs with channel 0, there is one more clock required: For TCONs with channel 1, there is one more clock required: - 'tcon-ch1': The clock driving the TCON channel 1 -When TCON support LVDS (all TCONs except TV TCON on A83T and those found +When TCON support LVDS (all TCONs except TV TCONs on A83T, R40 and those found in A13, H3, H5 and V3s SoCs), you need one more reset line: - 'lvds': The reset line driving the LVDS logic @@ -399,6 +400,7 @@ Required properties: * allwinner,sun8i-a33-display-engine * allwinner,sun8i-a83t-display-engine * allwinner,sun8i-h3-display-engine + * allwinner,sun8i-r40-display-engine * allwinner,sun8i-v3s-display-engine * allwinner,sun9i-a80-display-engine diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 698453943789..2afaa633ffc8 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -86,6 +86,7 @@ cubietech Cubietech, Ltd. cypress Cypress Semiconductor Corporation cznic CZ.NIC, z.s.p.o. dallas Maxim Integrated Products (formerly Dallas Semiconductor) +dataimage DataImage, Inc. davicom DAVICOM Semiconductor, Inc. delta Delta Electronics, Inc. denx Denx Software Engineering @@ -94,6 +95,7 @@ dh DH electronics GmbH digi Digi International Inc. digilent Diglent, Inc. dioo Dioo Microcircuit Co., Ltd +dlc DLC Display Co., Ltd. dlg Dialog Semiconductor dlink D-Link Corporation dmo Data Modul AG @@ -189,6 +191,7 @@ keymile Keymile GmbH khadas Khadas kiebackpeter Kieback & Peter GmbH kinetic Kinetic Technologies +kingdisplay King & Display Technology Co., Ltd. kingnovel Kingnovel Technology Co., Ltd. koe Kaohsiung Opto-Electronics Inc. kosagi Sutajio Ko-Usagi PTE Ltd. diff --git a/Documentation/gpu/drm-client.rst b/Documentation/gpu/drm-client.rst new file mode 100644 index 000000000000..7e672063e7eb --- /dev/null +++ b/Documentation/gpu/drm-client.rst @@ -0,0 +1,12 @@ +================= +Kernel clients +================= + +.. kernel-doc:: drivers/gpu/drm/drm_client.c + :doc: overview + +.. kernel-doc:: include/drm/drm_client.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/drm_client.c + :export: diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index e37557b30f62..f9cfcdcdf024 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -109,6 +109,15 @@ Framebuffer CMA Helper Functions Reference .. _drm_bridges: +Framebuffer GEM Helper Reference +================================ + +.. 
kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c + :doc: overview + +.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c + :export: + Bridges ======= @@ -169,6 +178,15 @@ Display Port Helper Functions Reference .. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c :export: +Display Port CEC Helper Functions Reference +=========================================== + +.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c + :doc: dp cec helpers + +.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c + :export: + Display Port Dual Mode Adaptor Helper Functions Reference ========================================================= @@ -282,13 +300,13 @@ Auxiliary Modeset Helpers .. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c :export: -Framebuffer GEM Helper Reference -================================ +OF/DT Helpers +============= -.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c +.. kernel-doc:: drivers/gpu/drm/drm_of.c :doc: overview -.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c +.. kernel-doc:: drivers/gpu/drm/drm_of.c :export: Legacy Plane Helper Reference diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index 514939433004..5dee6b8a4c12 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -56,11 +56,12 @@ Overview The basic object structure KMS presents to userspace is fairly simple. Framebuffers (represented by :c:type:`struct drm_framebuffer <drm_framebuffer>`, -see `Frame Buffer Abstraction`_) feed into planes. One or more (or even no) -planes feed their pixel data into a CRTC (represented by :c:type:`struct -drm_crtc <drm_crtc>`, see `CRTC Abstraction`_) for blending. The precise -blending step is explained in more detail in `Plane Composition Properties`_ and -related chapters. +see `Frame Buffer Abstraction`_) feed into planes. Planes are represented by +:c:type:`struct drm_plane <drm_plane>`, see `Plane Abstraction`_ for more +details. One or more (or even no) planes feed their pixel data into a CRTC +(represented by :c:type:`struct drm_crtc <drm_crtc>`, see `CRTC Abstraction`_) +for blending. The precise blending step is explained in more detail in `Plane +Composition Properties`_ and related chapters. For the output routing the first step is encoders (represented by :c:type:`struct drm_encoder <drm_encoder>`, see `Encoder Abstraction`_). Those @@ -466,7 +467,7 @@ Output discovery and initialization example drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - drm_mode_connector_attach_encoder(&intel_output->base, + drm_connector_attach_encoder(&intel_output->base, &intel_output->enc); /* Set up the DDC bus. */ diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst index 00288f34c5a6..1fcf8e851e15 100644 --- a/Documentation/gpu/index.rst +++ b/Documentation/gpu/index.rst @@ -10,6 +10,7 @@ Linux GPU Driver Developer's Guide drm-kms drm-kms-helpers drm-uapi + drm-client drivers vga-switcheroo vgaarbiter diff --git a/Documentation/gpu/v3d.rst b/Documentation/gpu/v3d.rst new file mode 100644 index 000000000000..543f7fbf526e --- /dev/null +++ b/Documentation/gpu/v3d.rst @@ -0,0 +1,28 @@ +===================================== + drm/v3d Broadcom V3D Graphics Driver +===================================== + +.. kernel-doc:: drivers/gpu/drm/v3d/v3d_drv.c + :doc: Broadcom V3D Graphics Driver + +GPU buffer object (BO) management +--------------------------------- + +.. 
kernel-doc:: drivers/gpu/drm/v3d/v3d_bo.c + :doc: V3D GEM BO management support + +Address space management +=========================================== +.. kernel-doc:: drivers/gpu/drm/v3d/v3d_mmu.c + :doc: Broadcom V3D MMU + +GPU Scheduling +=========================================== +.. kernel-doc:: drivers/gpu/drm/v3d/v3d_sched.c + :doc: Broadcom V3D scheduling + +Interrupts +-------------- + +.. kernel-doc:: drivers/gpu/drm/v3d/v3d_irq.c + :doc: Interrupt management for the V3D engine diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index be0fa7e39c26..ba0e786c952e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1151,6 +1151,11 @@ int arm_dma_supported(struct device *dev, u64 mask) return __dma_supported(dev, mask, false); } +static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) +{ + return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; +} + #ifdef CONFIG_ARM_DMA_USE_IOMMU static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) @@ -2296,7 +2301,7 @@ void arm_iommu_detach_device(struct device *dev) iommu_detach_device(mapping->domain, dev); kref_put(&mapping->kref, release_iommu_mapping); to_dma_iommu_mapping(dev) = NULL; - set_dma_ops(dev, NULL); + set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent)); pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); } @@ -2357,11 +2362,6 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { } #endif /* CONFIG_ARM_DMA_USE_IOMMU */ -static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) -{ - return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; -} - void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index da5d8ac60062..50d5848bf22e 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -338,6 +338,18 @@ static resource_size_t __init gen3_stolen_base(int num, int slot, int func, return bsm & INTEL_BSM_MASK; } +static resource_size_t __init gen11_stolen_base(int num, int slot, int func, + resource_size_t stolen_size) +{ + u64 bsm; + + bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0); + bsm &= INTEL_BSM_MASK; + bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32; + + return bsm; +} + static resource_size_t __init i830_stolen_size(int num, int slot, int func) { u16 gmch_ctrl; @@ -498,6 +510,11 @@ static const struct intel_early_ops chv_early_ops __initconst = { .stolen_size = chv_stolen_size, }; +static const struct intel_early_ops gen11_early_ops __initconst = { + .stolen_base = gen11_stolen_base, + .stolen_size = gen9_stolen_size, +}; + static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_I830_IDS(&i830_early_ops), INTEL_I845G_IDS(&i845_early_ops), @@ -529,6 +546,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_CFL_IDS(&gen9_early_ops), INTEL_GLK_IDS(&gen9_early_ops), INTEL_CNL_IDS(&gen9_early_ops), + INTEL_ICL_11_IDS(&gen11_early_ops), }; struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0); diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index 20bf90f4ee63..6c95f61a32e7 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -141,6 +141,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, if (signaled) { RCU_INIT_POINTER(fobj->shared[signaled_idx], fence); } 
else { + BUG_ON(fobj->shared_count >= fobj->shared_max); RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); fobj->shared_count++; } @@ -230,10 +231,9 @@ void reservation_object_add_shared_fence(struct reservation_object *obj, old = reservation_object_get_list(obj); obj->staged = NULL; - if (!fobj) { - BUG_ON(old->shared_count >= old->shared_max); + if (!fobj) reservation_object_add_shared_inplace(obj, old, fence); - } else + else reservation_object_add_shared_replace(obj, old, fobj, fence); } EXPORT_SYMBOL(reservation_object_add_shared_fence); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 2a72d2feb76d..cb88528e7b10 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -122,6 +122,16 @@ config DRM_LOAD_EDID_FIRMWARE default case is N. Details and instructions how to build your own EDID data are given in Documentation/EDID/HOWTO.txt. +config DRM_DP_CEC + bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support" + select CEC_CORE + help + Choose this option if you want to enable HDMI CEC support for + DisplayPort/USB-C to HDMI adapters. + + Note: not all adapters support this feature, and even those + that do often do not hook up the CEC pin. + config DRM_TTM tristate depends on DRM && MMU @@ -213,6 +223,17 @@ config DRM_VGEM as used by Mesa's software renderer for enhanced performance. If M is selected the module will be called vgem. +config DRM_VKMS + tristate "Virtual KMS (EXPERIMENTAL)" + depends on DRM + select DRM_KMS_HELPER + default n + help + Virtual Kernel Mode-Setting (VKMS) is used for testing or for + running a GPU in headless machines. Choose this option to get + a VKMS. + + If M is selected the module will be called vkms. source "drivers/gpu/drm/exynos/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 69c13517ea3a..a6771cef85e2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_encoder.o drm_mode_object.o drm_property.o \ drm_plane.o drm_color_mgmt.o drm_print.o \ drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \ - drm_syncobj.o drm_lease.o drm_writeback.o + drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_DRM_VM) += drm_vm.o @@ -41,6 +41,7 @@ drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o +drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/ @@ -69,6 +70,7 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/ obj-$(CONFIG_DRM_VIA) +=via/ obj-$(CONFIG_DRM_VGEM) += vgem/ +obj-$(CONFIG_DRM_VKMS) += vkms/ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ obj-$(CONFIG_DRM_EXYNOS) +=exynos/ obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 8e66851eb427..c770d73352a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -212,30 +212,21 @@ static void amdgpu_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status) { - struct drm_encoder *best_encoder = NULL; - struct drm_encoder *encoder = NULL; + struct drm_encoder
*best_encoder; + struct drm_encoder *encoder; const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; bool connected; int i; best_encoder = connector_funcs->best_encoder(connector); - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, - connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { if ((encoder == best_encoder) && (status == connector_status_connected)) connected = true; else connected = false; amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected); - } } @@ -246,17 +237,11 @@ amdgpu_connector_find_encoder(struct drm_connector *connector, struct drm_encoder *encoder; int i; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - encoder = drm_encoder_find(connector->dev, NULL, - connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { if (encoder->encoder_type == encoder_type) return encoder; } + return NULL; } @@ -349,22 +334,24 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector) int ret; if (amdgpu_connector->edid) { - drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid); + drm_connector_update_edid_property(connector, amdgpu_connector->edid); ret = drm_add_edid_modes(connector, amdgpu_connector->edid); return ret; } - drm_mode_connector_update_edid_property(connector, NULL); + drm_connector_update_edid_property(connector, NULL); return 0; } static struct drm_encoder * amdgpu_connector_best_single_encoder(struct drm_connector *connector) { - int enc_id = connector->encoder_ids[0]; + struct drm_encoder *encoder; + int i; + + /* pick the first one */ + drm_connector_for_each_possible_encoder(connector, encoder, i) + return encoder; - /* pick the encoder ids */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); return NULL; } @@ -985,9 +972,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) struct drm_device *dev = connector->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - struct drm_encoder *encoder = NULL; const struct drm_encoder_helper_funcs *encoder_funcs; - int i, r; + int r; enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; @@ -1077,14 +1063,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) /* find analog encoder */ if (amdgpu_connector->dac_load_detect) { - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]); - if (!encoder) - continue; + struct drm_encoder *encoder; + int i; + drm_connector_for_each_possible_encoder(connector, encoder, i) { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; @@ -1132,18 +1114,11 @@ exit: static struct drm_encoder * amdgpu_connector_dvi_encoder(struct drm_connector *connector) { - int enc_id = connector->encoder_ids[0]; struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct drm_encoder *encoder; int i; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, 
NULL, connector->encoder_ids[i]); - if (!encoder) - continue; + drm_connector_for_each_possible_encoder(connector, encoder, i) { if (amdgpu_connector->use_digital == true) { if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) return encoder; @@ -1158,8 +1133,9 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector) /* then check use digitial */ /* pick the first one */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); + drm_connector_for_each_possible_encoder(connector, encoder, i) + return encoder; + return NULL; } @@ -1296,15 +1272,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn struct amdgpu_encoder *amdgpu_encoder; int i; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, - connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { amdgpu_encoder = to_amdgpu_encoder(encoder); switch (amdgpu_encoder->encoder_id) { @@ -1326,14 +1294,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector) int i; bool found = false; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - encoder = drm_encoder_find(connector->dev, NULL, - connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { amdgpu_encoder = to_amdgpu_encoder(encoder); if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) found = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index 94138abe093b..ae8fac34f7a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -46,7 +46,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev) list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { amdgpu_encoder = to_amdgpu_encoder(encoder); if (amdgpu_encoder->devices & amdgpu_connector->devices) { - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector); adev->mode_info.bl_encoder = amdgpu_encoder; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index aea8b89765c6..15257634a53a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -270,25 +270,18 @@ static int dce_virtual_early_init(void *handle) static struct drm_encoder * dce_virtual_encoder(struct drm_connector *connector) { - int enc_id = connector->encoder_ids[0]; struct drm_encoder *encoder; int i; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) return encoder; } /* pick the first one */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); + drm_connector_for_each_possible_encoder(connector, encoder, i) + return encoder; + return NULL; } @@ -635,7 +628,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, drm_connector_register(connector); /* link them */ - 
drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2064cb21d0ed..5fc13e71a3b5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -903,14 +903,14 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) (struct edid *) sink->dc_edid.raw_edid; - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, aconnector->edid); } amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid); } else { amdgpu_dm_remove_sink_from_freesync_module(connector); - drm_mode_connector_update_edid_property(connector, NULL); + drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; aconnector->dc_sink = NULL; aconnector->edid = NULL; @@ -3676,7 +3676,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, link, link_index); - drm_mode_connector_attach_encoder( + drm_connector_attach_encoder( &aconnector->base, &aencoder->base); drm_connector_register(&aconnector->base); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 8348d4d46b30..9a300732ba37 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -251,7 +251,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); if (!edid) { - drm_mode_connector_update_edid_property( + drm_connector_update_edid_property( &aconnector->base, NULL); return ret; @@ -279,7 +279,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) connector, aconnector->edid); } - drm_mode_connector_update_edid_property( + drm_connector_update_edid_property( &aconnector->base, aconnector->edid); ret = drm_add_edid_modes(connector, aconnector->edid); @@ -363,7 +363,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector, connector->base.id, aconnector->mst_port); aconnector->port = port; - drm_mode_connector_set_path_property(connector, pathprop); + drm_connector_set_path_property(connector, pathprop); drm_connector_list_iter_end(&conn_iter); aconnector->mst_connected = true; @@ -411,7 +411,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, dev->mode_config.tile_property, 0); - drm_mode_connector_set_path_property(connector, pathprop); + drm_connector_set_path_property(connector, pathprop); /* * Initialize connector state before adding the connectror to drm and @@ -459,7 +459,7 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) static void dm_dp_mst_link_status_reset(struct drm_connector *connector) { mutex_lock(&connector->dev->mode_config.mutex); - drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); mutex_unlock(&connector->dev->mode_config.mutex); } diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c index b8f6f9a5dfbe..68629e614990 100644 --- a/drivers/gpu/drm/arc/arcpgu_sim.c +++ b/drivers/gpu/drm/arc/arcpgu_sim.c @@ -99,7 +99,7 @@ int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np) goto error_encoder_cleanup; } - ret = 
drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret < 0) { dev_err(drm->dev, "could not attach connector to encoder\n"); drm_connector_unregister(connector); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 036dff8a1f33..5e77d456d9bb 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -790,12 +790,12 @@ static int ast_get_modes(struct drm_connector *connector) if (!flags) edid = drm_get_edid(connector, &ast_connector->i2c->adapter); if (edid) { - drm_mode_connector_update_edid_property(&ast_connector->base, edid); + drm_connector_update_edid_property(&ast_connector->base, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); return ret; } else - drm_mode_connector_update_edid_property(&ast_connector->base, NULL); + drm_connector_update_edid_property(&ast_connector->base, NULL); return 0; } @@ -900,7 +900,7 @@ static int ast_connector_init(struct drm_device *dev) connector->polled = DRM_CONNECTOR_POLL_CONNECT; encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); ast_connector->i2c = ast_i2c_create(dev); if (!ast_connector->i2c) diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 233980a78591..ca5a9afdd5cf 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -259,7 +259,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs_crtc_init(bochs->dev); bochs_encoder_init(bochs->dev); bochs_connector_init(bochs->dev); - drm_mode_connector_attach_encoder(&bochs->connector, + drm_connector_attach_encoder(&bochs->connector, &bochs->encoder); return 0; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 73021b388e12..6437b878724a 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -601,7 +601,7 @@ static int adv7511_get_modes(struct adv7511 *adv7511, __adv7511_power_off(adv7511); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); adv7511_set_config_csc(adv7511, connector, adv7511->rgb, @@ -860,7 +860,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge) } drm_connector_helper_add(&adv->connector, &adv7511_connector_helper_funcs); - drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder); + drm_connector_attach_encoder(&adv->connector, bridge->encoder); if (adv->type == ADV7533) ret = adv7533_attach_dsi(adv); diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index b49043866be6..f8433c93f463 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -969,8 +969,8 @@ static int anx78xx_get_modes(struct drm_connector *connector) goto unlock; } - err = drm_mode_connector_update_edid_property(connector, - anx78xx->edid); + err = drm_connector_update_edid_property(connector, + anx78xx->edid); if (err) { DRM_ERROR("Failed to update EDID property: %d\n", err); goto unlock; @@ -1048,8 +1048,8 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge) anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD; - err = drm_mode_connector_attach_encoder(&anx78xx->connector, - bridge->encoder); + err = 
drm_connector_attach_encoder(&anx78xx->connector, + bridge->encoder); if (err) { DRM_ERROR("Failed to link up connector to encoder: %d\n", err); return err; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 2bcbfadb6ac5..d68986cea132 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1119,8 +1119,8 @@ static int analogix_dp_get_modes(struct drm_connector *connector) edid = drm_get_edid(connector, &dp->aux.ddc); pm_runtime_put(dp->dev); if (edid) { - drm_mode_connector_update_edid_property(&dp->connector, - edid); + drm_connector_update_edid_property(&dp->connector, + edid); num_modes += drm_add_edid_modes(&dp->connector, edid); kfree(edid); } @@ -1210,7 +1210,7 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge) drm_connector_helper_add(connector, &analogix_dp_connector_helper_funcs); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); } /* diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c index f2d43f24acfb..ce9496d13986 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -1152,7 +1152,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host, np = of_node_get(dev->dev.of_node); panel = of_drm_find_panel(np); - if (panel) { + if (!IS_ERR(panel)) { bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI); } else { bridge = of_drm_find_bridge(dev->dev.of_node); diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index 9837c8d69e69..9b706789a341 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -55,7 +55,7 @@ static int dumb_vga_get_modes(struct drm_connector *connector) goto fallback; } - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); return ret; @@ -122,7 +122,7 @@ static int dumb_vga_attach(struct drm_bridge *bridge) return ret; } - drm_mode_connector_attach_encoder(&vga->connector, + drm_connector_attach_encoder(&vga->connector, bridge->encoder); return 0; diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c index 75b0d3f6e4de..f56c92f7af7c 100644 --- a/drivers/gpu/drm/bridge/lvds-encoder.c +++ b/drivers/gpu/drm/bridge/lvds-encoder.c @@ -68,9 +68,9 @@ static int lvds_encoder_probe(struct platform_device *pdev) panel = of_drm_find_panel(panel_node); of_node_put(panel_node); - if (!panel) { + if (IS_ERR(panel)) { dev_dbg(&pdev->dev, "panel not found, deferring probe\n"); - return -EPROBE_DEFER; + return PTR_ERR(panel); } lvds_encoder->panel_bridge = diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index 7ccadba7c98c..2136c97aeb8e 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -152,7 +152,7 @@ static int ge_b850v3_lvds_get_modes(struct drm_connector *connector) ge_b850v3_lvds_ptr->edid = (struct edid *)stdp2690_get_edid(client); if (ge_b850v3_lvds_ptr->edid) { - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, ge_b850v3_lvds_ptr->edid); num_modes = drm_add_edid_modes(connector, ge_b850v3_lvds_ptr->edid); @@ -241,7 +241,7 @@ static int 
ge_b850v3_lvds_attach(struct drm_bridge *bridge) return ret; } - ret = drm_mode_connector_attach_encoder(connector, bridge->encoder); + ret = drm_connector_attach_encoder(connector, bridge->encoder); if (ret) return ret; diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index d64a3283822a..a3e817abace1 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -222,7 +222,7 @@ static int ptn3460_get_modes(struct drm_connector *connector) } ptn_bridge->edid = (struct edid *)edid; - drm_mode_connector_update_edid_property(connector, ptn_bridge->edid); + drm_connector_update_edid_property(connector, ptn_bridge->edid); num_modes = drm_add_edid_modes(connector, ptn_bridge->edid); @@ -265,7 +265,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge) drm_connector_helper_add(&ptn_bridge->connector, &ptn3460_connector_helper_funcs); drm_connector_register(&ptn_bridge->connector); - drm_mode_connector_attach_encoder(&ptn_bridge->connector, + drm_connector_attach_encoder(&ptn_bridge->connector, bridge->encoder); if (ptn_bridge->panel) diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 6d99d4a3beb3..7cbaba213ef6 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -79,7 +79,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge) return ret; } - drm_mode_connector_attach_encoder(&panel_bridge->connector, + drm_connector_attach_encoder(&panel_bridge->connector, bridge->encoder); ret = drm_panel_attach(panel_bridge->panel, &panel_bridge->connector); diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 81198f5e9afa..7334d1b62b71 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -503,7 +503,7 @@ static int ps8622_attach(struct drm_bridge *bridge) drm_connector_helper_add(&ps8622->connector, &ps8622_connector_helper_funcs); drm_connector_register(&ps8622->connector); - drm_mode_connector_attach_encoder(&ps8622->connector, + drm_connector_attach_encoder(&ps8622->connector, bridge->encoder); if (ps8622->panel) diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 60373d7eb220..e59a13542333 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -170,7 +170,7 @@ static int sii902x_get_modes(struct drm_connector *connector) return ret; edid = drm_get_edid(connector, sii902x->i2c->adapter); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); if (edid) { num = drm_add_edid_modes(connector, edid); kfree(edid); @@ -324,7 +324,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge) else sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT; - drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder); + drm_connector_attach_encoder(&sii902x->connector, bridge->encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 3c136f2b954f..5971976284bf 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1922,7 +1922,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector) hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid); hdmi->sink_has_audio = drm_detect_monitor_audio(edid); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, 
edid); cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1974,7 +1974,7 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge) drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 0fd9cf27542c..8e28e738cb52 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1140,7 +1140,7 @@ static int tc_connector_get_modes(struct drm_connector *connector) if (!edid) return 0; - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); return count; @@ -1195,7 +1195,7 @@ static int tc_bridge_attach(struct drm_bridge *bridge) drm_display_info_set_bus_formats(&tc->connector.display_info, &bus_format, 1); - drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder); + drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index acb857030951..c3e32138c6bb 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -62,7 +62,7 @@ static int tfp410_get_modes(struct drm_connector *connector) goto fallback; } - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); return drm_add_edid_modes(connector, edid); fallback: @@ -132,7 +132,7 @@ static int tfp410_attach(struct drm_bridge *bridge) return ret; } - drm_mode_connector_attach_encoder(&dvi->connector, + drm_connector_attach_encoder(&dvi->connector, bridge->encoder); return 0; diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index b529f8c8e2a6..336bfda40125 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -530,7 +530,7 @@ int cirrus_modeset_init(struct cirrus_device *cdev) return -1; } - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); ret = cirrus_fbdev_init(cdev); if (ret) { diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 4215be9a9fc5..3eb061e11e2e 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1111,6 +1111,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); drm_printf(p, "\trotation=%x\n", state->rotation); + drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos); drm_printf(p, "\tcolor-encoding=%s\n", drm_get_color_encoding_name(state->color_encoding)); drm_printf(p, "\tcolor-range=%s\n", @@ -2427,6 +2428,7 @@ static int prepare_signaling(struct drm_device *dev, } for_each_new_connector_in_state(state, conn, conn_state, i) { + struct drm_writeback_connector *wb_conn; struct drm_writeback_job *job; struct drm_out_fence_state *f; struct dma_fence *fence; @@ -2450,7 +2452,8 @@ static int prepare_signaling(struct drm_device *dev, f[*num_fences].out_fence_ptr = fence_ptr; *fence_state = f; - fence = drm_writeback_get_out_fence((struct drm_writeback_connector *)conn); + wb_conn = drm_connector_to_writeback(conn); + fence 
= drm_writeback_get_out_fence(wb_conn); if (!fence) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 8008a7de2e10..866a2cc72ef6 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -645,7 +645,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, if (ret) return ret; - connectors_mask += BIT(i); + connectors_mask |= BIT(i); } /* @@ -1184,10 +1184,12 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev, const struct drm_connector_helper_funcs *funcs; funcs = connector->helper_private; + if (!funcs->atomic_commit) + continue; if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) { WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); - funcs->atomic_commit(connector, new_conn_state->writeback_job); + funcs->atomic_commit(connector, new_conn_state); } } } @@ -1448,6 +1450,8 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_modeset_enables(dev, old_state); + drm_atomic_helper_fake_vblank(old_state); + drm_atomic_helper_commit_hw_done(old_state); drm_atomic_helper_wait_for_vblanks(dev, old_state); @@ -1477,6 +1481,8 @@ void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state) drm_atomic_helper_commit_planes(dev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY); + drm_atomic_helper_fake_vblank(old_state); + drm_atomic_helper_commit_hw_done(old_state); drm_atomic_helper_wait_for_vblanks(dev, old_state); @@ -2052,6 +2058,45 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state) EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); /** + * drm_atomic_helper_fake_vblank - fake VBLANK events if needed + * @old_state: atomic state object with old state structures + * + * This function walks all CRTCs and fakes VBLANK events on those with + * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL. + * The primary use of this function is writeback connectors working in oneshot + * mode, which only fake the VBLANK event when a job is queued; any change to + * the pipeline that does not touch the connector would otherwise lead to + * timeouts when calling + * drm_atomic_helper_wait_for_vblanks() or + * drm_atomic_helper_wait_for_flip_done(). + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state) +{ + struct drm_crtc_state *new_crtc_state; + struct drm_crtc *crtc; + int i; + + for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) { + unsigned long flags; + + if (!new_crtc_state->no_vblank) + continue; + + spin_lock_irqsave(&old_state->dev->event_lock, flags); + if (new_crtc_state->event) { + drm_crtc_send_vblank_event(crtc, + new_crtc_state->event); + new_crtc_state->event = NULL; + } + spin_unlock_irqrestore(&old_state->dev->event_lock, flags); + } +} +EXPORT_SYMBOL(drm_atomic_helper_fake_vblank); + +/** * drm_atomic_helper_commit_hw_done - setup possible nonblocking commit * @old_state: atomic state object with old state structures * @@ -2822,7 +2867,7 @@ static int update_output_state(struct drm_atomic_state *state, * resets the "link-status" property to GOOD, to force any link * re-training. The SETCRTC ioctl does not define whether an update does * need a full modeset or just a plane update, hence we're allowed to do - * that.
See also drm_mode_connector_set_link_status_property(). + * that. See also drm_connector_set_link_status_property(). * * Returns: * Returns 0 on success, negative errno numbers on failure. diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c new file mode 100644 index 000000000000..baff50a4c234 --- /dev/null +++ b/drivers/gpu/drm/drm_client.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018 Noralf Trønnes + */ + +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> +#include <linux/slab.h> + +#include <drm/drm_client.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem.h> +#include <drm/drm_mode.h> +#include <drm/drm_print.h> +#include <drm/drmP.h> + +#include "drm_crtc_internal.h" +#include "drm_internal.h" + +/** + * DOC: overview + * + * This library provides support for clients running in the kernel like fbdev and bootsplash. + * Currently it's only partially implemented, just enough to support fbdev. + * + * GEM drivers which provide a GEM based dumb buffer with a virtual address are supported. + */ + +static int drm_client_open(struct drm_client_dev *client) +{ + struct drm_device *dev = client->dev; + struct drm_file *file; + + file = drm_file_alloc(dev->primary); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&dev->filelist_mutex); + list_add(&file->lhead, &dev->filelist_internal); + mutex_unlock(&dev->filelist_mutex); + + client->file = file; + + return 0; +} + +static void drm_client_close(struct drm_client_dev *client) +{ + struct drm_device *dev = client->dev; + + mutex_lock(&dev->filelist_mutex); + list_del(&client->file->lhead); + mutex_unlock(&dev->filelist_mutex); + + drm_file_free(client->file); +} +EXPORT_SYMBOL(drm_client_close); + +/** + * drm_client_new - Create a DRM client + * @dev: DRM device + * @client: DRM client + * @name: Client name + * @funcs: DRM client functions (optional) + * + * The caller needs to hold a reference on @dev before calling this function. + * The client is freed when the &drm_device is unregistered. See drm_client_release(). + * + * Returns: + * Zero on success or negative error code on failure. + */ +int drm_client_new(struct drm_device *dev, struct drm_client_dev *client, + const char *name, const struct drm_client_funcs *funcs) +{ + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET) || + !dev->driver->dumb_create || !dev->driver->gem_prime_vmap) + return -ENOTSUPP; + + if (funcs && !try_module_get(funcs->owner)) + return -ENODEV; + + client->dev = dev; + client->name = name; + client->funcs = funcs; + + ret = drm_client_open(client); + if (ret) + goto err_put_module; + + mutex_lock(&dev->clientlist_mutex); + list_add(&client->list, &dev->clientlist); + mutex_unlock(&dev->clientlist_mutex); + + drm_dev_get(dev); + + return 0; + +err_put_module: + if (funcs) + module_put(funcs->owner); + + return ret; +} +EXPORT_SYMBOL(drm_client_new); + +/** + * drm_client_release - Release DRM client resources + * @client: DRM client + * + * Releases resources by closing the &drm_file that was opened by drm_client_new(). + * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set. + * + * This function should only be called from the unregister callback. An exception + * is fbdev which cannot free the buffer if userspace has open file descriptors. 
+ * + * Note: + * Clients cannot initiate a release by themselves. This is done to keep the code simple. + * The driver has to be unloaded before the client can be unloaded. + */ +void drm_client_release(struct drm_client_dev *client) +{ + struct drm_device *dev = client->dev; + + DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name); + + drm_client_close(client); + drm_dev_put(dev); + if (client->funcs) + module_put(client->funcs->owner); +} +EXPORT_SYMBOL(drm_client_release); + +void drm_client_dev_unregister(struct drm_device *dev) +{ + struct drm_client_dev *client, *tmp; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { + list_del(&client->list); + if (client->funcs && client->funcs->unregister) { + client->funcs->unregister(client); + } else { + drm_client_release(client); + kfree(client); + } + } + mutex_unlock(&dev->clientlist_mutex); +} + +/** + * drm_client_dev_hotplug - Send hotplug event to clients + * @dev: DRM device + * + * This function calls the &drm_client_funcs.hotplug callback on the attached clients. + * + * drm_kms_helper_hotplug_event() calls this function, so drivers that use it + * don't need to call this function themselves. + */ +void drm_client_dev_hotplug(struct drm_device *dev) +{ + struct drm_client_dev *client; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->funcs || !client->funcs->hotplug) + continue; + + ret = client->funcs->hotplug(client); + DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret); + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_hotplug); + +void drm_client_dev_restore(struct drm_device *dev) +{ + struct drm_client_dev *client; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->funcs || !client->funcs->restore) + continue; + + ret = client->funcs->restore(client); + DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret); + if (!ret) /* The first one to return zero gets the privilege to restore */ + break; + } + mutex_unlock(&dev->clientlist_mutex); +} + +static void drm_client_buffer_delete(struct drm_client_buffer *buffer) +{ + struct drm_device *dev = buffer->client->dev; + + if (buffer->vaddr && dev->driver->gem_prime_vunmap) + dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr); + + if (buffer->gem) + drm_gem_object_put_unlocked(buffer->gem); + + if (buffer->handle) + drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file); + + kfree(buffer); +} + +static struct drm_client_buffer * +drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format) +{ + struct drm_mode_create_dumb dumb_args = { }; + struct drm_device *dev = client->dev; + struct drm_client_buffer *buffer; + struct drm_gem_object *obj; + void *vaddr; + int ret; + + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + buffer->client = client; + + dumb_args.width = width; + dumb_args.height = height; + dumb_args.bpp = drm_format_plane_cpp(format, 0) * 8; + ret = drm_mode_create_dumb(dev, &dumb_args, client->file); + if (ret) + goto err_delete; + + buffer->handle = dumb_args.handle; + buffer->pitch = dumb_args.pitch; + + obj = drm_gem_object_lookup(client->file, 
dumb_args.handle); + if (!obj) { + ret = -ENOENT; + goto err_delete; + } + + buffer->gem = obj; + + /* + * FIXME: The dependency on GEM here isn't required, we could + * convert the driver handle to a dma-buf instead and use the + * backend-agnostic dma-buf vmap support instead. This would + * require that the handle2fd prime ioctl is reworked to pull the + * fd_install step out of the driver backend hooks, to make that + * final step optional for internal users. + */ + vaddr = dev->driver->gem_prime_vmap(obj); + if (!vaddr) { + ret = -ENOMEM; + goto err_delete; + } + + buffer->vaddr = vaddr; + + return buffer; + +err_delete: + drm_client_buffer_delete(buffer); + + return ERR_PTR(ret); +} + +static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) +{ + int ret; + + if (!buffer->fb) + return; + + ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file); + if (ret) + DRM_DEV_ERROR(buffer->client->dev->dev, + "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret); + + buffer->fb = NULL; +} + +static int drm_client_buffer_addfb(struct drm_client_buffer *buffer, + u32 width, u32 height, u32 format) +{ + struct drm_client_dev *client = buffer->client; + struct drm_mode_fb_cmd fb_req = { }; + const struct drm_format_info *info; + int ret; + + info = drm_format_info(format); + fb_req.bpp = info->cpp[0] * 8; + fb_req.depth = info->depth; + fb_req.width = width; + fb_req.height = height; + fb_req.handle = buffer->handle; + fb_req.pitch = buffer->pitch; + + ret = drm_mode_addfb(client->dev, &fb_req, client->file); + if (ret) + return ret; + + buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id); + if (WARN_ON(!buffer->fb)) + return -ENOENT; + + /* drop the reference we picked up in framebuffer lookup */ + drm_framebuffer_put(buffer->fb); + + strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN); + + return 0; +} + +/** + * drm_client_framebuffer_create - Create a client framebuffer + * @client: DRM client + * @width: Framebuffer width + * @height: Framebuffer height + * @format: Buffer format + * + * This function creates a &drm_client_buffer which consists of a + * &drm_framebuffer backed by a dumb buffer. + * Call drm_client_framebuffer_delete() to free the buffer. + * + * Returns: + * Pointer to a client buffer or an error pointer on failure. 
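Putting drm_client_new() and drm_client_framebuffer_create() together, an in-kernel client obtains a CPU-visible scanout buffer in a handful of calls. A rough sketch, assuming the caller already holds a reference on the device; the function name is invented and error unwinding is trimmed::

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <drm/drm_client.h>
    #include <drm/drm_fourcc.h>

    static int example_client_init(struct drm_device *dev)
    {
            struct drm_client_dev *client;
            struct drm_client_buffer *buffer;
            int ret;

            client = kzalloc(sizeof(*client), GFP_KERNEL);
            if (!client)
                    return -ENOMEM;

            /* NULL funcs: the client is torn down with the device */
            ret = drm_client_new(dev, client, "example-client", NULL);
            if (ret) {
                    kfree(client);
                    return ret;
            }

            /* XRGB8888 dumb buffer wrapped in a &drm_framebuffer */
            buffer = drm_client_framebuffer_create(client, 1024, 768,
                                                   DRM_FORMAT_XRGB8888);
            if (IS_ERR(buffer))
                    return PTR_ERR(buffer);

            /* buffer->vaddr is a kernel mapping of the pixel memory */
            return 0;
    }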
+ */ +struct drm_client_buffer * +drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format) +{ + struct drm_client_buffer *buffer; + int ret; + + buffer = drm_client_buffer_create(client, width, height, format); + if (IS_ERR(buffer)) + return buffer; + + ret = drm_client_buffer_addfb(buffer, width, height, format); + if (ret) { + drm_client_buffer_delete(buffer); + return ERR_PTR(ret); + } + + return buffer; +} +EXPORT_SYMBOL(drm_client_framebuffer_create); + +/** + * drm_client_framebuffer_delete - Delete a client framebuffer + * @buffer: DRM client buffer (can be NULL) + */ +void drm_client_framebuffer_delete(struct drm_client_buffer *buffer) +{ + if (!buffer) + return; + + drm_client_buffer_rmfb(buffer); + drm_client_buffer_delete(buffer); +} +EXPORT_SYMBOL(drm_client_framebuffer_delete); + +#ifdef CONFIG_DEBUG_FS +static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) + drm_printf(&p, "%s\n", client->name); + mutex_unlock(&dev->clientlist_mutex); + + return 0; +} + +static const struct drm_info_list drm_client_debugfs_list[] = { + { "internal_clients", drm_client_debugfs_internal_clients, 0 }, +}; + +int drm_client_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(drm_client_debugfs_list, + ARRAY_SIZE(drm_client_debugfs_list), + minor->debugfs_root, minor); +} +#endif diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index b09b3a3e4024..6011d769d50b 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -48,7 +48,7 @@ * * Connectors must be attached to an encoder to be used. For devices that map * connectors to encoders 1:1, the connector should be attached at - * initialization time with a call to drm_mode_connector_attach_encoder(). The + * initialization time with a call to drm_connector_attach_encoder(). The * driver must also set the &drm_connector.encoder field to point to the * attached encoder. * @@ -291,7 +291,7 @@ out_put: EXPORT_SYMBOL(drm_connector_init); /** - * drm_mode_connector_attach_encoder - attach a connector to an encoder + * drm_connector_attach_encoder - attach a connector to an encoder * @connector: connector to attach * @encoder: encoder to attach @connector to * @@ -302,8 +302,8 @@ EXPORT_SYMBOL(drm_connector_init); * Returns: * Zero on success, negative errno on failure. 
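For reference, the renamed drm_connector_attach_encoder() keeps its usual place in driver init, right after both mode objects are created. A condensed sketch of the common 1:1 wiring; the funcs tables and priv struct are placeholders::

    ret = drm_encoder_init(dev, &priv->encoder, &example_encoder_funcs,
                           DRM_MODE_ENCODER_TMDS, NULL);
    if (ret)
            return ret;

    ret = drm_connector_init(dev, &priv->connector,
                             &example_connector_funcs,
                             DRM_MODE_CONNECTOR_HDMIA);
    if (ret)
            return ret;

    return drm_connector_attach_encoder(&priv->connector, &priv->encoder);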
*/ -int drm_mode_connector_attach_encoder(struct drm_connector *connector, - struct drm_encoder *encoder) +int drm_connector_attach_encoder(struct drm_connector *connector, + struct drm_encoder *encoder) { int i; @@ -321,7 +321,7 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector, if (WARN_ON(connector->encoder)) return -EINVAL; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) { if (connector->encoder_ids[i] == 0) { connector->encoder_ids[i] = encoder->base.id; return 0; @@ -329,7 +329,30 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector, } return -ENOMEM; } -EXPORT_SYMBOL(drm_mode_connector_attach_encoder); +EXPORT_SYMBOL(drm_connector_attach_encoder); + +/** + * drm_connector_has_possible_encoder - check if the connector and encoder are associated with each other + * @connector: the connector + * @encoder: the encoder + * + * Returns: + * True if @encoder is one of the possible encoders for @connector. + */ +bool drm_connector_has_possible_encoder(struct drm_connector *connector, + struct drm_encoder *encoder) +{ + struct drm_encoder *enc; + int i; + + drm_connector_for_each_possible_encoder(connector, enc, i) { + if (enc == encoder) + return true; + } + + return false; +} +EXPORT_SYMBOL(drm_connector_has_possible_encoder); static void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode) @@ -583,7 +606,7 @@ __drm_connector_put_safe(struct drm_connector *conn) /** * drm_connector_list_iter_next - return next connector - * @iter: connectr_list iterator + * @iter: connector_list iterator * * Returns the next connector for @iter, or NULL when the list walk has * completed. @@ -791,7 +814,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) * Blob property which contains the current EDID read from the sink. This * is useful to parse sink identification information like vendor, model * and serial. Drivers should update this property by calling - * drm_mode_connector_update_edid_property(), usually after having parsed + * drm_connector_update_edid_property(), usually after having parsed * the EDID using drm_add_edid_modes(). Userspace cannot change this * property. * DPMS: @@ -829,7 +852,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) * PATH: * Connector path property to identify how this sink is physically * connected. Used by DP MST. This should be set by calling - * drm_mode_connector_set_path_property(), in the case of DP MST with the + * drm_connector_set_path_property(), in the case of DP MST with the * path property the MST manager created. Userspace cannot change this * property. * TILE: @@ -840,14 +863,14 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) * are not gen-locked. Note that for tiled panels which are genlocked, like * dual-link LVDS or dual-link DSI, the driver should try to not expose the * tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers - * should update this value using drm_mode_connector_set_tile_property(). + * should update this value using drm_connector_set_tile_property(). * Userspace cannot change this property. * link-status: * Connector link-status property to indicate the status of link. The * default value of link-status is "GOOD". If something fails during or * after modeset, the kernel driver may set this to "BAD" and issue a * hotplug uevent. Drivers should update this value using - * drm_mode_connector_set_link_status_property().
+ * drm_connector_set_link_status_property(). * non_desktop: * Indicates the output should be ignored for purposes of displaying a * standard desktop environment or console. This is most likely because @@ -1402,7 +1425,7 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev) EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties); /** - * drm_mode_connector_set_path_property - set tile property on connector + * drm_connector_set_path_property - set tile property on connector * @connector: connector to set property on. * @path: path to use for property; must not be NULL. * @@ -1414,8 +1437,8 @@ EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties); * Returns: * Zero on success, negative errno on failure. */ -int drm_mode_connector_set_path_property(struct drm_connector *connector, - const char *path) +int drm_connector_set_path_property(struct drm_connector *connector, + const char *path) { struct drm_device *dev = connector->dev; int ret; @@ -1428,10 +1451,10 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector, dev->mode_config.path_property); return ret; } -EXPORT_SYMBOL(drm_mode_connector_set_path_property); +EXPORT_SYMBOL(drm_connector_set_path_property); /** - * drm_mode_connector_set_tile_property - set tile property on connector + * drm_connector_set_tile_property - set tile property on connector * @connector: connector to set property on. * * This looks up the tile information for a connector, and creates a @@ -1441,7 +1464,7 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property); * Returns: * Zero on success, errno on failure. */ -int drm_mode_connector_set_tile_property(struct drm_connector *connector) +int drm_connector_set_tile_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; char tile[256]; @@ -1471,10 +1494,10 @@ int drm_mode_connector_set_tile_property(struct drm_connector *connector) dev->mode_config.tile_property); return ret; } -EXPORT_SYMBOL(drm_mode_connector_set_tile_property); +EXPORT_SYMBOL(drm_connector_set_tile_property); /** - * drm_mode_connector_update_edid_property - update the edid property of a connector + * drm_connector_update_edid_property - update the edid property of a connector * @connector: drm connector * @edid: new value of the edid property * @@ -1484,8 +1507,8 @@ EXPORT_SYMBOL(drm_mode_connector_set_tile_property); * Returns: * Zero on success, negative errno on failure. */ -int drm_mode_connector_update_edid_property(struct drm_connector *connector, - const struct edid *edid) +int drm_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid) { struct drm_device *dev = connector->dev; size_t size = 0; @@ -1523,10 +1546,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, dev->mode_config.edid_property); return ret; } -EXPORT_SYMBOL(drm_mode_connector_update_edid_property); +EXPORT_SYMBOL(drm_connector_update_edid_property); /** - * drm_mode_connector_set_link_status_property - Set link status property of a connector + * drm_connector_set_link_status_property - Set link status property of a connector * @connector: drm connector * @link_status: new value of link status property (0: Good, 1: Bad) * @@ -1544,8 +1567,8 @@ EXPORT_SYMBOL(drm_mode_connector_update_edid_property); * it is not limited to DP or link training. For example, if we implement * asynchronous setcrtc, this property can be used to report any failures in that. 
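As the link-status text above describes, the renamed setter is meant to be paired with a hotplug uevent so that userspace notices the BAD state and re-issues a modeset. A sketch of the usual failure path, e.g. from a link-retraining work item::

    /* Link training failed: flag the connector and poke userspace. */
    drm_connector_set_link_status_property(connector,
                                           DRM_MODE_LINK_STATUS_BAD);
    drm_kms_helper_hotplug_event(connector->dev);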
*/ -void drm_mode_connector_set_link_status_property(struct drm_connector *connector, - uint64_t link_status) +void drm_connector_set_link_status_property(struct drm_connector *connector, + uint64_t link_status) { struct drm_device *dev = connector->dev; @@ -1553,7 +1576,7 @@ void drm_mode_connector_set_link_status_property(struct drm_connector *connector connector->state->link_status = link_status; drm_modeset_unlock(&dev->mode_config.connection_mutex); } -EXPORT_SYMBOL(drm_mode_connector_set_link_status_property); +EXPORT_SYMBOL(drm_connector_set_link_status_property); /** * drm_connector_init_panel_orientation_property - @@ -1606,7 +1629,7 @@ int drm_connector_init_panel_orientation_property( } EXPORT_SYMBOL(drm_connector_init_panel_orientation_property); -int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, +int drm_connector_set_obj_prop(struct drm_mode_object *obj, struct drm_property *property, uint64_t value) { @@ -1624,8 +1647,8 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, return ret; } -int drm_mode_connector_property_set_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv) +int drm_connector_property_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) { struct drm_mode_connector_set_property *conn_set_prop = data; struct drm_mode_obj_set_property obj_set_prop = { @@ -1706,22 +1729,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (!connector) return -ENOENT; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) - if (connector->encoder_ids[i] != 0) - encoders_count++; + drm_connector_for_each_possible_encoder(connector, encoder, i) + encoders_count++; if ((out_resp->count_encoders >= encoders_count) && encoders_count) { copied = 0; encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] != 0) { - if (put_user(connector->encoder_ids[i], - encoder_ptr + copied)) { - ret = -EFAULT; - goto out; - } - copied++; + + drm_connector_for_each_possible_encoder(connector, encoder, i) { + if (put_user(encoder->base.id, encoder_ptr + copied)) { + ret = -EFAULT; + goto out; } + copied++; } } out_resp->count_encoders = encoders_count; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index a6906c4ab880..bae43938c8f6 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -461,6 +461,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, struct drm_crtc *tmp; int ret; + WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev)); + /* * NOTE: ->set_config can also disable other crtcs (if we steal all * connectors from it), hence we need to refcount the fbs across all @@ -478,10 +480,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, if (ret == 0) { struct drm_plane *plane = crtc->primary; - if (!plane->state) { - plane->crtc = fb ? crtc : NULL; - plane->fb = fb; - } + plane->crtc = fb ? 
crtc : NULL; + plane->fb = fb; } drm_for_each_crtc(tmp, crtc->dev) { @@ -496,6 +496,7 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, return ret; } + /** * drm_mode_set_config_internal - helper to call &drm_mode_config_funcs.set_config * @set: modeset config to set @@ -740,7 +741,11 @@ retry: set.connectors = connector_set; set.num_connectors = crtc_req->count_connectors; set.fb = fb; - ret = __drm_mode_set_config_internal(&set, &ctx); + + if (drm_drv_uses_atomic_modeset(dev)) + ret = crtc->funcs->set_config(&set, &ctx); + else + ret = __drm_mode_set_config_internal(&set, &ctx); out: if (fb) diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 235d40fce8b5..b61322763394 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -148,7 +148,7 @@ void drm_connector_ida_init(void); void drm_connector_ida_destroy(void); void drm_connector_unregister_all(struct drm_device *dev); int drm_connector_register_all(struct drm_device *dev); -int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, +int drm_connector_set_obj_prop(struct drm_mode_object *obj, struct drm_property *property, uint64_t value); int drm_connector_create_standard_properties(struct drm_device *dev); @@ -156,8 +156,8 @@ const char *drm_get_connector_force_name(enum drm_connector_force force); void drm_connector_free_work_fn(struct work_struct *work); /* IOCTL */ -int drm_mode_connector_property_set_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv); +int drm_connector_property_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index b2482818fee8..6f28fe58f169 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/export.h> +#include <drm/drm_client.h> #include <drm/drm_debugfs.h> #include <drm/drm_edid.h> #include <drm/drm_atomic.h> @@ -164,6 +165,12 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, DRM_ERROR("Failed to create framebuffer debugfs file\n"); return ret; } + + ret = drm_client_debugfs_init(minor); + if (ret) { + DRM_ERROR("Failed to create client debugfs file\n"); + return ret; + } } if (dev->driver->debugfs_init) { @@ -307,13 +314,13 @@ static ssize_t edid_write(struct file *file, const char __user *ubuf, if (len == 5 && !strncmp(buf, "reset", 5)) { connector->override_edid = false; - ret = drm_mode_connector_update_edid_property(connector, NULL); + ret = drm_connector_update_edid_property(connector, NULL); } else if (len < EDID_LENGTH || EDID_LENGTH * (1 + edid->extensions) > len) ret = -EINVAL; else { connector->override_edid = false; - ret = drm_mode_connector_update_edid_property(connector, edid); + ret = drm_connector_update_edid_property(connector, edid); if (!ret) connector->override_edid = true; } diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index 9f8312137cad..99961192bf03 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -139,6 +139,7 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc) static void crtc_crc_cleanup(struct drm_crtc_crc *crc) { kfree(crc->entries); + crc->overflow = false; crc->entries = NULL; crc->head = 0; crc->tail = 0; @@ -391,8 +392,14 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool 
has_frame, tail = crc->tail; if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) { + bool was_overflow = crc->overflow; + + crc->overflow = true; spin_unlock(&crc->lock); - DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n"); + + if (!was_overflow) + DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n"); + return -ENOBUFS; } diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c new file mode 100644 index 000000000000..ddb1c5adebb9 --- /dev/null +++ b/drivers/gpu/drm/drm_dp_cec.c @@ -0,0 +1,428 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DisplayPort CEC-Tunneling-over-AUX support + * + * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <drm/drm_dp_helper.h> +#include <media/cec.h> + +/* + * Unfortunately it turns out that we have a chicken-and-egg situation + * here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters + * have a converter chip that supports CEC-Tunneling-over-AUX (usually the + * Parade PS176), but they do not wire up the CEC pin, thus making CEC + * useless. + * + * Sadly there is no way for this driver to know this. What happens is + * that a /dev/cecX device is created that is isolated and unable to see + * any of the other CEC devices. Quite literally the CEC wire is cut + * (or in this case, never connected in the first place). + * + * The reason so few adapters support this is that this tunneling protocol + * was never supported by any OS. So there was no easy way of testing it, + * and no incentive to correctly wire up the CEC pin. + * + * Hopefully by creating this driver it will be easier for vendors to + * finally fix their adapters and test the CEC functionality. + * + * I keep a list of known working adapters here: + * + * https://hverkuil.home.xs4all.nl/cec-status.txt + * + * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works + * and is not yet listed there. + * + * Note that the current implementation does not support CEC over an MST hub. + * As far as I can see there is no mechanism defined in the DisplayPort + * standard to transport CEC interrupts over an MST device. It might be + * possible to do this through polling, but I have not been able to get that + * to work. + */ + +/** + * DOC: dp cec helpers + * + * These functions take care of supporting the CEC-Tunneling-over-AUX + * feature of DisplayPort-to-HDMI adapters. + */ + +/* + * When the EDID is unset because the HPD went low, then the CEC DPCD registers + * typically can no longer be read (true for a DP-to-HDMI adapter since it is + * powered by the HPD). However, some displays toggle the HPD off and on for a + * short period for one reason or another, and that would cause the CEC adapter + * to be removed and added again, even though nothing else changed. + * + * This module parameter sets a delay in seconds before the CEC adapter is + * actually unregistered. Only if the HPD does not return within that time will + * the CEC adapter be unregistered. + * + * If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never + * be unregistered for as long as the connector remains registered. + * + * If it is set to 0, then the CEC adapter will be unregistered immediately as + * soon as the HPD disappears. + * + * The default is one second to prevent short HPD glitches from unregistering + * the CEC adapter. 
+ * + * Note that for integrated HDMI branch devices that support CEC the DPCD + * registers remain available even if the HPD goes low since it is not powered + * by the HPD. In that case the CEC adapter will never be unregistered during + * the life time of the connector. At least, this is the theory since I do not + * have hardware with an integrated HDMI branch device that supports CEC. + */ +#define NEVER_UNREG_DELAY 1000 +static unsigned int drm_dp_cec_unregister_delay = 1; +module_param(drm_dp_cec_unregister_delay, uint, 0600); +MODULE_PARM_DESC(drm_dp_cec_unregister_delay, + "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister"); + +static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0; + ssize_t err = 0; + + err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val); + return (enable && err < 0) ? err : 0; +} + +static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + /* Bit 15 (logical address 15) should always be set */ + u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST; + u8 mask[2]; + ssize_t err; + + if (addr != CEC_LOG_ADDR_INVALID) + la_mask |= adap->log_addrs.log_addr_mask | (1 << addr); + mask[0] = la_mask & 0xff; + mask[1] = la_mask >> 8; + err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2); + return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0; +} + +static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + unsigned int retries = min(5, attempts - 1); + ssize_t err; + + err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER, + msg->msg, msg->len); + if (err < 0) + return err; + + err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO, + (msg->len - 1) | (retries << 4) | + DP_CEC_TX_MESSAGE_SEND); + return err < 0 ? err : 0; +} + +static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap, + bool enable) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + ssize_t err; + u8 val; + + if (!(adap->capabilities & CEC_CAP_MONITOR_ALL)) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val); + if (err >= 0) { + if (enable) + val |= DP_CEC_SNOOPING_ENABLE; + else + val &= ~DP_CEC_SNOOPING_ENABLE; + err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val); + } + return (enable && err < 0) ? err : 0; +} + +static void drm_dp_cec_adap_status(struct cec_adapter *adap, + struct seq_file *file) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + struct drm_dp_desc desc; + struct drm_dp_dpcd_ident *id = &desc.ident; + + if (drm_dp_read_desc(aux, &desc, true)) + return; + seq_printf(file, "OUI: %*pdH\n", + (int)sizeof(id->oui), id->oui); + seq_printf(file, "ID: %*pE\n", + (int)strnlen(id->device_id, sizeof(id->device_id)), + id->device_id); + seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf); + /* + * Show this both in decimal and hex: at least one vendor + * always reports this in hex. 
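The mask handling in drm_dp_cec_adap_log_addr() above is easiest to follow with a concrete value; for a single logical address of 4 plus the always-set broadcast bit 15::

    /*
     * addr = 4, no other logical addresses claimed yet:
     *   la_mask = BIT(CEC_LOG_ADDR_BROADCAST) | BIT(4)
     *           = 0x8000 | 0x0010 = 0x8010
     *   mask[0] = 0x10;  low byte of DP_CEC_LOGICAL_ADDRESS_MASK
     *   mask[1] = 0x80;  high byte, carries the broadcast bit
     */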
+ */ + seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n", + id->sw_major_rev, id->sw_minor_rev, + id->sw_major_rev, id->sw_minor_rev); +} + +static const struct cec_adap_ops drm_dp_cec_adap_ops = { + .adap_enable = drm_dp_cec_adap_enable, + .adap_log_addr = drm_dp_cec_adap_log_addr, + .adap_transmit = drm_dp_cec_adap_transmit, + .adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable, + .adap_status = drm_dp_cec_adap_status, +}; + +static int drm_dp_cec_received(struct drm_dp_aux *aux) +{ + struct cec_adapter *adap = aux->cec.adap; + struct cec_msg msg; + u8 rx_msg_info; + ssize_t err; + + err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info); + if (err < 0) + return err; + + if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED)) + return 0; + + msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1; + err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len); + if (err < 0) + return err; + + cec_received_msg(adap, &msg); + return 0; +} + +static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux) +{ + struct cec_adapter *adap = aux->cec.adap; + u8 flags; + + if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0) + return; + + if (flags & DP_CEC_RX_MESSAGE_INFO_VALID) + drm_dp_cec_received(aux); + + if (flags & DP_CEC_TX_MESSAGE_SENT) + cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK); + else if (flags & DP_CEC_TX_LINE_ERROR) + cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR | + CEC_TX_STATUS_MAX_RETRIES); + else if (flags & + (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR)) + cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK | + CEC_TX_STATUS_MAX_RETRIES); + drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags); +} + +/** + * drm_dp_cec_irq() - handle CEC interrupt, if any + * @aux: DisplayPort AUX channel + * + * Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX + * is present, then it will check for a CEC_IRQ and handle it accordingly. + */ +void drm_dp_cec_irq(struct drm_dp_aux *aux) +{ + u8 cec_irq; + int ret; + + mutex_lock(&aux->cec.lock); + if (!aux->cec.adap) + goto unlock; + + ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, + &cec_irq); + if (ret < 0 || !(cec_irq & DP_CEC_IRQ)) + goto unlock; + + drm_dp_cec_handle_irq(aux); + drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ); +unlock: + mutex_unlock(&aux->cec.lock); +} +EXPORT_SYMBOL(drm_dp_cec_irq); + +static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap) +{ + u8 cap = 0; + + if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 || + !(cap & DP_CEC_TUNNELING_CAPABLE)) + return false; + if (cec_cap) + *cec_cap = cap; + return true; +} + +/* + * Called if the HPD was low for more than drm_dp_cec_unregister_delay + * seconds. This unregisters the CEC adapter. + */ +static void drm_dp_cec_unregister_work(struct work_struct *work) +{ + struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux, + cec.unregister_work.work); + + mutex_lock(&aux->cec.lock); + cec_unregister_adapter(aux->cec.adap); + aux->cec.adap = NULL; + mutex_unlock(&aux->cec.lock); +} + +/* + * A new EDID is set. If there is no CEC adapter, then create one. If + * there was a CEC adapter, then check if the CEC adapter properties + * were unchanged and just update the CEC physical address. Otherwise + * unregister the old CEC adapter and create a new one. 
+ */ +void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) +{ + u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD; + unsigned int num_las = 1; + u8 cap; + +#ifndef CONFIG_MEDIA_CEC_RC + /* + * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by + * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined. + * + * Do this here as well to ensure the tests against cec_caps are + * correct. + */ + cec_caps &= ~CEC_CAP_RC; +#endif + cancel_delayed_work_sync(&aux->cec.unregister_work); + + mutex_lock(&aux->cec.lock); + if (!drm_dp_cec_cap(aux, &cap)) { + /* CEC is not supported, unregister any existing adapter */ + cec_unregister_adapter(aux->cec.adap); + aux->cec.adap = NULL; + goto unlock; + } + + if (cap & DP_CEC_SNOOPING_CAPABLE) + cec_caps |= CEC_CAP_MONITOR_ALL; + if (cap & DP_CEC_MULTIPLE_LA_CAPABLE) + num_las = CEC_MAX_LOG_ADDRS; + + if (aux->cec.adap) { + if (aux->cec.adap->capabilities == cec_caps && + aux->cec.adap->available_log_addrs == num_las) { + /* Unchanged, so just set the phys addr */ + cec_s_phys_addr_from_edid(aux->cec.adap, edid); + goto unlock; + } + /* + * The capabilities changed, so unregister the old + * adapter first. + */ + cec_unregister_adapter(aux->cec.adap); + } + + /* Create a new adapter */ + aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops, + aux, aux->cec.name, cec_caps, + num_las); + if (IS_ERR(aux->cec.adap)) { + aux->cec.adap = NULL; + goto unlock; + } + if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) { + cec_delete_adapter(aux->cec.adap); + aux->cec.adap = NULL; + } else { + /* + * Update the phys addr for the new CEC adapter. When called + * from drm_dp_cec_register_connector() edid == NULL, so in + * that case the phys addr is just invalidated. + */ + cec_s_phys_addr_from_edid(aux->cec.adap, edid); + } +unlock: + mutex_unlock(&aux->cec.lock); +} +EXPORT_SYMBOL(drm_dp_cec_set_edid); + +/* + * The EDID disappeared (likely because of the HPD going down). + */ +void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) +{ + cancel_delayed_work_sync(&aux->cec.unregister_work); + + mutex_lock(&aux->cec.lock); + if (!aux->cec.adap) + goto unlock; + + cec_phys_addr_invalidate(aux->cec.adap); + /* + * We're done if we want to keep the CEC device + * (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the + * DPCD still indicates the CEC capability (expected for an integrated + * HDMI branch device). + */ + if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY && + !drm_dp_cec_cap(aux, NULL)) { + /* + * Unregister the CEC adapter after drm_dp_cec_unregister_delay + * seconds. This to debounce short HPD off-and-on cycles from + * displays. + */ + schedule_delayed_work(&aux->cec.unregister_work, + drm_dp_cec_unregister_delay * HZ); + } +unlock: + mutex_unlock(&aux->cec.lock); +} +EXPORT_SYMBOL(drm_dp_cec_unset_edid); + +/** + * drm_dp_cec_register_connector() - register a new connector + * @aux: DisplayPort AUX channel + * @name: name of the CEC device + * @parent: parent device + * + * A new connector was registered with associated CEC adapter name and + * CEC adapter parent device. After registering the name and parent + * drm_dp_cec_set_edid() is called to check if the connector supports + * CEC and to register a CEC adapter if that is the case. 
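Pieced together from the comments above, these helpers slot into a DP driver's connector lifecycle at four points. A sketch, where dp is a hypothetical driver struct embedding the struct drm_dp_aux::

    /* once, when the connector is created */
    drm_dp_cec_register_connector(&dp->aux, connector->name, dev->dev);

    /* whenever a new EDID has been read */
    drm_dp_cec_set_edid(&dp->aux, edid);

    /* when the HPD drops and the EDID is unset */
    drm_dp_cec_unset_edid(&dp->aux);

    /* from the IRQ_HPD (short HPD pulse) handler */
    drm_dp_cec_irq(&dp->aux);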
+ */ +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name, + struct device *parent) +{ + WARN_ON(aux->cec.adap); + aux->cec.name = name; + aux->cec.parent = parent; + INIT_DELAYED_WORK(&aux->cec.unregister_work, + drm_dp_cec_unregister_work); + + drm_dp_cec_set_edid(aux, NULL); +} +EXPORT_SYMBOL(drm_dp_cec_register_connector); + +/** + * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any + * @aux: DisplayPort AUX channel + */ +void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) +{ + if (!aux->cec.adap) + return; + cancel_delayed_work_sync(&aux->cec.unregister_work); + cec_unregister_adapter(aux->cec.adap); + aux->cec.adap = NULL; +} +EXPORT_SYMBOL(drm_dp_cec_unregister_connector); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index a7ba602a43a8..0cccbcb2d03e 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -185,6 +185,20 @@ EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); #define AUX_RETRY_INTERVAL 500 /* us */ +static inline void +drm_dp_dump_access(const struct drm_dp_aux *aux, + u8 request, uint offset, void *buffer, int ret) +{ + const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-"; + + if (ret > 0) + drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d) %*ph\n", + aux->name, offset, arrow, ret, min(ret, 20), buffer); + else + drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d)\n", + aux->name, offset, arrow, ret); +} + /** * DOC: dp helpers * @@ -288,10 +302,14 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer, 1); if (ret != 1) - return ret; + goto out; - return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer, - size); + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer, + size); + +out: + drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret); + return ret; } EXPORT_SYMBOL(drm_dp_dpcd_read); @@ -312,8 +330,12 @@ EXPORT_SYMBOL(drm_dp_dpcd_read); ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, void *buffer, size_t size) { - return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, - size); + int ret; + + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, + size); + drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret); + return ret; } EXPORT_SYMBOL(drm_dp_dpcd_write); @@ -1087,6 +1109,7 @@ static void drm_dp_aux_crc_work(struct work_struct *work) void drm_dp_aux_init(struct drm_dp_aux *aux) { mutex_init(&aux->hw_mutex); + mutex_init(&aux->cec.lock); INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work); aux->ddc.algo = &drm_dp_i2c_algo; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 658830620ca3..7780567aa669 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1215,7 +1215,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, port->pdt == DP_PEER_DEVICE_SST_SINK) && port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); - drm_mode_connector_set_tile_property(port->connector); + drm_connector_set_tile_property(port->connector); } (*mstb->mgr->cbs->register_connector)(port->connector); } @@ -2559,7 +2559,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ edid = drm_edid_duplicate(port->cached_edid); else { edid = drm_get_edid(connector, &port->aux.ddc); - 
drm_mode_connector_set_tile_property(connector); + drm_connector_set_tile_property(connector); } port->has_audio = drm_detect_monitor_audio(edid); drm_dp_put_port(port); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7af748ed1c58..ea4941da9b27 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -34,6 +34,7 @@ #include <linux/slab.h> #include <linux/srcu.h> +#include <drm/drm_client.h> #include <drm/drm_drv.h> #include <drm/drmP.h> @@ -53,13 +54,14 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, JosĂ© Fonseca, Jon Smirl"); MODULE_DESCRIPTION("DRM shared core routines"); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n" -"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n" -"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n" -"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n" -"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n" -"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n" -"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n" -"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)"); +"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n" +"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n" +"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n" +"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n" +"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n" +"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n" +"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n" +"\t\tBit 8 (0x100) will enable DP messages (displayport code)"); module_param_named(debug, drm_debug, int, 0600); static DEFINE_SPINLOCK(drm_minor_lock); @@ -505,6 +507,8 @@ int drm_dev_init(struct drm_device *dev, dev->driver = driver; INIT_LIST_HEAD(&dev->filelist); + INIT_LIST_HEAD(&dev->filelist_internal); + INIT_LIST_HEAD(&dev->clientlist); INIT_LIST_HEAD(&dev->ctxlist); INIT_LIST_HEAD(&dev->vmalist); INIT_LIST_HEAD(&dev->maplist); @@ -514,6 +518,7 @@ int drm_dev_init(struct drm_device *dev, spin_lock_init(&dev->event_lock); mutex_init(&dev->struct_mutex); mutex_init(&dev->filelist_mutex); + mutex_init(&dev->clientlist_mutex); mutex_init(&dev->ctxlist_mutex); mutex_init(&dev->master_mutex); @@ -569,6 +574,7 @@ err_minors: err_free: mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); + mutex_destroy(&dev->clientlist_mutex); mutex_destroy(&dev->filelist_mutex); mutex_destroy(&dev->struct_mutex); return ret; @@ -603,6 +609,7 @@ void drm_dev_fini(struct drm_device *dev) mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); + mutex_destroy(&dev->clientlist_mutex); mutex_destroy(&dev->filelist_mutex); mutex_destroy(&dev->struct_mutex); kfree(dev->unique); @@ -858,6 +865,8 @@ void drm_dev_unregister(struct drm_device *dev) dev->registered = false; + drm_client_dev_unregister(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_modeset_unregister_all(dev); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 186d00adfb5f..9da36a6271d3 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -18,6 +18,7 @@ */ #include <drm/drmP.h> +#include <drm/drm_client.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_cma_helper.h> @@ -26,11 +27,8 @@ #include <drm/drm_print.h> #include <linux/module.h> -#define 
DEFAULT_FBDEFIO_DELAY_MS 50 - struct drm_fbdev_cma { struct drm_fb_helper fb_helper; - const struct drm_framebuffer_funcs *fb_funcs; }; /** @@ -44,36 +42,6 @@ struct drm_fbdev_cma { * * An fbdev framebuffer backed by cma is also available by calling * drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down. - * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be - * set up automatically. &drm_framebuffer_funcs.dirty is called by - * drm_fb_helper_deferred_io() in process context (&struct delayed_work). - * - * Example fbdev deferred io code:: - * - * static int driver_fb_dirty(struct drm_framebuffer *fb, - * struct drm_file *file_priv, - * unsigned flags, unsigned color, - * struct drm_clip_rect *clips, - * unsigned num_clips) - * { - * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0); - * ... push changes ... - * return 0; - * } - * - * static struct drm_framebuffer_funcs driver_fb_funcs = { - * .destroy = drm_gem_fb_destroy, - * .create_handle = drm_gem_fb_create_handle, - * .dirty = driver_fb_dirty, - * }; - * - * Initialize:: - * - * fbdev = drm_fb_cma_fbdev_init_with_funcs(dev, 16, - * dev->mode_config.num_crtc, - * dev->mode_config.num_connector, - * &driver_fb_funcs); - * */ static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper) @@ -131,236 +99,28 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, } EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr); -static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma) -{ - return dma_mmap_writecombine(info->device, vma, info->screen_base, - info->fix.smem_start, info->fix.smem_len); -} - -static struct fb_ops drm_fbdev_cma_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_fillrect = drm_fb_helper_sys_fillrect, - .fb_copyarea = drm_fb_helper_sys_copyarea, - .fb_imageblit = drm_fb_helper_sys_imageblit, - .fb_mmap = drm_fb_cma_mmap, -}; - -static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info, - struct vm_area_struct *vma) -{ - fb_deferred_io_mmap(info, vma); - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - - return 0; -} - -static int drm_fbdev_cma_defio_init(struct fb_info *fbi, - struct drm_gem_cma_object *cma_obj) -{ - struct fb_deferred_io *fbdefio; - struct fb_ops *fbops; - - /* - * Per device structures are needed because: - * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap - * fbdefio: individual delays - */ - fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL); - fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); - if (!fbdefio || !fbops) { - kfree(fbdefio); - kfree(fbops); - return -ENOMEM; - } - - /* can't be offset from vaddr since dirty() uses cma_obj */ - fbi->screen_buffer = cma_obj->vaddr; - /* fb_deferred_io_fault() needs a physical address */ - fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer)); - - *fbops = *fbi->fbops; - fbi->fbops = fbops; - - fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS); - fbdefio->deferred_io = drm_fb_helper_deferred_io; - fbi->fbdefio = fbdefio; - fb_deferred_io_init(fbi); - fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap; - - return 0; -} - -static void drm_fbdev_cma_defio_fini(struct fb_info *fbi) -{ - if (!fbi->fbdefio) - return; - - fb_deferred_io_cleanup(fbi); - kfree(fbi->fbdefio); - kfree(fbi->fbops); -} - -static int -drm_fbdev_cma_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper); - struct drm_device *dev = helper->dev; - struct 
drm_gem_cma_object *obj; - struct drm_framebuffer *fb; - unsigned int bytes_per_pixel; - unsigned long offset; - struct fb_info *fbi; - size_t size; - int ret; - - DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", - sizes->surface_width, sizes->surface_height, - sizes->surface_bpp); - - bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); - size = sizes->surface_width * sizes->surface_height * bytes_per_pixel; - obj = drm_gem_cma_create(dev, size); - if (IS_ERR(obj)) - return -ENOMEM; - - fbi = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(fbi)) { - ret = PTR_ERR(fbi); - goto err_gem_free_object; - } - - fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base, - fbdev_cma->fb_funcs); - if (IS_ERR(fb)) { - dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); - ret = PTR_ERR(fb); - goto err_fb_info_destroy; - } - - helper->fb = fb; - - fbi->par = helper; - fbi->flags = FBINFO_FLAG_DEFAULT; - fbi->fbops = &drm_fbdev_cma_ops; - - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); - drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); - - offset = fbi->var.xoffset * bytes_per_pixel; - offset += fbi->var.yoffset * fb->pitches[0]; - - dev->mode_config.fb_base = (resource_size_t)obj->paddr; - fbi->screen_base = obj->vaddr + offset; - fbi->fix.smem_start = (unsigned long)(obj->paddr + offset); - fbi->screen_size = size; - fbi->fix.smem_len = size; - - if (fb->funcs->dirty) { - ret = drm_fbdev_cma_defio_init(fbi, obj); - if (ret) - goto err_cma_destroy; - } - - return 0; - -err_cma_destroy: - drm_framebuffer_remove(fb); -err_fb_info_destroy: - drm_fb_helper_fini(helper); -err_gem_free_object: - drm_gem_object_put_unlocked(&obj->base); - return ret; -} - -static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { - .fb_probe = drm_fbdev_cma_create, -}; - /** - * drm_fb_cma_fbdev_init_with_funcs() - Allocate and initialize fbdev emulation + * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation * @dev: DRM device * @preferred_bpp: Preferred bits per pixel for the device. * @dev->mode_config.preferred_depth is used if this is zero. * @max_conn_count: Maximum number of connectors. * @dev->mode_config.num_connector is used if this is zero. - * @funcs: Framebuffer functions, in particular a custom dirty() callback. - * Can be NULL. * * Returns: * Zero on success or negative error code on failure. 
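With the custom-funcs variants gone, a CMA-backed driver's fbdev support reduces to one call on each side of its lifetime. Roughly, treating a setup failure as non-fatal, which is the usual convention for fbdev emulation::

    ret = drm_fb_cma_fbdev_init(drm, 32, 0);
    if (ret)
            DRM_DEV_DEBUG(drm->dev, "no fbdev emulation (%d)\n", ret);

    /* ... and in the driver's unload path: */
    drm_fb_cma_fbdev_fini(drm);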
*/ -int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int max_conn_count, - const struct drm_framebuffer_funcs *funcs) +int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp, + unsigned int max_conn_count) { struct drm_fbdev_cma *fbdev_cma; - struct drm_fb_helper *fb_helper; - int ret; - - if (!preferred_bpp) - preferred_bpp = dev->mode_config.preferred_depth; - if (!preferred_bpp) - preferred_bpp = 32; - - if (!max_conn_count) - max_conn_count = dev->mode_config.num_connector; - - fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL); - if (!fbdev_cma) - return -ENOMEM; - fbdev_cma->fb_funcs = funcs; - fb_helper = &fbdev_cma->fb_helper; - - drm_fb_helper_prepare(dev, fb_helper, &drm_fb_cma_helper_funcs); - - ret = drm_fb_helper_init(dev, fb_helper, max_conn_count); - if (ret < 0) { - DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper.\n"); - goto err_free; - } - - ret = drm_fb_helper_single_add_all_connectors(fb_helper); - if (ret < 0) { - DRM_DEV_ERROR(dev->dev, "Failed to add connectors.\n"); - goto err_drm_fb_helper_fini; - } - - ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp); - if (ret < 0) { - DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration.\n"); - goto err_drm_fb_helper_fini; - } + /* dev->fb_helper will indirectly point to fbdev_cma after this call */ + fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count); + if (IS_ERR(fbdev_cma)) + return PTR_ERR(fbdev_cma); return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_free: - kfree(fbdev_cma); - - return ret; -} -EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init_with_funcs); - -/** - * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device. - * @dev->mode_config.preferred_depth is used if this is zero. - * @max_conn_count: Maximum number of connectors. - * @dev->mode_config.num_connector is used if this is zero. - * - * Returns: - * Zero on success or negative error code on failure. - */ -int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp, - unsigned int max_conn_count) -{ - return drm_fb_cma_fbdev_init_with_funcs(dev, preferred_bpp, - max_conn_count, NULL); } EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init); @@ -370,104 +130,54 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init); */ void drm_fb_cma_fbdev_fini(struct drm_device *dev) { - struct drm_fb_helper *fb_helper = dev->fb_helper; - - if (!fb_helper) - return; - - /* Unregister if it hasn't been done already */ - if (fb_helper->fbdev && fb_helper->fbdev->dev) - drm_fb_helper_unregister_fbi(fb_helper); - - if (fb_helper->fbdev) - drm_fbdev_cma_defio_fini(fb_helper->fbdev); - - if (fb_helper->fb) - drm_framebuffer_remove(fb_helper->fb); - - drm_fb_helper_fini(fb_helper); - kfree(to_fbdev_cma(fb_helper)); + if (dev->fb_helper) + drm_fbdev_cma_fini(to_fbdev_cma(dev->fb_helper)); } EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini); +static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { + .fb_probe = drm_fb_helper_generic_probe, +}; + /** - * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct + * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct * @dev: DRM device * @preferred_bpp: Preferred bits per pixel for the device * @max_conn_count: Maximum number of connectors - * @funcs: fb helper functions, in particular a custom dirty() callback * * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR. 
*/ -struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int max_conn_count, - const struct drm_framebuffer_funcs *funcs) +struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int max_conn_count) { struct drm_fbdev_cma *fbdev_cma; - struct drm_fb_helper *helper; + struct drm_fb_helper *fb_helper; int ret; fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL); - if (!fbdev_cma) { - dev_err(dev->dev, "Failed to allocate drm fbdev.\n"); + if (!fbdev_cma) return ERR_PTR(-ENOMEM); - } - fbdev_cma->fb_funcs = funcs; - helper = &fbdev_cma->fb_helper; - - drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs); + fb_helper = &fbdev_cma->fb_helper; - ret = drm_fb_helper_init(dev, helper, max_conn_count); - if (ret < 0) { - dev_err(dev->dev, "Failed to initialize drm fb helper.\n"); + ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL); + if (ret) goto err_free; - } - - ret = drm_fb_helper_single_add_all_connectors(helper); - if (ret < 0) { - dev_err(dev->dev, "Failed to add connectors.\n"); - goto err_drm_fb_helper_fini; - - } - ret = drm_fb_helper_initial_config(helper, preferred_bpp); - if (ret < 0) { - dev_err(dev->dev, "Failed to set initial hw configuration.\n"); - goto err_drm_fb_helper_fini; - } + ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_cma_helper_funcs, + preferred_bpp, max_conn_count); + if (ret) + goto err_client_put; return fbdev_cma; -err_drm_fb_helper_fini: - drm_fb_helper_fini(helper); +err_client_put: + drm_client_release(&fb_helper->client); err_free: kfree(fbdev_cma); return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs); - -static const struct drm_framebuffer_funcs drm_fb_cma_funcs = { - .destroy = drm_gem_fb_destroy, - .create_handle = drm_gem_fb_create_handle, -}; - -/** - * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device - * @max_conn_count: Maximum number of connectors - * - * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR. - */ -struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int max_conn_count) -{ - return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, - max_conn_count, - &drm_fb_cma_funcs); -} EXPORT_SYMBOL_GPL(drm_fbdev_cma_init); /** @@ -477,14 +187,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init); void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma) { drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper); - if (fbdev_cma->fb_helper.fbdev) - drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev); - - if (fbdev_cma->fb_helper.fb) - drm_framebuffer_remove(fbdev_cma->fb_helper.fb); - - drm_fb_helper_fini(&fbdev_cma->fb_helper); - kfree(fbdev_cma); + /* All resources have now been freed by drm_fbdev_fb_destroy() */ } EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index cab14f253384..4b0dd20bccb8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -30,6 +30,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/console.h> +#include <linux/dma-buf.h> #include <linux/kernel.h> #include <linux/sysrq.h> #include <linux/slab.h> @@ -66,6 +67,9 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); * helper functions used by many drivers to implement the kernel mode setting * interfaces. 
* + * Drivers that support a dumb buffer with a virtual address and mmap support, + * should try out the generic fbdev emulation using drm_fbdev_generic_setup(). + * * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it * down by calling drm_fb_helper_fbdev_teardown(). * @@ -738,6 +742,24 @@ static void drm_fb_helper_resume_worker(struct work_struct *work) console_unlock(); } +static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip) +{ + struct drm_framebuffer *fb = fb_helper->fb; + unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0); + size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp; + void *src = fb_helper->fbdev->screen_buffer + offset; + void *dst = fb_helper->buffer->vaddr + offset; + size_t len = (clip->x2 - clip->x1) * cpp; + unsigned int y; + + for (y = clip->y1; y < clip->y2; y++) { + memcpy(dst, src, len); + src += fb->pitches[0]; + dst += fb->pitches[0]; + } +} + static void drm_fb_helper_dirty_work(struct work_struct *work) { struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, @@ -753,8 +775,12 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) spin_unlock_irqrestore(&helper->dirty_lock, flags); /* call dirty callback only when it has been really touched */ - if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) + if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { + /* Generic fbdev uses a shadow buffer */ + if (helper->buffer) + drm_fb_helper_dirty_blit_real(helper, &clip_copy); helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); + } } /** @@ -2323,6 +2349,20 @@ retry: return true; } +static bool connector_has_possible_crtc(struct drm_connector *connector, + struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + int i; + + drm_connector_for_each_possible_encoder(connector, encoder, i) { + if (encoder->possible_crtcs & drm_crtc_mask(crtc)) + return true; + } + + return false; +} + static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **best_crtcs, struct drm_display_mode **modes, @@ -2331,7 +2371,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, int c, o; struct drm_connector *connector; const struct drm_connector_helper_funcs *connector_funcs; - struct drm_encoder *encoder; int my_score, best_score, score; struct drm_fb_helper_crtc **crtcs, *crtc; struct drm_fb_helper_connector *fb_helper_conn; @@ -2363,27 +2402,14 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, connector_funcs = connector->helper_private; /* - * If the DRM device implements atomic hooks and ->best_encoder() is - * NULL we fallback to the default drm_atomic_helper_best_encoder() - * helper. 
- */ - if (drm_drv_uses_atomic_modeset(fb_helper->dev) && - !connector_funcs->best_encoder) - encoder = drm_atomic_helper_best_encoder(connector); - else - encoder = connector_funcs->best_encoder(connector); - - if (!encoder) - goto out; - - /* * select a crtc for this connector and then attempt to configure * remaining connectors */ for (c = 0; c < fb_helper->crtc_count; c++) { crtc = &fb_helper->crtc_info[c]; - if ((encoder->possible_crtcs & (1 << c)) == 0) + if (!connector_has_possible_crtc(connector, + crtc->mode_set.crtc)) continue; for (o = 0; o < n; o++) @@ -2410,7 +2436,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, sizeof(struct drm_fb_helper_crtc *)); } } -out: + kfree(crtcs); return best_score; } @@ -2921,6 +2947,294 @@ void drm_fb_helper_output_poll_changed(struct drm_device *dev) } EXPORT_SYMBOL(drm_fb_helper_output_poll_changed); +/* @user: 1=userspace, 0=fbcon */ +static int drm_fbdev_fb_open(struct fb_info *info, int user) +{ + struct drm_fb_helper *fb_helper = info->par; + + if (!try_module_get(fb_helper->dev->driver->fops->owner)) + return -ENODEV; + + return 0; +} + +static int drm_fbdev_fb_release(struct fb_info *info, int user) +{ + struct drm_fb_helper *fb_helper = info->par; + + module_put(fb_helper->dev->driver->fops->owner); + + return 0; +} + +/* + * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of + * unregister_framebuffer() or fb_release(). + */ +static void drm_fbdev_fb_destroy(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct fb_info *fbi = fb_helper->fbdev; + struct fb_ops *fbops = NULL; + void *shadow = NULL; + + if (fbi->fbdefio) { + fb_deferred_io_cleanup(fbi); + shadow = fbi->screen_buffer; + fbops = fbi->fbops; + } + + drm_fb_helper_fini(fb_helper); + + if (shadow) { + vfree(shadow); + kfree(fbops); + } + + drm_client_framebuffer_delete(fb_helper->buffer); + /* + * FIXME: + * Remove conditional when all CMA drivers have been moved over to using + * drm_fbdev_generic_setup(). + */ + if (fb_helper->client.funcs) { + drm_client_release(&fb_helper->client); + kfree(fb_helper); + } +} + +static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct drm_fb_helper *fb_helper = info->par; + + if (fb_helper->dev->driver->gem_prime_mmap) + return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); + else + return -ENODEV; +} + +static struct fb_ops drm_fbdev_fb_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_open = drm_fbdev_fb_open, + .fb_release = drm_fbdev_fb_release, + .fb_destroy = drm_fbdev_fb_destroy, + .fb_mmap = drm_fbdev_fb_mmap, + .fb_read = drm_fb_helper_sys_read, + .fb_write = drm_fb_helper_sys_write, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, +}; + +static struct fb_deferred_io drm_fbdev_defio = { + .delay = HZ / 20, + .deferred_io = drm_fb_helper_deferred_io, +}; + +/** + * drm_fb_helper_generic_probe - Generic fbdev emulation probe helper + * @fb_helper: fbdev helper structure + * @sizes: describes fbdev size and scanout surface size + * + * This function uses the client API to create a framebuffer backed by a dumb buffer. + * + * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, + * fb_copyarea, fb_imageblit. + * + * Returns: + * Zero on success or negative error code on failure.
+ */ +int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_client_dev *client = &fb_helper->client; + struct drm_client_buffer *buffer; + struct drm_framebuffer *fb; + struct fb_info *fbi; + u32 format; + int ret; + + DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", + sizes->surface_width, sizes->surface_height, + sizes->surface_bpp); + + format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + buffer = drm_client_framebuffer_create(client, sizes->surface_width, + sizes->surface_height, format); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); + + fb_helper->buffer = buffer; + fb_helper->fb = buffer->fb; + fb = buffer->fb; + + fbi = drm_fb_helper_alloc_fbi(fb_helper); + if (IS_ERR(fbi)) { + ret = PTR_ERR(fbi); + goto err_free_buffer; + } + + fbi->par = fb_helper; + fbi->fbops = &drm_fbdev_fb_ops; + fbi->screen_size = fb->height * fb->pitches[0]; + fbi->fix.smem_len = fbi->screen_size; + fbi->screen_buffer = buffer->vaddr; + strcpy(fbi->fix.id, "DRM emulated"); + + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height); + + if (fb->funcs->dirty) { + struct fb_ops *fbops; + void *shadow; + + /* + * fb_deferred_io_cleanup() clears &fbops->fb_mmap so a per + * instance version is necessary. + */ + fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); + shadow = vzalloc(fbi->screen_size); + if (!fbops || !shadow) { + kfree(fbops); + vfree(shadow); + ret = -ENOMEM; + goto err_fb_info_destroy; + } + + *fbops = *fbi->fbops; + fbi->fbops = fbops; + fbi->screen_buffer = shadow; + fbi->fbdefio = &drm_fbdev_defio; + + fb_deferred_io_init(fbi); + } + + return 0; + +err_fb_info_destroy: + drm_fb_helper_fini(fb_helper); +err_free_buffer: + drm_client_framebuffer_delete(buffer); + + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_generic_probe); + +static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { + .fb_probe = drm_fb_helper_generic_probe, +}; + +static void drm_fbdev_client_unregister(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (fb_helper->fbdev) { + drm_fb_helper_unregister_fbi(fb_helper); + /* drm_fbdev_fb_destroy() takes care of cleanup */ + return; + } + + /* Did drm_fb_helper_fbdev_setup() run? 
*/ + if (fb_helper->dev) + drm_fb_helper_fini(fb_helper); + + drm_client_release(client); + kfree(fb_helper); +} + +static int drm_fbdev_client_restore(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + + return 0; +} + +static int drm_fbdev_client_hotplug(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = client->dev; + int ret; + + /* If drm_fb_helper_fbdev_setup() failed, we only try once */ + if (!fb_helper->dev && fb_helper->funcs) + return 0; + + if (dev->fb_helper) + return drm_fb_helper_hotplug_event(dev->fb_helper); + + if (!dev->mode_config.num_connector) + return 0; + + ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs, + fb_helper->preferred_bpp, 0); + if (ret) { + fb_helper->dev = NULL; + fb_helper->fbdev = NULL; + return ret; + } + + return 0; +} + +static const struct drm_client_funcs drm_fbdev_client_funcs = { + .owner = THIS_MODULE, + .unregister = drm_fbdev_client_unregister, + .restore = drm_fbdev_client_restore, + .hotplug = drm_fbdev_client_hotplug, +}; + +/** + * drm_fbdev_generic_setup() - Setup generic fbdev emulation + * @dev: DRM device + * @preferred_bpp: Preferred bits per pixel for the device. + * @dev->mode_config.preferred_depth is used if this is zero. + * + * This function sets up generic fbdev emulation for drivers that support + * dumb buffers with a virtual address and that can be mmap'ed. + * + * Restore, hotplug events and teardown are all taken care of. Drivers that do + * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. + * Simple drivers might use drm_mode_config_helper_suspend(). + * + * Drivers that set the dirty callback on their framebuffer will get a shadow + * fbdev buffer that is blitted onto the real buffer. This is done in order to + * make deferred I/O work with all kinds of buffers. + * + * This function is safe to call even when there are no connectors present. + * Setup will be retried on the next hotplug event. + * + * Returns: + * Zero on success or negative error code on failure. + */ +int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) +{ + struct drm_fb_helper *fb_helper; + int ret; + + if (!drm_fbdev_emulation) + return 0; + + fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); + if (!fb_helper) + return -ENOMEM; + + ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); + if (ret) { + kfree(fb_helper); + return ret; + } + + fb_helper->preferred_bpp = preferred_bpp; + + drm_fbdev_client_hotplug(&fb_helper->client); + + return 0; +} +EXPORT_SYMBOL(drm_fbdev_generic_setup); + /* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT) * but the module doesn't depend on any fb console symbols. At least * attempt to load fbcon to avoid leaving the system without a usable console.
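To make the setup flow above concrete, here is a minimal, hypothetical sketch of how a driver would opt into the generic fbdev emulation. It assumes a driver whose dumb buffers have a kernel virtual address and support mmap, as the helper requires; the example_load() function and the 32 bpp value are illustrative only and not part of this series:

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>

/*
 * Hypothetical driver load path (sketch). drm_fbdev_generic_setup()
 * replaces the old drm_fb_helper_init() / add-all-connectors /
 * initial-config boilerplate: it registers an internal DRM client whose
 * restore, hotplug and unregister callbacks keep the fbdev emulation
 * alive for the rest of the device's lifetime.
 */
static int example_load(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/*
	 * A failure here is not fatal to the driver: if no connectors are
	 * present yet, setup is retried from the client's hotplug callback.
	 */
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}

Drivers using the CMA helpers keep calling drm_fbdev_cma_init() for now; the FIXME in drm_fbdev_fb_destroy() above marks the plan to migrate them to this generic path as well.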
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 66bb403dc8ab..ffa8dc35515f 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/module.h> +#include <drm/drm_client.h> #include <drm/drm_file.h> #include <drm/drmP.h> @@ -444,6 +445,8 @@ void drm_lastclose(struct drm_device * dev) if (drm_core_check_feature(dev, DRIVER_LEGACY)) drm_legacy_dev_reinit(dev); + + drm_client_dev_restore(dev); } /** diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 5ca6395cd4d3..35c1e2742c27 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -152,27 +152,27 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 }, - { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 }, - { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 }, - { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 }, - { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 }, - { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 }, - { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 }, - { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 }, - { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 }, - { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 }, - { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 }, - { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 }, - { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 }, - { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 }, - { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 }, - { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 }, - { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 }, - { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 }, - { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 }, - { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 }, - { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true }, + { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 
3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true }, + { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, }; unsigned int i; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4a16d7b26c89..bf90625df3c5 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1036,6 +1036,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) return -EACCES; } + if (node->readonly) { + if (vma->vm_flags & VM_WRITE) { + drm_gem_object_put_unlocked(obj); + return -EINVAL; + } + + vma->vm_flags &= ~VM_MAYWRITE; + } + ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 3c125041a597..ea10e9a26aad 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -641,7 +641,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, 
drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED), diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index ce4d2fb32810..fcb0ab0abb75 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -433,8 +433,7 @@ static int set_property_legacy(struct drm_mode_object *obj, drm_modeset_lock_all(dev); switch (obj->type) { case DRM_MODE_OBJECT_CONNECTOR: - ret = drm_mode_connector_set_obj_prop(obj, prop, - prop_value); + ret = drm_connector_set_obj_prop(obj, prop, prop_value); break; case DRM_MODE_OBJECT_CRTC: ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 7f552d5fa88e..02db9ac82d7a 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -659,10 +659,12 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode); * drm_bus_flags_from_videomode - extract information about pixelclk and * DE polarity from videomode and store it in a separate variable * @vm: videomode structure to use - * @bus_flags: information about pixelclk and DE polarity will be stored here + * @bus_flags: information about pixelclk, sync and DE polarity will be stored + * here * - * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE - * in @bus_flags according to DISPLAY_FLAGS found in @vm + * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and + * DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS + * found in @vm */ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags) { @@ -672,6 +674,11 @@ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags) if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE; + if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE) + *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE; + if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE) + *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE; + if (vm->flags & DISPLAY_FLAGS_DE_LOW) *bus_flags |= DRM_BUS_FLAG_DE_LOW; if (vm->flags & DISPLAY_FLAGS_DE_HIGH) @@ -684,7 +691,7 @@ EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode); * of_get_drm_display_mode - get a drm_display_mode from devicetree * @np: device_node with the timing specification * @dmode: will be set to the return value - * @bus_flags: information about pixelclk and DE polarity + * @bus_flags: information about pixelclk, sync and DE polarity * @index: index into the list of display timings in devicetree * * This function is expensive and should only be used, if only one mode is to be @@ -1346,7 +1353,7 @@ void drm_mode_sort(struct list_head *mode_list) EXPORT_SYMBOL(drm_mode_sort); /** - * drm_mode_connector_list_update - update the mode list for the connector + * drm_connector_list_update - update the mode list for the connector * @connector: the connector to update * * This moves the modes from the @connector probed_modes list @@ -1356,7 +1363,7 @@ EXPORT_SYMBOL(drm_mode_sort); * This is just a helper functions doesn't validate any modes itself and also * doesn't prune any invalid modes. Callers need to do that themselves. 
*/ -void drm_mode_connector_list_update(struct drm_connector *connector) +void drm_connector_list_update(struct drm_connector *connector) { struct drm_display_mode *pmode, *pt; @@ -1405,7 +1412,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector) } } } -EXPORT_SYMBOL(drm_mode_connector_list_update); +EXPORT_SYMBOL(drm_connector_list_update); /** * drm_mode_parse_command_line_for_connector - parse command line modeline for connector diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 3b8c7a6a5720..2763a5ec845b 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -9,6 +9,13 @@ #include <drm/drm_panel.h> #include <drm/drm_of.h> +/** + * DOC: overview + * + * A set of helper functions to aid DRM drivers in parsing standard DT + * properties. + */ + static void drm_release_of(struct device *dev, void *data) { of_node_put(data); @@ -94,7 +101,7 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add); * drm_of_component_probe - Generic probe function for a component based master * @dev: master device containing the OF node * @compare_of: compare function used for matching components - * @master_ops: component master ops to be used + * @m_ops: component master ops to be used * * Parse the platform device OF node and bind all the components associated * with the master. Interface ports are added before the encoders in order to @@ -239,10 +246,17 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, if (!remote) return -ENODEV; + if (!of_device_is_available(remote)) { + of_node_put(remote); + return -ENODEV; + } + if (panel) { *panel = of_drm_find_panel(remote); - if (*panel) + if (!IS_ERR(*panel)) ret = 0; + else + *panel = NULL; } /* No panel found yet, check for a bridge next. */ diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 965530a6f4cd..b902361dee6e 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -151,12 +151,19 @@ EXPORT_SYMBOL(drm_panel_detach); * tree node. If a matching panel is found, return a pointer to it. * * Return: A pointer to the panel registered for the specified device tree - * node or NULL if no panel matching the device tree node can be found. + * node or an ERR_PTR() if no panel matching the device tree node can be found. 
+ * Possible error codes returned by this function: + * - EPROBE_DEFER: the panel device has not been probed yet, and the caller + * should retry later + * - ENODEV: the device is not available (status != "okay" or "ok") */ struct drm_panel *of_drm_find_panel(const struct device_node *np) { struct drm_panel *panel; + if (!of_device_is_available(np)) + return ERR_PTR(-ENODEV); + mutex_lock(&panel_lock); list_for_each_entry(panel, &panel_list, list) { @@ -167,7 +174,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np) } mutex_unlock(&panel_lock); - return NULL; + return ERR_PTR(-EPROBE_DEFER); } EXPORT_SYMBOL(of_drm_find_panel); #endif diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index df0b4ebbedbf..6153cbda239f 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -583,6 +583,52 @@ int drm_plane_check_pixel_format(struct drm_plane *plane, return 0; } +static int __setplane_check(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int32_t crtc_x, int32_t crtc_y, + uint32_t crtc_w, uint32_t crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + int ret; + + /* Check whether this plane is usable on this CRTC */ + if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { + DRM_DEBUG_KMS("Invalid crtc for plane\n"); + return -EINVAL; + } + + /* Check whether this plane supports the fb pixel format. */ + ret = drm_plane_check_pixel_format(plane, fb->format->format, + fb->modifier); + if (ret) { + struct drm_format_name_buf format_name; + + DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n", + drm_get_format_name(fb->format->format, + &format_name), + fb->modifier); + return ret; + } + + /* Give drivers some help against integer overflows */ + if (crtc_w > INT_MAX || + crtc_x > INT_MAX - (int32_t) crtc_w || + crtc_h > INT_MAX || + crtc_y > INT_MAX - (int32_t) crtc_h) { + DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", + crtc_w, crtc_h, crtc_x, crtc_y); + return -ERANGE; + } + + ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb); + if (ret) + return ret; + + return 0; +} + /* * __setplane_internal - setplane handler for internal callers * @@ -603,6 +649,8 @@ static int __setplane_internal(struct drm_plane *plane, { int ret = 0; + WARN_ON(drm_drv_uses_atomic_modeset(plane->dev)); + /* No fb means shut it down */ if (!fb) { plane->old_fb = plane->fb; @@ -616,37 +664,9 @@ static int __setplane_internal(struct drm_plane *plane, goto out; } - /* Check whether this plane is usable on this CRTC */ - if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { - DRM_DEBUG_KMS("Invalid crtc for plane\n"); - ret = -EINVAL; - goto out; - } - - /* Check whether this plane supports the fb pixel format. 
*/ - ret = drm_plane_check_pixel_format(plane, fb->format->format, - fb->modifier); - if (ret) { - struct drm_format_name_buf format_name; - DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n", - drm_get_format_name(fb->format->format, - &format_name), - fb->modifier); - goto out; - } - - /* Give drivers some help against integer overflows */ - if (crtc_w > INT_MAX || - crtc_x > INT_MAX - (int32_t) crtc_w || - crtc_h > INT_MAX || - crtc_y > INT_MAX - (int32_t) crtc_h) { - DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", - crtc_w, crtc_h, crtc_x, crtc_y); - ret = -ERANGE; - goto out; - } - - ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb); + ret = __setplane_check(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h); if (ret) goto out; @@ -655,11 +675,9 @@ static int __setplane_internal(struct drm_plane *plane, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, ctx); if (!ret) { - if (!plane->state) { - plane->crtc = crtc; - plane->fb = fb; - drm_framebuffer_get(plane->fb); - } + plane->crtc = crtc; + plane->fb = fb; + drm_framebuffer_get(plane->fb); } else { plane->old_fb = NULL; } @@ -672,6 +690,41 @@ out: return ret; } +static int __setplane_atomic(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int32_t crtc_x, int32_t crtc_y, + uint32_t crtc_w, uint32_t crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx) +{ + int ret; + + WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev)); + + /* No fb means shut it down */ + if (!fb) + return plane->funcs->disable_plane(plane, ctx); + + /* + * FIXME: This is redundant with drm_atomic_plane_check(), + * but the legacy cursor/"async" .update_plane() tricks + * don't call that so we still need this here. Should remove + * this when all .update_plane() implementations have been + * fixed to call drm_atomic_plane_check(). 
+ */ + ret = __setplane_check(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h); + if (ret) + return ret; + + return plane->funcs->update_plane(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h, ctx); +} + static int setplane_internal(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -689,9 +742,15 @@ retry: ret = drm_modeset_lock_all_ctx(plane->dev, &ctx); if (ret) goto fail; - ret = __setplane_internal(plane, crtc, fb, - crtc_x, crtc_y, crtc_w, crtc_h, - src_x, src_y, src_w, src_h, &ctx); + + if (drm_drv_uses_atomic_modeset(plane->dev)) + ret = __setplane_atomic(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h, &ctx); + else + ret = __setplane_internal(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h, &ctx); fail: if (ret == -EDEADLK) { @@ -823,9 +882,14 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, src_h = fb->height << 16; } - ret = __setplane_internal(plane, crtc, fb, - crtc_x, crtc_y, crtc_w, crtc_h, - 0, 0, src_w, src_h, ctx); + if (drm_drv_uses_atomic_modeset(dev)) + ret = __setplane_atomic(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + 0, 0, src_w, src_h, ctx); + else + ret = __setplane_internal(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + 0, 0, src_w, src_h, ctx); if (fb) drm_framebuffer_put(fb); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 527743394150..a1bb157bfdfa 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -33,6 +33,7 @@ #include <linux/moduleparam.h> #include <drm/drmP.h> +#include <drm/drm_client.h> #include <drm/drm_crtc.h> #include <drm/drm_fourcc.h> #include <drm/drm_crtc_helper.h> @@ -88,9 +89,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, struct drm_connector *connector) { struct drm_device *dev = connector->dev; - uint32_t *ids = connector->encoder_ids; enum drm_mode_status ret = MODE_OK; - unsigned int i; + struct drm_encoder *encoder; + int i; /* Step 1: Validate against connector */ ret = drm_connector_mode_valid(connector, mode); @@ -98,13 +99,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, return ret; /* Step 2: Validate against encoders and crtcs */ - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]); + drm_connector_for_each_possible_encoder(connector, encoder, i) { struct drm_crtc *crtc; - if (!encoder) - continue; - ret = drm_encoder_mode_valid(encoder, mode); if (ret != MODE_OK) { /* No point in continuing for crtc check as this encoder @@ -363,7 +360,7 @@ EXPORT_SYMBOL(drm_helper_probe_detect); * using the VESA GTF/CVT formulas. * * 3. Modes are moved from the probed_modes list to the modes list. Potential - * duplicates are merged together (see drm_mode_connector_list_update()). + * duplicates are merged together (see drm_connector_list_update()). * After this step the probed_modes list will be empty again. * * 4. 
Any non-stale mode on the modes list then undergoes validation @@ -475,7 +472,7 @@ retry: if (connector->status == connector_status_disconnected) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", connector->base.id, connector->name); - drm_mode_connector_update_edid_property(connector, NULL); + drm_connector_update_edid_property(connector, NULL); verbose_prune = false; goto prune; } @@ -488,7 +485,7 @@ retry: if (count == 0) goto prune; - drm_mode_connector_list_update(connector); + drm_connector_list_update(connector); if (connector->interlace_allowed) mode_flags |= DRM_MODE_FLAG_INTERLACE; @@ -563,6 +560,8 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev) drm_sysfs_hotplug_event(dev); if (dev->mode_config.funcs->output_poll_changed) dev->mode_config.funcs->output_poll_changed(dev); + + drm_client_dev_hotplug(dev); } EXPORT_SYMBOL(drm_kms_helper_hotplug_event); diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index b72fcf1e9605..51fa978f0d23 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -287,7 +287,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev, if (ret || !connector) return ret; - return drm_mode_connector_attach_encoder(connector, encoder); + return drm_connector_attach_encoder(connector, encoder); } EXPORT_SYMBOL(drm_simple_display_pipe_init); diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c index 827395071f0b..c20e6fe00cb3 100644 --- a/drivers/gpu/drm/drm_writeback.c +++ b/drivers/gpu/drm/drm_writeback.c @@ -22,10 +22,13 @@ * Writeback connectors are used to expose hardware which can write the output * from a CRTC to a memory buffer. They are used and act similarly to other * types of connectors, with some important differences: - * - Writeback connectors don't provide a way to output visually to the user. - * - Writeback connectors should always report as "disconnected" (so that - * clients which don't understand them will ignore them). - * - Writeback connectors don't have EDID. + * + * * Writeback connectors don't provide a way to output visually to the user. + * + * * Writeback connectors are visible to userspace only when the client sets + * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS. + * + * * Writeback connectors don't have EDID. * * A framebuffer may only be attached to a writeback connector when the * connector is attached to a CRTC. 
The WRITEBACK_FB_ID property which sets the @@ -199,7 +202,7 @@ int drm_writeback_connector_init(struct drm_device *dev, if (ret) goto connector_fail; - ret = drm_mode_connector_attach_encoder(connector, + ret = drm_connector_attach_encoder(connector, &wb_connector->encoder); if (ret) goto attach_fail; diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 86330f396784..af7ab1ceb50f 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -232,9 +232,11 @@ static int exynos_dp_probe(struct platform_device *pdev) np = of_parse_phandle(dev->of_node, "panel", 0); if (np) { dp->plat_data.panel = of_drm_find_panel(np); + of_node_put(np); - if (!dp->plat_data.panel) - return -EPROBE_DEFER; + if (IS_ERR(dp->plat_data.panel)) + return PTR_ERR(dp->plat_data.panel); + goto out; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 66945e0dc57f..2f0babb67c51 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -113,7 +113,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder) } drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -240,8 +240,8 @@ struct drm_encoder *exynos_dpi_probe(struct device *dev) if (ctx->panel_node) { ctx->panel = of_drm_find_panel(ctx->panel_node); - if (!ctx->panel) - return ERR_PTR(-EPROBE_DEFER); + if (IS_ERR(ctx->panel)) + return ERR_CAST(ctx->panel); } return &ctx->encoder; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 6d29777884f9..a1ed6146a3b5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1479,7 +1479,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder) connector->status = connector_status_disconnected; drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -1519,6 +1519,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host, dsi->format = device->format; dsi->mode_flags = device->mode_flags; dsi->panel = of_drm_find_panel(device->dev.of_node); + if (IS_ERR(dsi->panel)) + dsi->panel = NULL; + if (dsi->panel) { drm_panel_attach(dsi->panel, &dsi->connector); dsi->connector.status = connector_status_connected; diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index e6b0940b1ac2..19697c1362d8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -319,7 +319,7 @@ static int vidi_get_modes(struct drm_connector *connector) return -ENOMEM; } - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); return drm_add_edid_modes(connector, edid); } @@ -344,7 +344,7 @@ static int vidi_create_connector(struct drm_encoder *encoder) } drm_connector_helper_add(connector, &vidi_connector_helper_funcs); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index db91932550cf..3a11c719a580 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c 
@@ -888,7 +888,7 @@ static int hdmi_get_modes(struct drm_connector *connector) (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), edid->width_cm, edid->height_cm); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid); ret = drm_add_edid_modes(connector, edid); @@ -951,7 +951,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder) } drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); if (hdata->bridge) { ret = drm_bridge_attach(encoder, hdata->bridge, NULL); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index c54806d08dd7..2298ed2a9e1c 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -117,7 +117,7 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, if (ret < 0) goto err_cleanup; - ret = drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret < 0) goto err_sysfs; @@ -148,8 +148,9 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev) if (panel_node) { fsl_dev->connector.panel = of_drm_find_panel(panel_node); of_node_put(panel_node); - if (!fsl_dev->connector.panel) - return -EPROBE_DEFER; + if (IS_ERR(fsl_dev->connector.panel)) + return PTR_ERR(fsl_dev->connector.panel); + return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 5ea785f07ba8..90ed20083009 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -1770,7 +1770,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector) edid = drm_get_edid(connector, &intel_dp->adapter); if (edid) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index f0878998526a..4e4e4a66eaee 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -216,7 +216,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector) edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter); if (edid) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); } diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 08f17f85b801..09c1161a7ac6 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -665,7 +665,7 @@ void gma_connector_attach_encoder(struct gma_connector *connector, struct gma_encoder *encoder) { connector->encoder = encoder; - drm_mode_connector_attach_encoder(&connector->base, + drm_connector_attach_encoder(&connector->base, &encoder->base); } diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h index 978ae4b25e82..e0ccf1d19a4d 100644 --- a/drivers/gpu/drm/gma500/intel_bios.h +++ b/drivers/gpu/drm/gma500/intel_bios.h @@ -34,7 +34,7 @@ struct vbt_header { u8 reserved0; u32 bdb_offset; /**< from beginning of VBT */ u32 aim_offset[4]; /**< from beginning 
of VBT */ -} __attribute__((packed)); +} __packed; struct bdb_header { @@ -61,7 +61,7 @@ struct vbios_data { u8 rsvd4; /* popup memory size */ u8 resize_pci_bios; u8 rsvd5; /* is crt already on ddc2 */ -} __attribute__((packed)); +} __packed; /* * There are several types of BIOS data blocks (BDBs), each block has @@ -133,7 +133,7 @@ struct bdb_general_features { u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ u8 rsvd11:3; /* finish byte */ -} __attribute__((packed)); +} __packed; /* pre-915 */ #define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ @@ -213,7 +213,7 @@ struct child_device_config { u8 dvo2_wiring; u16 extended_type; u8 dvo_function; -} __attribute__((packed)); +} __packed; struct bdb_general_definitions { @@ -256,7 +256,7 @@ struct bdb_lvds_options { u8 lvds_edid:1; u8 rsvd2:1; u8 rsvd4; -} __attribute__((packed)); +} __packed; struct bdb_lvds_backlight { u8 type:2; @@ -268,7 +268,7 @@ struct bdb_lvds_backlight { u8 i2caddr; u8 brightnesscmd; /*FIXME: more...*/ -} __attribute__((packed)); +} __packed; /* LFP pointer table contains entries to the struct below */ struct bdb_lvds_lfp_data_ptr { @@ -278,12 +278,12 @@ struct bdb_lvds_lfp_data_ptr { u8 dvo_table_size; u16 panel_pnp_id_offset; u8 pnp_table_size; -} __attribute__((packed)); +} __packed; struct bdb_lvds_lfp_data_ptrs { u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ struct bdb_lvds_lfp_data_ptr ptr[16]; -} __attribute__((packed)); +} __packed; /* LFP data has 3 blocks per entry */ struct lvds_fp_timing { @@ -300,7 +300,7 @@ struct lvds_fp_timing { u32 pfit_reg; u32 pfit_reg_val; u16 terminator; -} __attribute__((packed)); +} __packed; struct lvds_dvo_timing { u16 clock; /**< In 10khz */ @@ -328,7 +328,7 @@ struct lvds_dvo_timing { u8 vsync_positive:1; u8 hsync_positive:1; u8 rsvd2:1; -} __attribute__((packed)); +} __packed; struct lvds_pnp_id { u16 mfg_name; @@ -336,17 +336,17 @@ struct lvds_pnp_id { u32 serial; u8 mfg_week; u8 mfg_year; -} __attribute__((packed)); +} __packed; struct bdb_lvds_lfp_data_entry { struct lvds_fp_timing fp_timing; struct lvds_dvo_timing dvo_timing; struct lvds_pnp_id pnp_id; -} __attribute__((packed)); +} __packed; struct bdb_lvds_lfp_data { struct bdb_lvds_lfp_data_entry data[16]; -} __attribute__((packed)); +} __packed; struct aimdb_header { char signature[16]; @@ -354,12 +354,12 @@ struct aimdb_header { u16 aimdb_version; u16 aimdb_header_size; u16 aimdb_size; -} __attribute__((packed)); +} __packed; struct aimdb_block { u8 aimdb_id; u16 aimdb_size; -} __attribute__((packed)); +} __packed; struct vch_panel_data { u16 fp_timing_offset; @@ -370,12 +370,12 @@ struct vch_panel_data { u8 text_fitting_size; u16 graphics_fitting_offset; u8 graphics_fitting_size; -} __attribute__((packed)); +} __packed; struct vch_bdb_22 { struct aimdb_block aimdb_block; struct vch_panel_data panels[16]; -} __attribute__((packed)); +} __packed; struct bdb_sdvo_lvds_options { u8 panel_backlight; @@ -391,7 +391,7 @@ struct bdb_sdvo_lvds_options { u8 panel_misc_bits_2; u8 panel_misc_bits_3; u8 panel_misc_bits_4; -} __attribute__((packed)); +} __packed; #define BDB_DRIVER_FEATURE_NO_LVDS 0 #define BDB_DRIVER_FEATURE_INT_LVDS 1 @@ -436,7 +436,7 @@ struct bdb_driver_features { u8 hdmi_termination; u8 custom_vbt_version; -} __attribute__((packed)); +} __packed; #define EDP_18BPP 0 #define EDP_24BPP 1 diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c index a05c020602bd..d0bf5a1e94e8 100644 
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c @@ -999,7 +999,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, p_funcs->encoder_helper_funcs); /*attach to given connector*/ - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); /*set possible crtcs and clones*/ if (dsi_connector->pipe) { diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 78566a80ad25..c6d72de1c054 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -578,7 +578,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector) } if (edid) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); } return ret; diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index e6943fef0611..83babb815a5d 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -376,7 +376,7 @@ void oaktrail_lvds_init(struct drm_device *dev, * preferred mode is the right one. */ if (edid) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); kfree(edid); diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c index e5360726d80b..fb4da3cd6681 100644 --- a/drivers/gpu/drm/gma500/psb_intel_modes.c +++ b/drivers/gpu/drm/gma500/psb_intel_modes.c @@ -66,7 +66,7 @@ int psb_intel_ddc_get_modes(struct drm_connector *connector, edid = drm_get_edid(connector, adapter); if (edid) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); } diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 1d40746ab625..dd3cec0e3190 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1472,7 +1472,7 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector) bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector); if (connector_is_digital == monitor_is_digital) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); } diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index d2f4749ebf8d..744956cea749 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -133,7 +133,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) } drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 0038c976536a..eecdc327b9f8 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -1243,7 +1243,7 @@ static int tda998x_connector_get_modes(struct drm_connector *connector) return 0; } - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); n = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1301,7 
+1301,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv, if (ret) return ret; - drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder); + drm_connector_attach_encoder(&priv->connector, &priv->encoder); return 0; } diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 9de8b1c51a5c..459f8f88a34c 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -51,6 +51,18 @@ config DRM_I915_DEBUG_GEM If in doubt, say "N". +config DRM_I915_ERRLOG_GEM + bool "Insert extra logging (very verbose) for common GEM errors" + default n + depends on DRM_I915_DEBUG_GEM + help + Enable additional logging that may help track down the cause of + principally userspace errors. + + Recommended for driver developers only. + + If in doubt, say "N". + config DRM_I915_TRACE_GEM bool "Insert extra ftrace output from the GEM internals" depends on DRM_I915_DEBUG_GEM diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 4c6adae23e18..5794f102f9b8 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -135,15 +135,14 @@ i915-y += dvo_ch7017.o \ dvo_ns2501.o \ dvo_sil164.o \ dvo_tfp410.o \ + icl_dsi.o \ intel_crt.o \ intel_ddi.o \ intel_dp_aux_backlight.o \ intel_dp_link_training.o \ intel_dp_mst.o \ intel_dp.o \ - intel_dsi.o \ intel_dsi_dcs_backlight.o \ - intel_dsi_pll.o \ intel_dsi_vbt.o \ intel_dvo.o \ intel_hdmi.o \ @@ -152,7 +151,9 @@ i915-y += dvo_ch7017.o \ intel_lvds.o \ intel_panel.o \ intel_sdvo.o \ - intel_tv.o + intel_tv.o \ + vlv_dsi.o \ + vlv_dsi_pll.o # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 642e216e0a5b..39980dfbbebd 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = { GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_2M_ENTRY), + /* We take IPS bit as 'PSE' for PTE level. 
*/ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, - GTT_TYPE_INVALID), + GTT_TYPE_PPGTT_PTE_64K_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, - GTT_TYPE_INVALID), + GTT_TYPE_PPGTT_PTE_64K_ENTRY), + GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY, + GTT_TYPE_PPGTT_PTE_4K_ENTRY, + GTT_TYPE_PPGTT_PTE_PT, + GTT_TYPE_INVALID, + GTT_TYPE_PPGTT_PTE_64K_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY, GTT_TYPE_PPGTT_PDE_ENTRY, GTT_TYPE_PPGTT_PDE_PT, @@ -339,8 +345,14 @@ static inline int gtt_set_entry64(void *pt, #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30) #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21) +#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16) #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12) +#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52) +#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */ + +#define GTT_64K_PTE_STRIDE 16 + static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) { unsigned long pfn; @@ -349,6 +361,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT; else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT; + else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) + pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT; else pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT; return pfn; @@ -362,6 +376,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn) } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) { e->val64 &= ~ADDR_2M_MASK; pfn &= (ADDR_2M_MASK >> PAGE_SHIFT); + } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) { + e->val64 &= ~ADDR_64K_MASK; + pfn &= (ADDR_64K_MASK >> PAGE_SHIFT); } else { e->val64 &= ~ADDR_4K_MASK; pfn &= (ADDR_4K_MASK >> PAGE_SHIFT); @@ -372,16 +389,41 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn) static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e) { - /* Entry doesn't have PSE bit. 
*/ - if (get_pse_type(e->type) == GTT_TYPE_INVALID) - return false; + return !!(e->val64 & _PAGE_PSE); +} - e->type = get_entry_type(e->type); - if (!(e->val64 & _PAGE_PSE)) +static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e) +{ + if (gen8_gtt_test_pse(e)) { + switch (e->type) { + case GTT_TYPE_PPGTT_PTE_2M_ENTRY: + e->val64 &= ~_PAGE_PSE; + e->type = GTT_TYPE_PPGTT_PDE_ENTRY; + break; + case GTT_TYPE_PPGTT_PTE_1G_ENTRY: + e->type = GTT_TYPE_PPGTT_PDP_ENTRY; + e->val64 &= ~_PAGE_PSE; + break; + default: + WARN_ON(1); + } + } +} + +static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e) +{ + if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) return false; - e->type = get_pse_type(e->type); - return true; + return !!(e->val64 & GEN8_PDE_IPS_64K); +} + +static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e) +{ + if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) + return; + + e->val64 &= ~GEN8_PDE_IPS_64K; } static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) @@ -408,6 +450,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) e->val64 |= _PAGE_PRESENT; } +static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e) +{ + return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED); +} + +static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e) +{ + e->val64 |= GTT_SPTE_FLAG_64K_SPLITED; +} + +static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e) +{ + e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED; +} + /* * Per-platform GMA routines. */ @@ -440,6 +497,12 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { .set_present = gtt_entry_set_present, .test_present = gen8_gtt_test_present, .test_pse = gen8_gtt_test_pse, + .clear_pse = gen8_gtt_clear_pse, + .clear_ips = gen8_gtt_clear_ips, + .test_ips = gen8_gtt_test_ips, + .clear_64k_splited = gen8_gtt_clear_64k_splited, + .set_64k_splited = gen8_gtt_set_64k_splited, + .test_64k_splited = gen8_gtt_test_64k_splited, .get_pfn = gen8_gtt_get_pfn, .set_pfn = gen8_gtt_set_pfn, }; @@ -453,6 +516,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { .gma_to_pml4_index = gen8_gma_to_pml4_index, }; +/* Update entry type per pse and ips bit. */ +static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops, + struct intel_gvt_gtt_entry *entry, bool ips) +{ + switch (entry->type) { + case GTT_TYPE_PPGTT_PDE_ENTRY: + case GTT_TYPE_PPGTT_PDP_ENTRY: + if (pte_ops->test_pse(entry)) + entry->type = get_pse_type(entry->type); + break; + case GTT_TYPE_PPGTT_PTE_4K_ENTRY: + if (ips) + entry->type = get_pse_type(entry->type); + break; + default: + GEM_BUG_ON(!gtt_type_is_entry(entry->type)); + } + + GEM_BUG_ON(entry->type == GTT_TYPE_INVALID); +} + /* * MM helpers. */ @@ -468,8 +552,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps : mm->ppgtt_mm.shadow_pdps, entry, index, false, 0, mm->vgpu); - - pte_ops->test_pse(entry); + update_entry_type_for_real(pte_ops, entry, false); } static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm, @@ -574,7 +657,8 @@ static inline int ppgtt_spt_get_entry( if (ret) return ret; - ops->test_pse(e); + update_entry_type_for_real(ops, e, guest ? 
+ spt->guest_page.pde_ips : false); gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", type, e->type, index, e->val64); @@ -653,10 +737,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); - if (spt->guest_page.oos_page) - detach_oos_page(spt->vgpu, spt->guest_page.oos_page); + if (spt->guest_page.gfn) { + if (spt->guest_page.oos_page) + detach_oos_page(spt->vgpu, spt->guest_page.oos_page); - intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); + intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); + } list_del_init(&spt->post_shadow_list); free_spt(spt); @@ -717,8 +803,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn( static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); +/* Allocate shadow page table without guest page. */ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( - struct intel_vgpu *vgpu, int type, unsigned long gfn) + struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type) { struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; struct intel_vgpu_ppgtt_spt *spt = NULL; @@ -753,26 +840,12 @@ retry: spt->shadow_page.vaddr = page_address(spt->shadow_page.page); spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; - /* - * Init guest_page. - */ - spt->guest_page.type = type; - spt->guest_page.gfn = gfn; - - ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn, - ppgtt_write_protection_handler, spt); - if (ret) - goto err_unmap_dma; - ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); if (ret) - goto err_unreg_page_track; + goto err_unmap_dma; - trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); return spt; -err_unreg_page_track: - intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn); err_unmap_dma: dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); err_free_spt: @@ -780,6 +853,37 @@ err_free_spt: return ERR_PTR(ret); } +/* Allocate shadow page table associated with specific gfn. */ +static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn( + struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type, + unsigned long gfn, bool guest_pde_ips) +{ + struct intel_vgpu_ppgtt_spt *spt; + int ret; + + spt = ppgtt_alloc_spt(vgpu, type); + if (IS_ERR(spt)) + return spt; + + /* + * Init guest_page. + */ + ret = intel_vgpu_register_page_track(vgpu, gfn, + ppgtt_write_protection_handler, spt); + if (ret) { + ppgtt_free_spt(spt); + return ERR_PTR(ret); + } + + spt->guest_page.type = type; + spt->guest_page.gfn = gfn; + spt->guest_page.pde_ips = guest_pde_ips; + + trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); + + return spt; +} + #define pt_entry_size_shift(spt) \ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) @@ -787,24 +891,38 @@ err_free_spt: (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) #define for_each_present_guest_entry(spt, e, i) \ - for (i = 0; i < pt_entries(spt); i++) \ + for (i = 0; i < pt_entries(spt); \ + i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \ if (!ppgtt_get_guest_entry(spt, e, i) && \ spt->vgpu->gvt->gtt.pte_ops->test_present(e)) #define for_each_present_shadow_entry(spt, e, i) \ - for (i = 0; i < pt_entries(spt); i++) \ + for (i = 0; i < pt_entries(spt); \ + i += spt->shadow_page.pde_ips ? 
GTT_64K_PTE_STRIDE : 1) \ if (!ppgtt_get_shadow_entry(spt, e, i) && \ spt->vgpu->gvt->gtt.pte_ops->test_present(e)) -static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt) +#define for_each_shadow_entry(spt, e, i) \ + for (i = 0; i < pt_entries(spt); \ + i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \ + if (!ppgtt_get_shadow_entry(spt, e, i)) + +static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt) { int v = atomic_read(&spt->refcount); trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1)); - atomic_inc(&spt->refcount); } +static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt) +{ + int v = atomic_read(&spt->refcount); + + trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); + return atomic_dec_return(&spt->refcount); +} + static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt); static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, @@ -843,7 +961,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, pfn = ops->get_pfn(entry); type = spt->shadow_page.type; - if (pfn == vgpu->gtt.scratch_pt[type].page_mfn) + /* Uninitialized spte or unshadowed spte. */ + if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn) return; intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); @@ -855,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) struct intel_gvt_gtt_entry e; unsigned long index; int ret; - int v = atomic_read(&spt->refcount); trace_spt_change(spt->vgpu->id, "die", spt, spt->guest_page.gfn, spt->shadow_page.type); - trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); - - if (atomic_dec_return(&spt->refcount) > 0) + if (ppgtt_put_spt(spt) > 0) return 0; for_each_present_shadow_entry(spt, &e, index) { @@ -871,9 +987,15 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) gvt_vdbg_mm("invalidate 4K entry\n"); ppgtt_invalidate_pte(spt, &e); break; + case GTT_TYPE_PPGTT_PTE_64K_ENTRY: + /* We don't setup 64K shadow entry so far. */ + WARN(1, "suspicious 64K gtt entry\n"); + continue; case GTT_TYPE_PPGTT_PTE_2M_ENTRY: + gvt_vdbg_mm("invalidate 2M entry\n"); + continue; case GTT_TYPE_PPGTT_PTE_1G_ENTRY: - WARN(1, "GVT doesn't support 2M/1GB page\n"); + WARN(1, "GVT doesn't support 1GB page\n"); continue; case GTT_TYPE_PPGTT_PML4_ENTRY: case GTT_TYPE_PPGTT_PDP_ENTRY: @@ -899,6 +1021,22 @@ fail: return ret; } +static bool vgpu_ips_enabled(struct intel_vgpu *vgpu) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) { + u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) & + GAMW_ECO_ENABLE_64K_IPS_FIELD; + + return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD; + } else if (INTEL_GEN(dev_priv) >= 11) { + /* 64K paging only controlled by IPS bit in PTE now. 
*/ + return true; + } else + return false; +} + static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt); static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( @@ -906,35 +1044,54 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( { struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *spt = NULL; + bool ips = false; int ret; GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); + if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY) + ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we); + spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we)); - if (spt) + if (spt) { ppgtt_get_spt(spt); - else { + + if (ips != spt->guest_page.pde_ips) { + spt->guest_page.pde_ips = ips; + + gvt_dbg_mm("reshadow PDE since ips changed\n"); + clear_page(spt->shadow_page.vaddr); + ret = ppgtt_populate_spt(spt); + if (ret) { + ppgtt_put_spt(spt); + goto err; + } + } + } else { int type = get_next_pt_type(we->type); - spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we)); + spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); if (IS_ERR(spt)) { ret = PTR_ERR(spt); - goto fail; + goto err; } ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn); if (ret) - goto fail; + goto err_free_spt; ret = ppgtt_populate_spt(spt); if (ret) - goto fail; + goto err_free_spt; trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn, spt->shadow_page.type); } return spt; -fail: + +err_free_spt: + ppgtt_free_spt(spt); +err: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", spt, we->val64, we->type); return ERR_PTR(ret); @@ -948,16 +1105,118 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, se->type = ge->type; se->val64 = ge->val64; + /* Because we always split 64KB pages, clear IPS in the shadow PDE. */ + if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY) + ops->clear_ips(se); + ops->set_pfn(se, s->shadow_page.mfn); } +/** + * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions + * are not met, negative if an error is found. + */ +static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, + struct intel_gvt_gtt_entry *entry) +{ + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + unsigned long pfn; + + if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M)) + return 0; + + pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry)); + if (pfn == INTEL_GVT_INVALID_ADDR) + return -EINVAL; + + return PageTransHuge(pfn_to_page(pfn)); +} + +static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, + struct intel_vgpu_ppgtt_spt *spt, unsigned long index, + struct intel_gvt_gtt_entry *se) +{ + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + struct intel_vgpu_ppgtt_spt *sub_spt; + struct intel_gvt_gtt_entry sub_se; + unsigned long start_gfn; + dma_addr_t dma_addr; + unsigned long sub_index; + int ret; + + gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index); + + start_gfn = ops->get_pfn(se); + + sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT); + if (IS_ERR(sub_spt)) + return PTR_ERR(sub_spt); + + for_each_shadow_entry(sub_spt, &sub_se, sub_index) { + ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, + start_gfn + sub_index, PAGE_SIZE, &dma_addr); + if (ret) { + ppgtt_invalidate_spt(spt); + return ret; + } + sub_se.val64 = se->val64; +
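
An editor's note on the next two statements: they relocate the PAT selection from the huge-page PDE into each 4K shadow PTE. A minimal standalone sketch of that bit move, assuming the usual x86 layout where _PAGE_PAT_LARGE is bit 12 of a huge-page entry and _PAGE_PAT is bit 7 of a 4K entry (consistent with the shift by 5 used in the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define PAT_4K    (1ULL << 7)   /* stand-in for _PAGE_PAT */
    #define PAT_LARGE (1ULL << 12)  /* stand-in for _PAGE_PAT_LARGE */

    int main(void)
    {
        uint64_t pde = PAT_LARGE;          /* huge-page entry with PAT set */
        uint64_t pte = pde;                /* shadow entry starts as a copy */

        pte &= ~PAT_4K;                    /* clear whatever sits on bit 7 */
        pte |= (pde & PAT_LARGE) >> 5;     /* move bit 12 down to bit 7 */

        printf("PAT bit in 4K pte: %d\n", (int)!!(pte & PAT_4K)); /* prints 1 */
        return 0;
    }

+ /* Copy the PAT field from PDE. 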
*/ + sub_se.val64 &= ~_PAGE_PAT; + sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5; + + ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT); + ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index); + } + + /* Clear dirty field. */ + se->val64 &= ~_PAGE_DIRTY; + + ops->clear_pse(se); + ops->clear_ips(se); + ops->set_pfn(se, sub_spt->shadow_page.mfn); + ppgtt_set_shadow_entry(spt, se, index); + return 0; +} + +static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, + struct intel_vgpu_ppgtt_spt *spt, unsigned long index, + struct intel_gvt_gtt_entry *se) +{ + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + struct intel_gvt_gtt_entry entry = *se; + unsigned long start_gfn; + dma_addr_t dma_addr; + int i, ret; + + gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index); + + GEM_BUG_ON(index % GTT_64K_PTE_STRIDE); + + start_gfn = ops->get_pfn(se); + + entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY; + ops->set_64k_splited(&entry); + + for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { + ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, + start_gfn + i, PAGE_SIZE, &dma_addr); + if (ret) + return ret; + + ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT); + ppgtt_set_shadow_entry(spt, &entry, index + i); + } + return 0; +} + static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *ge) { struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry se = *ge; - unsigned long gfn; + unsigned long gfn, page_size = PAGE_SIZE; dma_addr_t dma_addr; int ret; @@ -970,16 +1229,33 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, case GTT_TYPE_PPGTT_PTE_4K_ENTRY: gvt_vdbg_mm("shadow 4K gtt entry\n"); break; + case GTT_TYPE_PPGTT_PTE_64K_ENTRY: + gvt_vdbg_mm("shadow 64K gtt entry\n"); + /* + * The layout of a 64K page is special: the page size is + * controlled by the upper PDE. To keep it simple, we always + * split a 64K page into smaller 4K pages in the shadow PT. + */ + return split_64KB_gtt_entry(vgpu, spt, index, &se); case GTT_TYPE_PPGTT_PTE_2M_ENTRY: + gvt_vdbg_mm("shadow 2M gtt entry\n"); + ret = is_2MB_gtt_possible(vgpu, ge); + if (ret == 0) + return split_2MB_gtt_entry(vgpu, spt, index, &se); + else if (ret < 0) + return ret; + page_size = I915_GTT_PAGE_SIZE_2M; + break; case GTT_TYPE_PPGTT_PTE_1G_ENTRY: - gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n"); + gvt_vgpu_err("GVT doesn't support 1GB entry\n"); return -EINVAL; default: GEM_BUG_ON(1); }; /* direct shadow */ - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr); + ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size, + &dma_addr); if (ret) return -ENXIO; @@ -1062,8 +1338,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, ret = ppgtt_invalidate_spt(s); if (ret) goto fail; - } else + } else { + /* We don't set up 64K shadow entries so far. */ + WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY, + "suspicious 64K entry\n"); ppgtt_invalidate_pte(spt, se); + } return 0; fail: @@ -1286,7 +1566,7 @@ static int ppgtt_handle_guest_write_page_table( struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry old_se; int new_present; - int ret; + int i, ret; new_present = ops->test_present(we); @@ -1308,8 +1588,27 @@ static int ppgtt_handle_guest_write_page_table( goto fail; if (!new_present) { - ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn); - ppgtt_set_shadow_entry(spt, &old_se, index);
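
Context for the removal path that follows: with 64K pages only every sixteenth PTE slot carries an entry, so teardown has to wipe the whole stride. A small runnable sketch of the stride arithmetic (GTT_64K_PTE_STRIDE works out to 16, i.e. 64K / 4K):

    #include <stdio.h>

    #define GTT_64K_PTE_STRIDE 16   /* 64K / 4K */
    #define PT_ENTRIES 512          /* PTEs per page table */

    int main(void)
    {
        unsigned int index;

        /* Only PTE#0, PTE#16, ..., PTE#496 carry a 64K entry. */
        for (index = 0; index < PT_ENTRIES; index += GTT_64K_PTE_STRIDE)
            printf("PTE#%u maps guest offset 0x%x\n", index, index << 12);
        return 0;
    }

+ /* For 64KB split entries, we need to clear them all. 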
*/ + if (ops->test_64k_splited(&old_se) && + !(index % GTT_64K_PTE_STRIDE)) { + gvt_vdbg_mm("remove splited 64K shadow entries\n"); + for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { + ops->clear_64k_splited(&old_se); + ops->set_pfn(&old_se, + vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &old_se, index + i); + } + } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY || + old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { + ops->clear_pse(&old_se); + ops->set_pfn(&old_se, + vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &old_se, index); + } else { + ops->set_pfn(&old_se, + vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &old_se, index); + } } return 0; @@ -1391,7 +1690,17 @@ static int ppgtt_handle_guest_write_page_table_bytes( ppgtt_get_guest_entry(spt, &we, index); - ops->test_pse(&we); + /* + * For a page table which has 64K gtt entries, only PTE#0, PTE#16, + * PTE#32, ... PTE#496 are used. Updates to the unused PTEs should be + * ignored. + */ + if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY && + (index % GTT_64K_PTE_STRIDE)) { + gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n", + index); + return 0; + } if (bytes == info->gtt_entry_size) { ret = ppgtt_handle_guest_write_page_table(spt, &we, index); @@ -1881,7 +2190,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, } ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, - &dma_addr); + PAGE_SIZE, &dma_addr); if (ret) { gvt_vgpu_err("fail to populate guest ggtt entry\n"); /* guest driver may read/write the entry when partial diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 3792f2b7f4ff..b7bf68cc8418 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -63,6 +63,12 @@ struct intel_gvt_gtt_pte_ops { void (*clear_present)(struct intel_gvt_gtt_entry *e); void (*set_present)(struct intel_gvt_gtt_entry *e); bool (*test_pse)(struct intel_gvt_gtt_entry *e); + void (*clear_pse)(struct intel_gvt_gtt_entry *e); + bool (*test_ips)(struct intel_gvt_gtt_entry *e); + void (*clear_ips)(struct intel_gvt_gtt_entry *e); + bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e); + void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e); + void (*set_64k_splited)(struct intel_gvt_gtt_entry *e); void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn); unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e); }; @@ -95,6 +101,7 @@ typedef enum { GTT_TYPE_GGTT_PTE, GTT_TYPE_PPGTT_PTE_4K_ENTRY, + GTT_TYPE_PPGTT_PTE_64K_ENTRY, GTT_TYPE_PPGTT_PTE_2M_ENTRY, GTT_TYPE_PPGTT_PTE_1G_ENTRY, @@ -220,6 +227,7 @@ struct intel_vgpu_ppgtt_spt { struct { intel_gvt_gtt_type_t type; + bool pde_ips; /* for 64KB PTEs */ void *vaddr; struct page *page; unsigned long mfn; @@ -227,6 +235,7 @@ struct intel_vgpu_ppgtt_spt { struct { intel_gvt_gtt_type_t type; + bool pde_ips; /* for 64KB PTEs */ unsigned long gfn; unsigned long write_cnt; struct intel_vgpu_oos_page *oos_page; diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 4e65266e7b95..712f9d14e720 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -468,3 +468,7 @@ out_clean_idr: kfree(gvt); return ret; } + +#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) +MODULE_SOFTDEP("pre: kvmgt"); +#endif diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index e39492aaff6c..6b50f850dc28 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c
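
The pte_ops hooks added to gtt.h above are thin single-bit helpers over the 64-bit entry value. A hypothetical standalone sketch of the pattern (the bit position below is illustrative only; the driver takes the real value from its gen8 PTE definitions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative position only; not the driver's actual define. */
    #define PDE_IPS_BIT (1ULL << 11)

    struct gtt_entry { uint64_t val64; };

    static bool test_ips(struct gtt_entry *e)  { return e->val64 & PDE_IPS_BIT; }
    static void clear_ips(struct gtt_entry *e) { e->val64 &= ~PDE_IPS_BIT; }

    int main(void)
    {
        struct gtt_entry e = { .val64 = PDE_IPS_BIT };

        printf("ips=%d\n", test_ips(&e));   /* 1 */
        clear_ips(&e);                      /* shadow PDEs never keep IPS */
        printf("ips=%d\n", test_ips(&e));   /* 0 */
        return 0;
    }

@@ -210,6 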
+210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, return 0; } +static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD; + + if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) { + if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD) + gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id); + else if (!ips) + gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id); + else { + /* All engines must be enabled together for vGPU, + * since we don't know which engine the ppgtt will + * bind to when shadowing. + */ + gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n", + ips); + return -EINVAL; + } + } + + write_vreg(vgpu, offset, p_data, bytes); + return 0; +} + static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { @@ -1564,6 +1589,13 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, return 0; } +static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + vgpu_vreg(vgpu, offset) = 0; + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1774,7 +1806,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); + MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL, + gamw_echo_dev_rw_ia_write); + MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); @@ -3160,6 +3194,9 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); + MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); + MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); + MMIO_D(RC6_CTX_BASE, D_BXT); MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index f6dd9f717888..5af11cf1b482 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -53,7 +53,7 @@ struct intel_gvt_mpt { unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn); int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn, - dma_addr_t *dma_addr); + unsigned long size, dma_addr_t *dma_addr); void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index df4e4a07db3d..718ab307a500 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -94,6 +94,7 @@ struct gvt_dma { struct rb_node dma_addr_node; gfn_t gfn; dma_addr_t dma_addr; + unsigned long size; struct kref ref; }; @@ -106,22 +107,83 @@ static int kvmgt_guest_init(struct mdev_device *mdev); static void intel_vgpu_release_work(struct work_struct *work); static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); +static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, + unsigned long size) +{ + int total_pages; + int npage; + int ret; + + total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE; + + for (npage = 0; npage < total_pages; npage++) { + unsigned long cur_gfn = gfn + npage; + + ret = 
vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1); + WARN_ON(ret != 1); + } +} + +/* Pin a normal or compound guest page for dma. */ +static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, + unsigned long size, struct page **page) +{ + unsigned long base_pfn = 0; + int total_pages; + int npage; + int ret; + + total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE; + /* + * We pin the pages one-by-one to avoid allocating a big array + * on the stack to hold pfns. + */ + for (npage = 0; npage < total_pages; npage++) { + unsigned long cur_gfn = gfn + npage; + unsigned long pfn; + + ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1, + IOMMU_READ | IOMMU_WRITE, &pfn); + if (ret != 1) { + gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", + cur_gfn, ret); + goto err; + } + + if (!pfn_valid(pfn)) { + gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn); + npage++; + ret = -EFAULT; + goto err; + } + + if (npage == 0) + base_pfn = pfn; + else if (base_pfn + npage != pfn) { + gvt_vgpu_err("The pages are not continuous\n"); + ret = -EINVAL; + npage++; + goto err; + } + } + + *page = pfn_to_page(base_pfn); + return 0; +err: + gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); + return ret; +} + static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, - dma_addr_t *dma_addr) + dma_addr_t *dma_addr, unsigned long size) { struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; - struct page *page; - unsigned long pfn; + struct page *page = NULL; int ret; - /* Pin the page first. */ - ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1, - IOMMU_READ | IOMMU_WRITE, &pfn); - if (ret != 1) { - gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", - gfn, ret); - return -EINVAL; - } + ret = gvt_pin_guest_page(vgpu, gfn, size, &page); + if (ret) + return ret; if (!pfn_valid(pfn)) { gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn); @@ -130,27 +192,24 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, }
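
gvt_pin_guest_page() above pins roundup(size, PAGE_SIZE) / PAGE_SIZE pages one at a time. A quick standalone check of that arithmetic (assuming 4096-byte pages):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Same result as the kernel's roundup(size, PAGE_SIZE) / PAGE_SIZE. */
    static unsigned long total_pages(unsigned long size)
    {
        return (size + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
        printf("4K  -> %lu pin(s)\n", total_pages(4096));      /* 1 */
        printf("64K -> %lu pin(s)\n", total_pages(64 * 1024)); /* 16 */
        printf("2M  -> %lu pin(s)\n", total_pages(2UL << 20)); /* 512 */
        return 0;
    }

/* Setup DMA mapping. 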
*/ - page = pfn_to_page(pfn); - *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, *dma_addr)) { - gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn); - vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1); - return -ENOMEM; + *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL); + ret = dma_mapping_error(dev, *dma_addr); + if (ret) { + gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n", + page_to_pfn(page), ret); + gvt_unpin_guest_page(vgpu, gfn, size); } - return 0; + return ret; } static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, - dma_addr_t dma_addr) + dma_addr_t dma_addr, unsigned long size) { struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; - int ret; - dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1); - WARN_ON(ret != 1); + dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); + gvt_unpin_guest_page(vgpu, gfn, size); } static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, @@ -191,7 +250,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) } static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, - dma_addr_t dma_addr) + dma_addr_t dma_addr, unsigned long size) { struct gvt_dma *new, *itr; struct rb_node **link, *parent = NULL; @@ -203,6 +262,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, new->vgpu = vgpu; new->gfn = gfn; new->dma_addr = dma_addr; + new->size = size; kref_init(&new->ref); /* gfn_cache maps gfn to struct gvt_dma. */ @@ -260,7 +320,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) break; } dma = rb_entry(node, struct gvt_dma, gfn_node); - gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr); + gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); __gvt_cache_remove_entry(vgpu, dma); mutex_unlock(&vgpu->vdev.cache_lock); } @@ -515,7 +575,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, if (!entry) continue; - gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr); + gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr, + entry->size); __gvt_cache_remove_entry(vgpu, entry); } mutex_unlock(&vgpu->vdev.cache_lock); @@ -1648,7 +1709,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) } int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, - dma_addr_t *dma_addr) + unsigned long size, dma_addr_t *dma_addr) { struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; @@ -1665,11 +1726,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, entry = __gvt_cache_find_gfn(info->vgpu, gfn); if (!entry) { - ret = gvt_dma_map_page(vgpu, gfn, dma_addr); + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); if (ret) goto err_unlock; - ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr); + ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; } else { @@ -1681,7 +1742,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, return 0; err_unmap: - gvt_dma_unmap_page(vgpu, gfn, *dma_addr); + gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); err_unlock: mutex_unlock(&info->vgpu->vdev.cache_lock); return ret; @@ -1691,7 +1752,8 @@ static void __gvt_dma_release(struct kref *ref) { struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); - gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr); + gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr, + 
entry->size); __gvt_cache_remove_entry(entry->vgpu, entry); } diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 32ffcd566cdd..67f19992b226 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn( /** * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page * @vgpu: a vGPU - * @gpfn: guest pfn + * @gfn: guest pfn + * @size: page size * @dma_addr: retrieve allocated dma addr * * Returns: * 0 on success, negative error code if failed. */ static inline int intel_gvt_hypervisor_dma_map_guest_page( - struct intel_vgpu *vgpu, unsigned long gfn, + struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { - return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, + return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size, dma_addr); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 928818f218f7..b0e566956b8d 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -476,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) i915_gem_obj_finish_shmem_access(bb->obj); bb->accessing = false; - i915_vma_move_to_active(bb->vma, workload->req, 0); + ret = i915_vma_move_to_active(bb->vma, + workload->req, + 0); + if (ret) + goto err; } } return 0; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 83a43970783f..f6fa916517c3 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION; + vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT; vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) = vgpu_aperture_gmadr_base(vgpu); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index c400f42a54ec..b3aefd623557 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1659,11 +1659,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused) else seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); - if (fbc->work.scheduled) - seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n", - fbc->work.scheduled_vblank, - drm_crtc_vblank_count(&fbc->crtc->base)); - if (intel_fbc_is_active(dev_priv)) { u32 mask; @@ -2597,31 +2592,9 @@ static const struct file_operations i915_guc_log_relay_fops = { .release = i915_guc_log_relay_release, }; -static const char *psr2_live_status(u32 val) -{ - static const char * const live_status[] = { - "IDLE", - "CAPTURE", - "CAPTURE_FS", - "SLEEP", - "BUFON_FW", - "ML_UP", - "SU_STANDBY", - "FAST_SLEEP", - "DEEP_SLEEP", - "BUF_ON", - "TG_ON" - }; - - val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; - if (val < ARRAY_SIZE(live_status)) - return live_status[val]; - - return "unknown"; -} - -static const char *psr_sink_status(u8 val) +static int i915_psr_sink_status_show(struct seq_file *m, void *data) { + u8 val; static const char * const sink_status[] = { "inactive", "transition to active, capture and display", @@ -2630,14 +2603,79 @@ static const char *psr_sink_status(u8 val) "transition to inactive, capture and display, timing re-sync", "reserved", "reserved", - "sink internal error" + 
"sink internal error", }; + struct drm_connector *connector = m->private; + struct intel_dp *intel_dp = + enc_to_intel_dp(&intel_attached_encoder(connector)->base); - val &= DP_PSR_SINK_STATE_MASK; - if (val < ARRAY_SIZE(sink_status)) - return sink_status[val]; + if (connector->status != connector_status_connected) + return -ENODEV; - return "unknown"; + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) == 1) { + const char *str = "unknown"; + + val &= DP_PSR_SINK_STATE_MASK; + if (val < ARRAY_SIZE(sink_status)) + str = sink_status[val]; + seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); + } else { + DRM_ERROR("dpcd read (at %u) failed\n", DP_PSR_STATUS); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); + +static void +psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) +{ + u32 val, psr_status; + + if (dev_priv->psr.psr2_enabled) { + static const char * const live_status[] = { + "IDLE", + "CAPTURE", + "CAPTURE_FS", + "SLEEP", + "BUFON_FW", + "ML_UP", + "SU_STANDBY", + "FAST_SLEEP", + "DEEP_SLEEP", + "BUF_ON", + "TG_ON" + }; + psr_status = I915_READ(EDP_PSR2_STATUS); + val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >> + EDP_PSR2_STATUS_STATE_SHIFT; + if (val < ARRAY_SIZE(live_status)) { + seq_printf(m, "Source PSR status: 0x%x [%s]\n", + psr_status, live_status[val]); + return; + } + } else { + static const char * const live_status[] = { + "IDLE", + "SRDONACK", + "SRDENT", + "BUFOFF", + "BUFON", + "AUXACK", + "SRDOFFACK", + "SRDENT_ON", + }; + psr_status = I915_READ(EDP_PSR_STATUS); + val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >> + EDP_PSR_STATUS_STATE_SHIFT; + if (val < ARRAY_SIZE(live_status)) { + seq_printf(m, "Source PSR status: 0x%x [%s]\n", + psr_status, live_status[val]); + return; + } + } + + seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown"); } static int i915_edp_psr_status(struct seq_file *m, void *data) @@ -2681,21 +2719,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) seq_printf(m, "Performance_Counter: %u\n", psrperf); } - if (dev_priv->psr.psr2_enabled) { - u32 psr2 = I915_READ(EDP_PSR2_STATUS); - - seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n", - psr2, psr2_live_status(psr2)); - } - if (dev_priv->psr.enabled) { - struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux; - u8 val; - - if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1) - seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, - psr_sink_status(val)); - } + psr_source_status(dev_priv, m); mutex_unlock(&dev_priv->psr.lock); if (READ_ONCE(dev_priv->psr.debug)) { @@ -2742,86 +2767,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, i915_edp_psr_debug_get, i915_edp_psr_debug_set, "%llu\n"); -static int i915_sink_crc(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp = NULL; - struct drm_modeset_acquire_ctx ctx; - int ret; - u8 crc[6]; - - drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); - - drm_connector_list_iter_begin(dev, &conn_iter); - - for_each_intel_connector_iter(connector, &conn_iter) { - struct drm_crtc *crtc; - struct drm_connector_state *state; - struct intel_crtc_state *crtc_state; - - if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) - continue; - -retry: - ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); - if (ret) - goto err; - - state = connector->base.state; - if 
(!state->best_encoder) - continue; - - crtc = state->crtc; - ret = drm_modeset_lock(&crtc->mutex, &ctx); - if (ret) - goto err; - - crtc_state = to_intel_crtc_state(crtc->state); - if (!crtc_state->base.active) - continue; - - /* - * We need to wait for all crtc updates to complete, to make - * sure any pending modesets and plane updates are completed. - */ - if (crtc_state->base.commit) { - ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done); - - if (ret) - goto err; - } - - intel_dp = enc_to_intel_dp(state->best_encoder); - - ret = intel_dp_sink_crc(intel_dp, crtc_state, crc); - if (ret) - goto err; - - seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", - crc[0], crc[1], crc[2], - crc[3], crc[4], crc[5]); - goto out; - -err: - if (ret == -EDEADLK) { - ret = drm_modeset_backoff(&ctx); - if (!ret) - goto retry; - } - goto out; - } - ret = -ENODEV; -out: - drm_connector_list_iter_end(&conn_iter); - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - - return ret; -} - static int i915_energy_uJ(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -4086,7 +4031,8 @@ fault_irq_set(struct drm_i915_private *i915, err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED | - I915_WAIT_INTERRUPTIBLE); + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (err) goto err_unlock; @@ -4191,7 +4137,8 @@ i915_drop_caches_set(void *data, u64 val) if (val & DROP_ACTIVE) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (val & DROP_RETIRE) i915_retire_requests(dev_priv); @@ -4765,7 +4712,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_ppgtt_info", i915_ppgtt_info, 0}, {"i915_llc", i915_llc, 0}, {"i915_edp_psr_status", i915_edp_psr_status, 0}, - {"i915_sink_crc_eDP1", i915_sink_crc, 0}, {"i915_energy_uJ", i915_energy_uJ, 0}, {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, @@ -4799,7 +4745,6 @@ static const struct i915_debugfs_files { #endif {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, {"i915_next_seqno", &i915_next_seqno_fops}, - {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, @@ -4819,7 +4764,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) { struct drm_minor *minor = dev_priv->drm.primary; struct dentry *ent; - int ret, i; + int i; ent = debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root, to_i915(minor->dev), @@ -4827,10 +4772,6 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) if (!ent) return -ENOMEM; - ret = intel_pipe_crc_create(minor); - if (ret) - return ret; - for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { ent = debugfs_create_file(i915_debugfs_files[i].name, S_IRUGO | S_IWUSR, @@ -4952,9 +4893,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector) debugfs_create_file("i915_dpcd", S_IRUGO, root, connector, &i915_dpcd_fops); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { debugfs_create_file("i915_panel_timings", S_IRUGO, root, connector, &i915_panel_fops); + debugfs_create_file("i915_psr_sink_status", S_IRUGO, root, + connector, &i915_psr_sink_status_fops); + } return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c 
b/drivers/gpu/drm/i915/i915_drv.c index beb0951001ce..f8cfd16be534 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -104,8 +104,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, vaf.fmt = fmt; vaf.va = &args; - dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", - __builtin_return_address(0), &vaf); + if (is_error) + dev_printk(level, kdev, "%pV", &vaf); + else + dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", + __builtin_return_address(0), &vaf); + + va_end(args); if (is_error && !shown_bug_once) { /* @@ -117,8 +122,6 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, dev_notice(kdev, "%s", FDO_BUG_MSG); shown_bug_once = true; } - - va_end(args); } /* Map PCH device id to PCH type, or PCH_NONE if unknown. */ @@ -679,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev) ret = i915_gem_init(dev_priv); if (ret) - goto cleanup_irq; + goto cleanup_modeset; intel_setup_overlay(dev_priv); @@ -699,6 +702,8 @@ cleanup_gem: if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); i915_gem_fini(dev_priv); +cleanup_modeset: + intel_modeset_cleanup(dev); cleanup_irq: drm_irq_uninstall(dev); intel_teardown_gmbus(dev_priv); @@ -895,7 +900,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, spin_lock_init(&dev_priv->uncore.lock); mutex_init(&dev_priv->sb_lock); - mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); @@ -1149,8 +1153,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) intel_uncore_sanitize(dev_priv); - intel_opregion_setup(dev_priv); - i915_gem_load_init_fences(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the @@ -1165,6 +1167,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * get lost on g4x as well, and interrupt delivery seems to stay * properly dead afterwards. So we'll just disable them for all * pre-gen5 chipsets. + * + * dp aux and gmbus irq on gen4 seems to be able to generate legacy + * interrupts even when in MSI mode. This results in spurious + * interrupt warnings if the legacy irq no. is shared with another + * device. The kernel then disables that interrupt source and so + * prevents the other device from working properly. 
*/ if (INTEL_GEN(dev_priv) >= 5) { if (pci_enable_msi(pdev) < 0) @@ -1173,10 +1181,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = intel_gvt_init(dev_priv); if (ret) - goto err_ggtt; + goto err_msi; + + intel_opregion_setup(dev_priv); return 0; +err_msi: + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pm_qos_remove_request(&dev_priv->pm_qos); err_ggtt: i915_ggtt_cleanup_hw(dev_priv); err_perf: @@ -1409,6 +1423,7 @@ out_fini: drm_dev_fini(&dev_priv->drm); out_free: kfree(dev_priv); + pci_set_drvdata(pdev, NULL); return ret; } @@ -1554,11 +1569,6 @@ static int i915_drm_suspend(struct drm_device *dev) struct pci_dev *pdev = dev_priv->drm.pdev; pci_power_t opregion_target_state; - /* ignore lid events during suspend */ - mutex_lock(&dev_priv->modeset_restore_lock); - dev_priv->modeset_restore = MODESET_SUSPENDED; - mutex_unlock(&dev_priv->modeset_restore_lock); - disable_rpm_wakeref_asserts(dev_priv); /* We do a lot of poking in a lot of registers, make sure they work @@ -1571,7 +1581,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_display_suspend(dev); - intel_dp_mst_suspend(dev); + intel_dp_mst_suspend(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv); intel_hpd_cancel_work(dev_priv); @@ -1736,7 +1746,7 @@ static int i915_drm_resume(struct drm_device *dev) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_dp_mst_resume(dev); + intel_dp_mst_resume(dev_priv); intel_display_resume(dev); @@ -1754,10 +1764,6 @@ static int i915_drm_resume(struct drm_device *dev) intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); - mutex_lock(&dev_priv->modeset_restore_lock); - dev_priv->modeset_restore = MODESET_DONE; - mutex_unlock(&dev_priv->modeset_restore_lock); - intel_opregion_notify_adapter(dev_priv, PCI_D0); enable_rpm_wakeref_asserts(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f4751b383858..0f49f9988dfa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -86,8 +86,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20180620" -#define DRIVER_TIMESTAMP 1529529048 +#define DRIVER_DATE "20180719" +#define DRIVER_TIMESTAMP 1532015279 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions @@ -299,7 +299,6 @@ struct i915_hotplug { u32 event_bits; struct delayed_work reenable_work; - struct intel_digital_port *irq_port[I915_MAX_PORTS]; u32 long_port_mask; u32 short_port_mask; struct work_struct dig_port_work; @@ -512,6 +511,7 @@ struct intel_fbc { bool enabled; bool active; + bool flip_pending; bool underrun_detected; struct work_struct underrun_work; @@ -579,12 +579,6 @@ struct intel_fbc { unsigned int gen9_wa_cfb_stride; } params; - struct intel_fbc_work { - bool scheduled; - u64 scheduled_vblank; - struct work_struct work; - } work; - const char *no_fbc_reason; }; @@ -631,14 +625,6 @@ struct i915_psr { bool debug; ktime_t last_entry_attempt; ktime_t last_exit; - - void (*enable_source)(struct intel_dp *, - const struct intel_crtc_state *); - void (*disable_source)(struct intel_dp *, - const struct intel_crtc_state *); - void (*enable_sink)(struct intel_dp *); - void (*activate)(struct intel_dp *); - void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *); }; enum intel_pch { @@ -663,6 +649,7 @@ enum intel_sbi_destination { #define QUIRK_BACKLIGHT_PRESENT (1<<3) #define 
QUIRK_PIN_SWIZZLED_PAGES (1<<5) #define QUIRK_INCREASE_T12_DELAY (1<<6) +#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) struct intel_fbdev; struct intel_fbc_work; @@ -965,7 +952,7 @@ struct i915_gem_mm { /** * Small stash of WC pages */ - struct pagevec wc_stash; + struct pagestash wc_stash; /** * tmpfs instance used for shmem backed objects @@ -1015,12 +1002,6 @@ struct i915_gem_mm { #define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */ -enum modeset_restore { - MODESET_ON_LID_OPEN, - MODESET_DONE, - MODESET_SUSPENDED, -}; - #define DP_AUX_A 0x40 #define DP_AUX_B 0x10 #define DP_AUX_C 0x20 @@ -1284,20 +1265,11 @@ enum intel_pipe_crc_source { INTEL_PIPE_CRC_SOURCE_MAX, }; -struct intel_pipe_crc_entry { - uint32_t frame; - uint32_t crc[5]; -}; - #define INTEL_PIPE_CRC_ENTRIES_NR 128 struct intel_pipe_crc { spinlock_t lock; - bool opened; /* exclusive access to the result file */ - struct intel_pipe_crc_entry *entries; - enum intel_pipe_crc_source source; - int head, tail; - wait_queue_head_t wq; int skipped; + enum intel_pipe_crc_source source; }; struct i915_frontbuffer_tracking { @@ -1752,12 +1724,9 @@ struct drm_i915_private { unsigned long quirks; - enum modeset_restore modeset_restore; - struct mutex modeset_restore_lock; struct drm_atomic_state *modeset_restore_state; struct drm_modeset_acquire_ctx reset_ctx; - struct list_head vm_list; /* Global list of all address spaces */ struct i915_ggtt ggtt; /* VM representing the global address space */ struct i915_gem_mm mm; @@ -2326,6 +2295,7 @@ intel_info(const struct drm_i915_private *dev_priv) } #define INTEL_INFO(dev_priv) intel_info((dev_priv)) +#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) @@ -2578,17 +2548,10 @@ intel_info(const struct drm_i915_private *dev_priv) (IS_CANNONLAKE(dev_priv) || \ IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) -/* - * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts - * even when in MSI mode. This results in spurious interrupt warnings if the - * legacy irq no. is shared with another device. The kernel then disables that - * interrupt source and so prevents the other device from working properly. - * - * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX - * interrupts. - */ -#define HAS_AUX_IRQ(dev_priv) true #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) +#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ + IS_GEMINILAKE(dev_priv) || \ + IS_KABYLAKE(dev_priv)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
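
A side note on the i915_drv.h hunk above: quirks stay a plain bitmask on dev_priv->quirks, with the new QUIRK_INCREASE_DDI_DISABLED_TIME at bit 7. A toy sketch of how such a flag is tested (bit values copied from the header; the actual check site in the DDI code is not part of this hunk):

    #include <stdio.h>

    #define QUIRK_BACKLIGHT_PRESENT          (1 << 3)
    #define QUIRK_INCREASE_T12_DELAY         (1 << 6)
    #define QUIRK_INCREASE_DDI_DISABLED_TIME (1 << 7)

    int main(void)
    {
        unsigned long quirks = QUIRK_INCREASE_DDI_DISABLED_TIME;

        if (quirks & QUIRK_INCREASE_DDI_DISABLED_TIME)
            printf("extending the DDI disabled time for this panel\n");
        return 0;
    }
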
@@ -2771,8 +2734,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); -enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv, - enum hpd_pin pin); enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, enum port port); bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); @@ -3119,9 +3080,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) } int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); -void i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags); int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -3189,7 +3147,7 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_fini(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, - unsigned int flags); + unsigned int flags, long timeout); int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); @@ -3339,7 +3297,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915, unsigned long i915_gem_shrink_all(struct drm_i915_private *i915); void i915_gem_shrinker_register(struct drm_i915_private *i915); void i915_gem_shrinker_unregister(struct drm_i915_private *i915); - +void i915_gem_shrinker_taints_mutex(struct mutex *mutex); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 858d188dd33b..fcc73a6ab503 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -802,7 +802,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) * that was!). */ - wmb(); + i915_gem_chipset_flush(dev_priv); intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->uncore.lock); @@ -837,6 +837,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) } break; + case I915_GEM_DOMAIN_WC: + wmb(); + break; + case I915_GEM_DOMAIN_CPU: i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); break; @@ -1623,6 +1627,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto err; } + /* Writes not allowed into this read-only object */ + if (i915_gem_object_is_readonly(obj)) { + ret = -EINVAL; + goto err; + } + trace_i915_gem_object_pwrite(obj, args->offset, args->size); ret = -ENODEV; @@ -2006,9 +2016,12 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) bool write = !!(vmf->flags & FAULT_FLAG_WRITE); struct i915_vma *vma; pgoff_t page_offset; - unsigned int flags; int ret; + /* Sanity check that we allow writing into this object */ + if (i915_gem_object_is_readonly(obj) && write) + return VM_FAULT_SIGBUS; + /* We don't use vmf->pgoff since that has the fake offset */ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; @@ -2042,27 +2055,34 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_unlock; } - /* If the object is smaller than a couple of partial vma, it is - * not worth only creating a single partial vma - we may as well - * clear enough space for the full object. 
- */ - flags = PIN_MAPPABLE; - if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) - flags |= PIN_NONBLOCK | PIN_NONFAULT; /* Now pin it into the GTT as needed */ - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | + PIN_NONBLOCK | + PIN_NONFAULT); if (IS_ERR(vma)) { /* Use a partial view if it is bigger than available space */ struct i915_ggtt_view view = compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); + unsigned int flags; + + flags = PIN_MAPPABLE; + if (view.type == I915_GGTT_VIEW_NORMAL) + flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ - /* Userspace is now writing through an untracked VMA, abandon + /* + * Userspace is now writing through an untracked VMA, abandon * all hope that the hardware is able to track future writes. */ obj->frontbuffer_ggtt_origin = ORIGIN_CPU; - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + if (IS_ERR(vma) && !view.type) { + flags = PIN_MAPPABLE; + view.type = I915_GGTT_VIEW_PARTIAL; + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + } } if (IS_ERR(vma)) { ret = PTR_ERR(vma); @@ -2114,6 +2134,7 @@ err: */ if (!i915_terminally_wedged(&dev_priv->gpu_error)) return VM_FAULT_SIGBUS; + /* else: fall through */ case -EAGAIN: /* * EAGAIN means the gpu is hung and we'll wait for the error @@ -2256,7 +2277,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) /* Attempt to reap some mmap space from dead objects */ do { - err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); + err = i915_gem_wait_for_idle(dev_priv, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (err) break; @@ -3074,25 +3097,6 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) return err; } -static void skip_request(struct i915_request *request) -{ - void *vaddr = request->ring->vaddr; - u32 head; - - /* As this request likely depends on state from the lost - * context, clear out all the user operations leaving the - * breadcrumb at the end (so we get the fence notifications). - */ - head = request->head; - if (request->postfix < head) { - memset(vaddr + head, 0, request->ring->size - head); - head = 0; - } - memset(vaddr + head, 0, request->postfix - head); - - dma_fence_set_error(&request->fence, -EIO); -} - static void engine_skip_context(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; @@ -3103,14 +3107,14 @@ static void engine_skip_context(struct i915_request *request) GEM_BUG_ON(timeline == &engine->timeline); spin_lock_irqsave(&engine->timeline.lock, flags); - spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING); + spin_lock(&timeline->lock); list_for_each_entry_continue(request, &engine->timeline.requests, link) if (request->gem_context == hung_ctx) - skip_request(request); + i915_request_skip(request, -EIO); list_for_each_entry(request, &timeline->requests, link) - skip_request(request); + i915_request_skip(request, -EIO); spin_unlock(&timeline->lock); spin_unlock_irqrestore(&engine->timeline.lock, flags); @@ -3153,7 +3157,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine, if (stalled) { i915_gem_context_mark_guilty(request->gem_context); - skip_request(request); + i915_request_skip(request, -EIO); /* If this context is now banned, skip all pending requests. 
*/ if (i915_gem_context_is_banned(request->gem_context)) @@ -3750,14 +3754,14 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) return ret; } -static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) +static long wait_for_timeline(struct i915_timeline *tl, + unsigned int flags, long timeout) { struct i915_request *rq; - long ret; rq = i915_gem_active_get_unlocked(&tl->last_request); if (!rq) - return 0; + return timeout; /* * "Race-to-idle". @@ -3771,10 +3775,10 @@ static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) if (flags & I915_WAIT_FOR_IDLE_BOOST) gen6_rps_boost(rq, NULL); - ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT); + timeout = i915_request_wait(rq, flags, timeout); i915_request_put(rq); - return ret < 0 ? ret : 0; + return timeout; } static int wait_for_engines(struct drm_i915_private *i915) @@ -3790,10 +3794,12 @@ static int wait_for_engines(struct drm_i915_private *i915) return 0; } -int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) +int i915_gem_wait_for_idle(struct drm_i915_private *i915, + unsigned int flags, long timeout) { - GEM_TRACE("flags=%x (%s)\n", - flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked"); + GEM_TRACE("flags=%x (%s), timeout=%ld%s\n", + flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", + timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : ""); /* If the device is asleep, we have no requests outstanding */ if (!READ_ONCE(i915->gt.awake)) @@ -3806,27 +3812,31 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) lockdep_assert_held(&i915->drm.struct_mutex); list_for_each_entry(tl, &i915->gt.timelines, link) { - err = wait_for_timeline(tl, flags); - if (err) - return err; + timeout = wait_for_timeline(tl, flags, timeout); + if (timeout < 0) + return timeout; } + + err = wait_for_engines(i915); + if (err) + return err; + i915_retire_requests(i915); GEM_BUG_ON(i915->gt.active_requests); - - return wait_for_engines(i915); } else { struct intel_engine_cs *engine; enum intel_engine_id id; - int err; for_each_engine(engine, i915, id) { - err = wait_for_timeline(&engine->timeline, flags); - if (err) - return err; - } + struct i915_timeline *tl = &engine->timeline; - return 0; + timeout = wait_for_timeline(tl, flags, timeout); + if (timeout < 0) + return timeout; + } } + + return 0; } static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) @@ -5029,65 +5039,70 @@ void i915_gem_sanitize(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); } -int i915_gem_suspend(struct drm_i915_private *dev_priv) +int i915_gem_suspend(struct drm_i915_private *i915) { - struct drm_device *dev = &dev_priv->drm; int ret; GEM_TRACE("\n"); - intel_runtime_pm_get(dev_priv); - intel_suspend_gt_powersave(dev_priv); + intel_runtime_pm_get(i915); + intel_suspend_gt_powersave(i915); - mutex_lock(&dev->struct_mutex); + mutex_lock(&i915->drm.struct_mutex); - /* We have to flush all the executing contexts to main memory so + /* + * We have to flush all the executing contexts to main memory so * that they can saved in the hibernation image. To ensure the last * context image is coherent, we have to switch away from it. That - * leaves the dev_priv->kernel_context still active when + * leaves the i915->kernel_context still active when * we actually suspend, and its image in memory may not match the GPU * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. 
*/ - if (!i915_terminally_wedged(&dev_priv->gpu_error)) { - ret = i915_gem_switch_to_kernel_context(dev_priv); + if (!i915_terminally_wedged(&i915->gpu_error)) { + ret = i915_gem_switch_to_kernel_context(i915); if (ret) goto err_unlock; - ret = i915_gem_wait_for_idle(dev_priv, + ret = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | - I915_WAIT_FOR_IDLE_BOOST); + I915_WAIT_FOR_IDLE_BOOST, + MAX_SCHEDULE_TIMEOUT); if (ret && ret != -EIO) goto err_unlock; - assert_kernel_context_is_current(dev_priv); + assert_kernel_context_is_current(i915); } - mutex_unlock(&dev->struct_mutex); + i915_retire_requests(i915); /* ensure we flush after wedging */ - intel_uc_suspend(dev_priv); + mutex_unlock(&i915->drm.struct_mutex); + + intel_uc_suspend(i915); - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - cancel_delayed_work_sync(&dev_priv->gt.retire_work); + cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); + cancel_delayed_work_sync(&i915->gt.retire_work); - /* As the idle_work is rearming if it detects a race, play safe and + /* + * As the idle_work is rearming if it detects a race, play safe and * repeat the flush until it is definitely idle. */ - drain_delayed_work(&dev_priv->gt.idle_work); + drain_delayed_work(&i915->gt.idle_work); - /* Assert that we sucessfully flushed all the work and + /* + * Assert that we successfully flushed all the work and * reset the GPU back to its idle, low power state. */ - WARN_ON(dev_priv->gt.awake); - if (WARN_ON(!intel_engines_are_idle(dev_priv))) - i915_gem_set_wedged(dev_priv); /* no hope, discard everything */ + WARN_ON(i915->gt.awake); + if (WARN_ON(!intel_engines_are_idle(i915))) + i915_gem_set_wedged(i915); /* no hope, discard everything */ - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(i915); return 0; err_unlock: - mutex_unlock(&dev->struct_mutex); - intel_runtime_pm_put(dev_priv); + mutex_unlock(&i915->drm.struct_mutex); + intel_runtime_pm_put(i915); return ret; } @@ -5310,13 +5325,17 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) ret = __i915_gem_restart_engines(dev_priv); if (ret) goto cleanup_uc; -out: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - return ret; + + return 0; cleanup_uc: intel_uc_fini_hw(dev_priv); - goto out; +out: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + + return ret; } static int __intel_engines_record_defaults(struct drm_i915_private *i915) @@ -5361,9 +5380,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) if (err) goto err_active; - err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); - if (err) + if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) { + i915_gem_set_wedged(i915); + err = -EIO; /* Caller will declare us wedged */ goto err_active; + } assert_kernel_context_is_current(i915); @@ -5426,7 +5447,9 @@ err_active: if (WARN_ON(i915_gem_switch_to_kernel_context(i915))) goto out_ctx; - if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED))) + if (WARN_ON(i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT))) goto out_ctx; i915_gem_contexts_lost(i915); @@ -5456,13 +5479,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) return ret; - ret = intel_wopcm_init(&dev_priv->wopcm); + ret = intel_uc_init_misc(dev_priv); if (ret) return ret; - ret = intel_uc_init_misc(dev_priv); + ret = intel_wopcm_init(&dev_priv->wopcm); if (ret) - return ret; + goto err_uc_misc; /* This is just a security blanket to placate dragons. 
* On some systems, we very sporadically observe that the first TLBs @@ -5543,6 +5566,8 @@ err_init_hw: WARN_ON(i915_gem_suspend(dev_priv)); i915_gem_suspend_late(dev_priv); + i915_gem_drain_workqueue(dev_priv); + mutex_lock(&dev_priv->drm.struct_mutex); intel_uc_fini_hw(dev_priv); err_uc_init: @@ -5560,6 +5585,7 @@ err_unlock: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); +err_uc_misc: intel_uc_fini_misc(dev_priv); if (ret != -EIO) diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 261da577829a..e46592956872 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -88,4 +88,9 @@ static inline void __tasklet_enable_sync_once(struct tasklet_struct *t) tasklet_kill(t); } +static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) +{ + return !atomic_read(&t->count); +} + #endif /* __I915_GEM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index ccf463ab6562..b10770cfccd2 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -374,7 +374,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, if (USES_FULL_PPGTT(dev_priv)) { struct i915_hw_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name); + ppgtt = i915_ppgtt_create(dev_priv, file_priv); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); @@ -512,8 +512,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) } DRM_DEBUG_DRIVER("%s context support initialized\n", - dev_priv->engine[RCS]->context_size ? "logical" : - "fake"); + DRIVER_CAPS(dev_priv)->has_logical_contexts ? + "logical" : "fake"); return 0; } @@ -720,7 +720,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct i915_gem_context *ctx; int ret; - if (!dev_priv->engine[RCS]->context_size) + if (!DRIVER_CAPS(dev_priv)->has_logical_contexts) return -ENODEV; if (args->pad != 0) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 54814a196ee4..02b83a5ed96c 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915) err = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 60dc2a865f5f..3f0c612d42e7 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -66,6 +66,15 @@ enum { #define __I915_EXEC_ILLEGAL_FLAGS \ (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK) +/* Catch emission of unexpected errors for CI! */ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +#undef EINVAL +#define EINVAL ({ \ + DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \ + 22; \ +}) +#endif + /** * DOC: User command execution * @@ -534,7 +543,8 @@ eb_add_vma(struct i915_execbuffer *eb, * paranoia do it everywhere. 
*/ if (i == batch_idx) { - if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) + if (entry->relocation_count && + !(eb->flags[i] & EXEC_OBJECT_PINNED)) eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; if (eb->reloc_cache.has_fence) eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; @@ -1155,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, goto err_request; GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); - i915_vma_move_to_active(batch, rq, 0); - reservation_object_lock(batch->resv, NULL); - reservation_object_add_excl_fence(batch->resv, &rq->fence); - reservation_object_unlock(batch->resv); - i915_vma_unpin(batch); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - reservation_object_lock(vma->resv, NULL); - reservation_object_add_excl_fence(vma->resv, &rq->fence); - reservation_object_unlock(vma->resv); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; rq->batch = batch; + i915_vma_unpin(batch); cache->rq = rq; cache->rq_cmd = cmd; @@ -1175,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, /* Return with batch mapping (cmd) still pinned */ return 0; +skip_request: + i915_request_skip(rq, err); err_request: i915_request_add(rq); err_unpin: @@ -1761,25 +1771,6 @@ slow: return eb_relocate_slow(eb); } -static void eb_export_fence(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) -{ - struct reservation_object *resv = vma->resv; - - /* - * Ignore errors from failing to allocate the new fence, we can't - * handle an error right now. Worst case should be missed - * synchronisation leading to rendering corruption. - */ - reservation_object_lock(resv, NULL); - if (flags & EXEC_OBJECT_WRITE) - reservation_object_add_excl_fence(resv, &rq->fence); - else if (reservation_object_reserve_shared(resv) == 0) - reservation_object_add_shared_fence(resv, &rq->fence); - reservation_object_unlock(resv); -} - static int eb_move_to_gpu(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; @@ -1833,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) unsigned int flags = eb->flags[i]; struct i915_vma *vma = eb->vma[i]; - i915_vma_move_to_active(vma, eb->request, flags); - eb_export_fence(vma, eb->request, flags); + err = i915_vma_move_to_active(vma, eb->request, flags); + if (unlikely(err)) { + i915_request_skip(eb->request, err); + return err; + } __eb_unreserve_vma(vma, flags); vma->exec_flags = NULL; @@ -1874,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) return true; } -void i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) -{ - struct drm_i915_gem_object *obj = vma->obj; - const unsigned int idx = rq->engine->id; - - lockdep_assert_held(&rq->i915->drm.struct_mutex); - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - - /* - * Add a reference if we're newly entering the active list. - * The order in which we add operations to the retirement queue is - * vital here: mark_active adds to the start of the callback list, - * such that subsequent callbacks are called first. Therefore we - * add the active reference first and queue for it to be dropped - * *last*. 
- */ - if (!i915_vma_is_active(vma)) - obj->active_count++; - i915_vma_set_active(vma, idx); - i915_gem_active_set(&vma->last_read[idx], rq); - list_move_tail(&vma->vm_link, &vma->vm->active_list); - - obj->write_domain = 0; - if (flags & EXEC_OBJECT_WRITE) { - obj->write_domain = I915_GEM_DOMAIN_RENDER; - - if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) - i915_gem_active_set(&obj->frontbuffer_write, rq); - - obj->read_domains = 0; - } - obj->read_domains |= I915_GEM_GPU_DOMAINS; - - if (flags & EXEC_OBJECT_NEEDS_FENCE) - i915_gem_active_set(&vma->last_fence, rq); -} - static int i915_reset_gen7_sol_offsets(struct i915_request *rq) { u32 *cs; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c6aa761ca085..f00c7fbef79e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -204,9 +204,9 @@ static int ppgtt_bind_vma(struct i915_vma *vma, return err; } - /* Currently applicable only to VLV */ + /* Applicable to VLV, and gen8+ */ pte_flags = 0; - if (vma->obj->gt_ro) + if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); @@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma) } static gen8_pte_t gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + u32 flags) { - gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW; - pte |= addr; + gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + + if (unlikely(flags & PTE_READ_ONLY)) + pte &= ~_PAGE_RW; switch (level) { case I915_CACHE_NONE: @@ -375,37 +378,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr, return pte; } +static void stash_init(struct pagestash *stash) +{ + pagevec_init(&stash->pvec); + spin_lock_init(&stash->lock); +} + +static struct page *stash_pop_page(struct pagestash *stash) +{ + struct page *page = NULL; + + spin_lock(&stash->lock); + if (likely(stash->pvec.nr)) + page = stash->pvec.pages[--stash->pvec.nr]; + spin_unlock(&stash->lock); + + return page; +} + +static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) +{ + int nr; + + spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); + + nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec)); + memcpy(stash->pvec.pages + stash->pvec.nr, + pvec->pages + pvec->nr - nr, + sizeof(pvec->pages[0]) * nr); + stash->pvec.nr += nr; + + spin_unlock(&stash->lock); + + pvec->nr -= nr; +} + static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) { - struct pagevec *pvec = &vm->free_pages; - struct pagevec stash; + struct pagevec stack; + struct page *page; if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) i915_gem_shrink_all(vm->i915); - if (likely(pvec->nr)) - return pvec->pages[--pvec->nr]; + page = stash_pop_page(&vm->free_pages); + if (page) + return page; if (!vm->pt_kmap_wc) return alloc_page(gfp); - /* A placeholder for a specific mutex to guard the WC stash */ - lockdep_assert_held(&vm->i915->drm.struct_mutex); - /* Look in our global stash of WC pages... */ - pvec = &vm->i915->mm.wc_stash; - if (likely(pvec->nr)) - return pvec->pages[--pvec->nr]; + page = stash_pop_page(&vm->i915->mm.wc_stash); + if (page) + return page; /* - * Otherwise batch allocate pages to amoritize cost of set_pages_wc. + * Otherwise batch allocate pages to amortize cost of set_pages_wc. * * We have to be careful as page allocation may trigger the shrinker * (via direct reclaim) which will fill up the WC stash underneath us. 
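/*
 * Illustrative sketch (not part of the patch): the new pagestash above
 * pairs a pagevec with its own spinlock, so the WC page cache no longer
 * relies on struct_mutex, and callers release any overflow outside the
 * lock. A userspace analogue with a pthread mutex and a fixed array; the
 * names and capacity are invented:
 */
#include <pthread.h>
#include <stddef.h>

#define DEMO_STASH_SIZE 15

struct demo_stash {
	pthread_mutex_t lock;
	void *pages[DEMO_STASH_SIZE];
	unsigned int nr;
};

static void *demo_stash_pop(struct demo_stash *s)
{
	void *page = NULL;

	pthread_mutex_lock(&s->lock);
	if (s->nr)
		page = s->pages[--s->nr];
	pthread_mutex_unlock(&s->lock);

	return page;	/* NULL: caller falls back to the real allocator */
}

static void *demo_stash_push(struct demo_stash *s, void *page)
{
	void *overflow = NULL;

	pthread_mutex_lock(&s->lock);
	if (s->nr < DEMO_STASH_SIZE)
		s->pages[s->nr++] = page;
	else
		overflow = page;	/* caller frees this outside the lock */
	pthread_mutex_unlock(&s->lock);

	return overflow;
}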
* So we add our WB pages into a temporary pvec on the stack and merge * them into the WC stash after all the allocations are complete. */ - pagevec_init(&stash); + pagevec_init(&stack); do { struct page *page; @@ -413,59 +449,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) if (unlikely(!page)) break; - stash.pages[stash.nr++] = page; - } while (stash.nr < pagevec_space(pvec)); + stack.pages[stack.nr++] = page; + } while (pagevec_space(&stack)); - if (stash.nr) { - int nr = min_t(int, stash.nr, pagevec_space(pvec)); - struct page **pages = stash.pages + stash.nr - nr; + if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { + page = stack.pages[--stack.nr]; - if (nr && !set_pages_array_wc(pages, nr)) { - memcpy(pvec->pages + pvec->nr, - pages, sizeof(pages[0]) * nr); - pvec->nr += nr; - stash.nr -= nr; - } + /* Merge spare WC pages to the global stash */ + stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); + + /* Push any surplus WC pages onto the local VM stash */ + if (stack.nr) + stash_push_pagevec(&vm->free_pages, &stack); + } - pagevec_release(&stash); + /* Return unwanted leftovers */ + if (unlikely(stack.nr)) { + WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); + __pagevec_release(&stack); } - return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL; + return page; } static void vm_free_pages_release(struct i915_address_space *vm, bool immediate) { - struct pagevec *pvec = &vm->free_pages; + struct pagevec *pvec = &vm->free_pages.pvec; + struct pagevec stack; + lockdep_assert_held(&vm->free_pages.lock); GEM_BUG_ON(!pagevec_count(pvec)); if (vm->pt_kmap_wc) { - struct pagevec *stash = &vm->i915->mm.wc_stash; - - /* When we use WC, first fill up the global stash and then + /* + * When we use WC, first fill up the global stash and then * only if full immediately free the overflow. */ + stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); - lockdep_assert_held(&vm->i915->drm.struct_mutex); - if (pagevec_space(stash)) { - do { - stash->pages[stash->nr++] = - pvec->pages[--pvec->nr]; - if (!pvec->nr) - return; - } while (pagevec_space(stash)); - - /* As we have made some room in the VM's free_pages, - * we can wait for it to fill again. Unless we are - * inside i915_address_space_fini() and must - * immediately release the pages! - */ - if (!immediate) - return; - } + /* + * As we have made some room in the VM's free_pages, + * we can wait for it to fill again. Unless we are + * inside i915_address_space_fini() and must + * immediately release the pages! + */ + if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) + return; + /* + * We have to drop the lock to allow ourselves to sleep, + * so take a copy of the pvec and clear the stash for + * others to use it as we sleep. + */ + stack = *pvec; + pagevec_reinit(pvec); + spin_unlock(&vm->free_pages.lock); + + pvec = &stack; set_pages_array_wb(pvec->pages, pvec->nr); + + spin_lock(&vm->free_pages.lock); } __pagevec_release(pvec); @@ -481,8 +525,45 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) * unconditional might_sleep() for everybody. */ might_sleep(); - if (!pagevec_add(&vm->free_pages, page)) + spin_lock(&vm->free_pages.lock); + if (!pagevec_add(&vm->free_pages.pvec, page)) vm_free_pages_release(vm, false); + spin_unlock(&vm->free_pages.lock); +} + +static void i915_address_space_init(struct i915_address_space *vm, + struct drm_i915_private *dev_priv) +{ + /* + * The vm->mutex must be reclaim safe (for use in the shrinker). 
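/*
 * Illustrative sketch (not part of the patch): vm_free_pages_release()
 * above must call set_pages_array_wb(), which can sleep, but is entered
 * under a spinlock. The fix is to snapshot the pvec, reset the shared
 * copy, and drop the lock before doing the slow work. A generic userspace
 * analogue of that copy-out-then-sleep pattern (names invented):
 */
#include <pthread.h>
#include <string.h>

#define DEMO_CAP 15

struct demo_pool {
	pthread_mutex_t lock;
	int items[DEMO_CAP];
	unsigned int nr;
};

static void slow_release(const int *items, unsigned int nr)
{
	(void)items; (void)nr;	/* stands in for a sleeping operation */
}

static void demo_flush(struct demo_pool *p)
{
	int stack[DEMO_CAP];
	unsigned int nr;

	pthread_mutex_lock(&p->lock);
	nr = p->nr;
	memcpy(stack, p->items, nr * sizeof(stack[0]));
	p->nr = 0;		/* others may refill while we sleep */
	pthread_mutex_unlock(&p->lock);

	slow_release(stack, nr);	/* safe: no lock held */
}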
+ * Do a dummy acquire now under fs_reclaim so that any allocation + * attempt holding the lock is immediately reported by lockdep. + */ + mutex_init(&vm->mutex); + i915_gem_shrinker_taints_mutex(&vm->mutex); + + GEM_BUG_ON(!vm->total); + drm_mm_init(&vm->mm, 0, vm->total); + vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; + + stash_init(&vm->free_pages); + + INIT_LIST_HEAD(&vm->active_list); + INIT_LIST_HEAD(&vm->inactive_list); + INIT_LIST_HEAD(&vm->unbound_list); +} + +static void i915_address_space_fini(struct i915_address_space *vm) +{ + spin_lock(&vm->free_pages.lock); + if (pagevec_count(&vm->free_pages.pvec)) + vm_free_pages_release(vm, true); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); + spin_unlock(&vm->free_pages.lock); + + drm_mm_takedown(&vm->mm); + + mutex_destroy(&vm->mutex); } static int __setup_page_dma(struct i915_address_space *vm, @@ -493,8 +574,11 @@ static int __setup_page_dma(struct i915_address_space *vm, if (unlikely(!p->page)) return -ENOMEM; - p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); + p->daddr = dma_map_page_attrs(vm->dma, + p->page, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_WARN); if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { vm_free_page(vm, p->page); return -ENOMEM; @@ -575,8 +659,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) if (unlikely(!page)) goto skip; - addr = dma_map_page(vm->dma, page, 0, size, - PCI_DMA_BIDIRECTIONAL); + addr = dma_map_page_attrs(vm->dma, + page, 0, size, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_WARN); if (unlikely(dma_mapping_error(vm->dma, addr))) goto free_page; @@ -637,7 +724,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm, struct i915_page_table *pt) { fill_px(vm, pt, - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC)); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0)); } static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt, @@ -785,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, unsigned int pte = gen8_pte_index(start); unsigned int pte_end = pte + num_entries; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); gen8_pte_t *vaddr; GEM_BUG_ON(num_entries > pt->used_ptes); @@ -957,10 +1044,11 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, struct i915_page_directory_pointer *pdp, struct sgt_dma *iter, struct gen8_insert_pte *idx, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, + u32 flags) { struct i915_page_directory *pd; - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level); + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); gen8_pte_t *vaddr; bool ret; @@ -1011,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level cache_level, - u32 unused) + u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, - cache_level); + cache_level, flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; } @@ -1026,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, struct 
i915_page_directory_pointer **pdps, struct sgt_dma *iter, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, + u32 flags) { - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level); + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); u64 start = vma->node.start; dma_addr_t rem = iter->sg->length; @@ -1144,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level cache_level, - u32 unused) + u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { - gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level); + gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level, + flags); } else { struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], - &iter, &idx, cache_level)) + &iter, &idx, cache_level, + flags)) GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -1494,7 +1585,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) { struct i915_address_space *vm = &ppgtt->vm; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); u64 start = 0, length = ppgtt->vm.total; if (use_4lvl(vm)) { @@ -1562,6 +1653,8 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); + kref_init(&ppgtt->ref); + ppgtt->vm.i915 = i915; ppgtt->vm.dma = &i915->drm.pdev->dev; @@ -1569,6 +1662,15 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) 1ULL << 48 : 1ULL << 32; + /* + * From bdw, there is support for read-only pages in the PPGTT. + * + * XXX GVT is not honouring the lack of RW in the PTE bits. + */ + ppgtt->vm.has_read_only = !intel_vgpu_active(i915); + + i915_address_space_init(&ppgtt->vm, i915); + /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. 
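/*
 * Illustrative sketch (not part of the patch): gen8_pte_encode() above
 * grows a flags argument and clears the RW bit when PTE_READ_ONLY is set,
 * since bdw+ PPGTTs honour read-only PTEs. Standalone bit-twiddling demo;
 * the bit positions are assumptions for illustration only:
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_PRESENT	(1ull << 0)
#define DEMO_PAGE_RW		(1ull << 1)
#define DEMO_PTE_READ_ONLY	(1u << 0)

static uint64_t demo_pte_encode(uint64_t addr, uint32_t flags)
{
	uint64_t pte = addr | DEMO_PAGE_PRESENT | DEMO_PAGE_RW;

	if (flags & DEMO_PTE_READ_ONLY)	/* strip write permission */
		pte &= ~DEMO_PAGE_RW;

	return pte;
}

int main(void)
{
	assert(demo_pte_encode(0x1000, 0) & DEMO_PAGE_RW);
	assert(!(demo_pte_encode(0x1000, DEMO_PTE_READ_ONLY) & DEMO_PAGE_RW));
	return 0;
}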
*/ @@ -1996,7 +2098,6 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) struct drm_i915_private *i915 = ppgtt->base.vm.i915; struct i915_ggtt *ggtt = &i915->ggtt; struct i915_vma *vma; - int i; GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(size > ggtt->vm.total); @@ -2005,14 +2106,14 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) if (!vma) return ERR_PTR(-ENOMEM); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - init_request_active(&vma->last_read[i], NULL); init_request_active(&vma->last_fence, NULL); vma->vm = &ggtt->vm; vma->ops = &pd_vma_ops; vma->private = ppgtt; + vma->active = RB_ROOT; + vma->size = size; vma->fence_size = size; vma->flags = I915_VMA_GGTT; @@ -2068,11 +2169,15 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); + kref_init(&ppgtt->base.ref); + ppgtt->base.vm.i915 = i915; ppgtt->base.vm.dma = &i915->drm.pdev->dev; ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE; + i915_address_space_init(&ppgtt->base.vm, i915); + ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; @@ -2105,30 +2210,6 @@ err_free: return ERR_PTR(err); } -static void i915_address_space_init(struct i915_address_space *vm, - struct drm_i915_private *dev_priv, - const char *name) -{ - drm_mm_init(&vm->mm, 0, vm->total); - vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; - - INIT_LIST_HEAD(&vm->active_list); - INIT_LIST_HEAD(&vm->inactive_list); - INIT_LIST_HEAD(&vm->unbound_list); - - list_add_tail(&vm->global_link, &dev_priv->vm_list); - pagevec_init(&vm->free_pages); -} - -static void i915_address_space_fini(struct i915_address_space *vm) -{ - if (pagevec_count(&vm->free_pages)) - vm_free_pages_release(vm, true); - - drm_mm_takedown(&vm->mm); - list_del(&vm->global_link); -} - static void gtt_write_workarounds(struct drm_i915_private *dev_priv) { /* This function is for gtt related workarounds. This function is @@ -2199,8 +2280,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915) struct i915_hw_ppgtt * i915_ppgtt_create(struct drm_i915_private *i915, - struct drm_i915_file_private *fpriv, - const char *name) + struct drm_i915_file_private *fpriv) { struct i915_hw_ppgtt *ppgtt; @@ -2208,8 +2288,6 @@ i915_ppgtt_create(struct drm_i915_private *i915, if (IS_ERR(ppgtt)) return ppgtt; - kref_init(&ppgtt->ref); - i915_address_space_init(&ppgtt->vm, i915, name); ppgtt->vm.file = fpriv; trace_i915_ppgtt_create(&ppgtt->vm); @@ -2397,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, gen8_pte_t __iomem *pte = (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); - gen8_set_pte(pte, gen8_pte_encode(addr, level)); + gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); ggtt->invalidate(vm->i915); } @@ -2405,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level level, - u32 unused) + u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct sgt_iter sgt_iter; gen8_pte_t __iomem *gtt_entries; - const gen8_pte_t pte_encode = gen8_pte_encode(0, level); + const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); dma_addr_t addr; + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. 
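/*
 * Illustrative sketch (not part of the patch): kref_init() moves from
 * i915_ppgtt_create() into the gen8/gen6 constructors above, so a ppgtt
 * never exists without a valid refcount. A minimal single-threaded
 * userspace analogue of that construct-with-reference pattern (a real
 * kref uses atomics; names invented):
 */
#include <stdlib.h>

struct demo_obj {
	int refcount;
};

static struct demo_obj *demo_create(void)
{
	struct demo_obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refcount = 1;	/* initialised by the constructor */
	return o;
}

static void demo_put(struct demo_obj *o)
{
	if (--o->refcount == 0)
		free(o);
}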
+ */ + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; gtt_entries += vma->node.start >> PAGE_SHIFT; for_each_sgt_dma(addr, sgt_iter, vma->pages) @@ -2478,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); gen8_pte_t __iomem *gtt_base = (gen8_pte_t __iomem *)ggtt->gsm + first_entry; const int max_entries = ggtt_total_entries(ggtt) - first_entry; @@ -2539,13 +2622,14 @@ struct insert_entries { struct i915_address_space *vm; struct i915_vma *vma; enum i915_cache_level level; + u32 flags; }; static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) { struct insert_entries *arg = _arg; - gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0); + gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2554,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level level, - u32 unused) + u32 flags) { - struct insert_entries arg = { vm, vma, level }; + struct insert_entries arg = { vm, vma, level, flags }; stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); } @@ -2647,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma, struct drm_i915_gem_object *obj = vma->obj; u32 pte_flags; - /* Currently applicable only to VLV */ + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ pte_flags = 0; - if (obj->gt_ro) + if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; intel_runtime_pm_get(i915); @@ -2687,7 +2771,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, /* Currently applicable only to VLV */ pte_flags = 0; - if (vma->obj->gt_ro) + if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; if (flags & I915_VMA_LOCAL_BIND) { @@ -2739,7 +2823,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt = &dev_priv->ggtt; if (unlikely(ggtt->do_idle_maps)) { - if (i915_gem_wait_for_idle(dev_priv, 0)) { + if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) { DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); @@ -2788,7 +2872,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) struct i915_hw_ppgtt *ppgtt; int err; - ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]"); + ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM)); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -2918,7 +3002,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) ggtt->vm.cleanup(&ggtt->vm); - pvec = &dev_priv->mm.wc_stash; + pvec = &dev_priv->mm.wc_stash.pvec; if (pvec->nr) { set_pages_array_wb(pvec->pages, pvec->nr); __pagevec_release(pvec); @@ -3518,7 +3602,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; - INIT_LIST_HEAD(&dev_priv->vm_list); + stash_init(&dev_priv->mm.wc_stash); /* Note that we use page colouring to enforce a guard page at the * end of the address space. This is required as the CS may prefetch @@ -3526,7 +3610,11 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) * and beyond the end of the GTT if we do not provide a guard. 
*/ mutex_lock(&dev_priv->drm.struct_mutex); - i915_address_space_init(&ggtt->vm, dev_priv, "[global]"); + i915_address_space_init(&ggtt->vm, dev_priv); + + /* Only VLV supports read-only GGTT mappings */ + ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); + if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; mutex_unlock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 9a4824cae68d..2a116a91420b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -270,6 +270,11 @@ struct i915_vma_ops { void (*clear_pages)(struct i915_vma *vma); }; +struct pagestash { + spinlock_t lock; + struct pagevec pvec; +}; + struct i915_address_space { struct drm_mm mm; struct drm_i915_private *i915; @@ -283,12 +288,13 @@ struct i915_address_space { * assign blame. */ struct drm_i915_file_private *file; - struct list_head global_link; u64 total; /* size addr space maps (ex. 2GB for ggtt) */ u64 reserved; /* size addr space reserved */ bool closed; + struct mutex mutex; /* protects vma and our lists */ + struct i915_page_dma scratch_page; struct i915_page_table *scratch_pt; struct i915_page_directory *scratch_pd; @@ -324,8 +330,13 @@ struct i915_address_space { */ struct list_head unbound_list; - struct pagevec free_pages; - bool pt_kmap_wc; + struct pagestash free_pages; + + /* Some systems require uncached updates of the page directories */ + bool pt_kmap_wc:1; + + /* Some systems support read-only mappings for GGTT and/or PPGTT */ + bool has_read_only:1; /* FIXME: Need a more generic return type */ gen6_pte_t (*pte_encode)(dma_addr_t addr, @@ -615,8 +626,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); void i915_ppgtt_release(struct kref *kref); struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *fpriv, - const char *name); + struct drm_i915_file_private *fpriv); void i915_ppgtt_close(struct i915_address_space *vm); static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) { diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 54f00b350779..83e5e01fa9ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -141,7 +141,6 @@ struct drm_i915_gem_object { * Is the object to be mapped as read-only to the GPU * Only honoured if hardware has relevant pte bit */ - unsigned long gt_ro:1; unsigned int cache_level:3; unsigned int cache_coherent:2; #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0) @@ -268,7 +267,6 @@ struct drm_i915_gem_object { union { struct i915_gem_userptr { uintptr_t ptr; - unsigned read_only :1; struct i915_mm_struct *mm; struct i915_mmu_object *mmu_object; @@ -337,26 +335,17 @@ __attribute__((nonnull)) static inline struct drm_i915_gem_object * i915_gem_object_get(struct drm_i915_gem_object *obj) { - drm_gem_object_reference(&obj->base); + drm_gem_object_get(&obj->base); return obj; } -__deprecated -extern void drm_gem_object_reference(struct drm_gem_object *); - __attribute__((nonnull)) static inline void i915_gem_object_put(struct drm_i915_gem_object *obj) { - __drm_gem_object_unreference(&obj->base); + __drm_gem_object_put(&obj->base); } -__deprecated -extern void drm_gem_object_unreference(struct drm_gem_object *); - -__deprecated -extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); - static 
inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) { reservation_object_lock(obj->resv, NULL); @@ -367,6 +356,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) reservation_object_unlock(obj->resv); } +static inline void +i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) +{ + obj->base.vma_node.readonly = true; +} + +static inline bool +i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) +{ + return obj->base.vma_node.readonly; +} + static inline bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 3210cedfa46c..90baf9086d0a 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq) goto err_unpin; } - i915_vma_move_to_active(so.vma, rq, 0); + err = i915_vma_move_to_active(so.vma, rq, 0); err_unpin: i915_vma_unpin(so.vma); err_vma: diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 55e84e71f526..ea90d3a0d511 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -23,6 +23,7 @@ */ #include <linux/oom.h> +#include <linux/sched/mm.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/swap.h> @@ -172,7 +173,9 @@ i915_gem_shrink(struct drm_i915_private *i915, * we will free as much as we can and hope to get a second chance. */ if (flags & I915_SHRINK_ACTIVE) - i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); trace_i915_gem_shrink(i915, target, flags); i915_retire_requests(i915); @@ -392,7 +395,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock, unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms); do { - if (i915_gem_wait_for_idle(i915, 0) == 0 && + if (i915_gem_wait_for_idle(i915, + 0, MAX_SCHEDULE_TIMEOUT) == 0 && shrinker_lock(i915, unlock)) break; @@ -466,7 +470,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr return NOTIFY_DONE; /* Force everything onto the inactive lists */ - ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + ret = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; @@ -526,3 +532,14 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915) WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier)); unregister_shrinker(&i915->mm.shrinker); } + +void i915_gem_shrinker_taints_mutex(struct mutex *mutex) +{ + if (!IS_ENABLED(CONFIG_LOCKDEP)) + return; + + fs_reclaim_acquire(GFP_KERNEL); + mutex_lock(mutex); + mutex_unlock(mutex); + fs_reclaim_release(GFP_KERNEL); +} diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 79a347295e00..53440bf87650 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -254,6 +254,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv, switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { default: MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); + /* fall through */ case GEN7_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; @@ -343,6 +344,35 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, *size = stolen_top - *base; } +static void 
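/*
 * Illustrative sketch (not part of the patch): how a driver-private lock
 * would use the new i915_gem_shrinker_taints_mutex() from the hunk above.
 * fs_reclaim_acquire() makes the one-off lock/unlock at init look as if
 * it happened inside direct reclaim, so lockdep learns the
 * reclaim -> lock ordering immediately instead of on the first real
 * shrinker invocation. Kernel context assumed; "my_cache" is invented:
 */
struct my_cache {
	struct mutex lock;	/* also taken from our shrinker callback */
};

static void my_cache_init(struct my_cache *c)
{
	mutex_init(&c->lock);
	i915_gem_shrinker_taints_mutex(&c->lock);
}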
icl_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED); + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); + + *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; + + switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { + case GEN8_STOLEN_RESERVED_1M: + *size = 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_2M: + *size = 2 * 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_4M: + *size = 4 * 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_8M: + *size = 8 * 1024 * 1024; + break; + default: + *size = 8 * 1024 * 1024; + MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); + } +} + int i915_gem_init_stolen(struct drm_i915_private *dev_priv) { resource_size_t reserved_base, stolen_top; @@ -399,7 +429,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) gen7_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); break; - default: + case 8: + case 9: + case 10: if (IS_LP(dev_priv)) chv_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); @@ -407,6 +439,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) bdw_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); break; + case 11: + default: + icl_get_stolen_reserved(dev_priv, &reserved_base, + &reserved_size); + break; } /* diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 854bd51b9478..dcd6e230d16a 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) struct mm_struct *mm = obj->userptr.mm->mm; unsigned int flags = 0; - if (!obj->userptr.read_only) + if (!i915_gem_object_is_readonly(obj)) flags |= FOLL_WRITE; ret = -EFAULT; @@ -643,7 +643,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) if (pvec) /* defer to worker if malloc fails */ pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, - !obj->userptr.read_only, + !i915_gem_object_is_readonly(obj), pvec); } @@ -789,10 +789,15 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -EFAULT; if (args->flags & I915_USERPTR_READ_ONLY) { - /* On almost all of the current hw, we cannot tell the GPU that a - * page is readonly, so this is just a placeholder in the uAPI. + struct i915_hw_ppgtt *ppgtt; + + /* + * On almost all of the older hw, we cannot tell the GPU that + * a page is readonly. */ - return -ENODEV; + ppgtt = dev_priv->kernel_context->ppgtt; + if (!ppgtt || !ppgtt->vm.has_read_only) + return -ENODEV; } obj = i915_gem_object_alloc(dev_priv); @@ -806,7 +811,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); obj->userptr.ptr = args->user_ptr; - obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY); + if (args->flags & I915_USERPTR_READ_ONLY) + i915_gem_object_set_readonly(obj); /* And keep a pointer to the current->mm for resolving the user pages * at binding. 
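/*
 * Illustrative sketch (not part of the patch): the new
 * icl_get_stolen_reserved() above decodes a register size field and, like
 * the vlv path gaining its "fall through" annotation, keeps a safe
 * fallback plus a MISSING_CASE() warning for unexpected encodings. A
 * standalone demo of that decode-with-loud-fallback shape; the field
 * layout is invented:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SIZE_MASK	(3u << 7)
#define DEMO_SIZE_1M	(0u << 7)
#define DEMO_SIZE_2M	(1u << 7)
#define DEMO_SIZE_4M	(2u << 7)
#define DEMO_SIZE_8M	(3u << 7)

static uint64_t demo_reserved_size(uint32_t reg_val)
{
	switch (reg_val & DEMO_SIZE_MASK) {
	case DEMO_SIZE_1M: return 1ull << 20;
	case DEMO_SIZE_2M: return 2ull << 20;
	case DEMO_SIZE_4M: return 4ull << 20;
	case DEMO_SIZE_8M: return 8ull << 20;
	default:
		/* MISSING_CASE() analogue: warn but keep going */
		fprintf(stderr, "bad size field 0x%x\n",
			reg_val & DEMO_SIZE_MASK);
		return 8ull << 20;
	}
}

int main(void)
{
	return demo_reserved_size(DEMO_SIZE_4M) == (4ull << 20) ? 0 : 1;
}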
This means that we need to hook into the mmu_notifier diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index df524c9cad40..8c81cf3aa182 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -335,21 +335,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, struct drm_i915_error_buffer *err, int count) { - int i; - err_printf(m, "%s [%d]:\n", name, count); while (count--) { - err_printf(m, " %08x_%08x %8u %02x %02x [ ", + err_printf(m, " %08x_%08x %8u %02x %02x %02x", upper_32_bits(err->gtt_offset), lower_32_bits(err->gtt_offset), err->size, err->read_domains, - err->write_domain); - for (i = 0; i < I915_NUM_ENGINES; i++) - err_printf(m, "%02x ", err->rseqno[i]); - - err_printf(m, "] %02x", err->wseqno); + err->write_domain, + err->wseqno); err_puts(m, tiling_flag(err->tiling)); err_puts(m, dirty_flag(err->dirty)); err_puts(m, purgeable_flag(err->purgeable)); @@ -1021,13 +1016,10 @@ static void capture_bo(struct drm_i915_error_buffer *err, struct i915_vma *vma) { struct drm_i915_gem_object *obj = vma->obj; - int i; err->size = obj->base.size; err->name = obj->base.name; - for (i = 0; i < I915_NUM_ENGINES; i++) - err->rseqno[i] = __active_get_seqno(&vma->last_read[i]); err->wseqno = __active_get_seqno(&obj->frontbuffer_write); err->engine = __active_get_engine_id(&obj->frontbuffer_write); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 58910f1dc67c..f893a4e8b783 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -177,7 +177,7 @@ struct i915_gpu_state { struct drm_i915_error_buffer { u32 size; u32 name; - u32 rseqno[I915_NUM_ENGINES], wseqno; + u32 wseqno; u64 gtt_offset; u32 read_domains; u32 write_domain; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 46aaef5c1851..5dadefca2ad2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -122,6 +122,15 @@ static const u32 hpd_gen11[HPD_NUM_PINS] = { [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG }; +static const u32 hpd_icp[HPD_NUM_PINS] = { + [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, + [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, + [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, + [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, + [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, + [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP +}; + /* IIR can theoretically queue up two events. Be paranoid. 
*/ #define GEN8_IRQ_RESET_NDX(type, which) do { \ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ @@ -254,9 +263,9 @@ static u32 gen11_gt_engine_identity(struct drm_i915_private * const i915, const unsigned int bank, const unsigned int bit); -bool gen11_reset_one_iir(struct drm_i915_private * const i915, - const unsigned int bank, - const unsigned int bit) +static bool gen11_reset_one_iir(struct drm_i915_private * const i915, + const unsigned int bank, + const unsigned int bit) { void __iomem * const regs = i915->regs; u32 dw; @@ -1145,21 +1154,21 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) static void notify_ring(struct intel_engine_cs *engine) { + const u32 seqno = intel_engine_get_seqno(engine); struct i915_request *rq = NULL; + struct task_struct *tsk = NULL; struct intel_wait *wait; - if (!engine->breadcrumbs.irq_armed) + if (unlikely(!engine->breadcrumbs.irq_armed)) return; - atomic_inc(&engine->irq_count); - set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); + rcu_read_lock(); spin_lock(&engine->breadcrumbs.irq_lock); wait = engine->breadcrumbs.irq_wait; if (wait) { - bool wakeup = engine->irq_seqno_barrier; - - /* We use a callback from the dma-fence to submit + /* + * We use a callback from the dma-fence to submit * requests after waiting on our own requests. To * ensure minimum delay in queuing the next request to * hardware, signal the fence now rather than wait for @@ -1170,19 +1179,26 @@ static void notify_ring(struct intel_engine_cs *engine) * and to handle coalescing of multiple seqno updates * and many waiters. */ - if (i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno)) { + if (i915_seqno_passed(seqno, wait->seqno)) { struct i915_request *waiter = wait->request; - wakeup = true; - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + if (waiter && + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &waiter->fence.flags) && intel_wait_check_request(wait, waiter)) rq = i915_request_get(waiter); + + tsk = wait->tsk; + } else { + if (engine->irq_seqno_barrier && + i915_seqno_passed(seqno, wait->seqno - 1)) { + set_bit(ENGINE_IRQ_BREADCRUMB, + &engine->irq_posted); + tsk = wait->tsk; + } } - if (wakeup) - wake_up_process(wait->tsk); + engine->breadcrumbs.irq_count++; } else { if (engine->breadcrumbs.irq_armed) __intel_engine_disarm_breadcrumbs(engine); @@ -1190,11 +1206,19 @@ static void notify_ring(struct intel_engine_cs *engine) spin_unlock(&engine->breadcrumbs.irq_lock); if (rq) { - dma_fence_signal(&rq->fence); + spin_lock(&rq->lock); + dma_fence_signal_locked(&rq->fence); GEM_BUG_ON(!i915_request_completed(rq)); + spin_unlock(&rq->lock); + i915_request_put(rq); } + if (tsk && tsk->state & TASK_NORMAL) + wake_up_process(tsk); + + rcu_read_unlock(); + trace_intel_engine_notify(engine, wait); } @@ -1469,14 +1493,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, static void gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) { - struct intel_engine_execlists * const execlists = &engine->execlists; bool tasklet = false; - if (iir & GT_CONTEXT_SWITCH_INTERRUPT) { - if (READ_ONCE(engine->execlists.active)) - tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST, - &engine->irq_posted); - } + if (iir & GT_CONTEXT_SWITCH_INTERRUPT) + tasklet = true; if (iir & GT_RENDER_USER_INTERRUPT) { notify_ring(engine); @@ -1484,7 +1504,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) } if (tasklet) - tasklet_hi_schedule(&execlists->tasklet); + tasklet_hi_schedule(&engine->execlists.tasklet); } static void 
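/*
 * Illustrative sketch (not part of the patch): notify_ring() above keys
 * wakeups off i915_seqno_passed()-style comparisons. These stay correct
 * across u32 wraparound because the difference is taken modulo 2^32 and
 * then interpreted as signed:
 */
#include <assert.h>
#include <stdint.h>

static int demo_seqno_passed(uint32_t current, uint32_t target)
{
	return (int32_t)(current - target) >= 0;
}

int main(void)
{
	assert(demo_seqno_passed(5, 3));
	assert(!demo_seqno_passed(3, 5));
	assert(demo_seqno_passed(2, 0xfffffffeu));	/* across the wrap */
	return 0;
}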
gen8_gt_irq_ack(struct drm_i915_private *i915, @@ -1556,94 +1576,122 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, } } -static bool gen11_port_hotplug_long_detect(enum port port, u32 val) +static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_C: + switch (pin) { + case HPD_PORT_C: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); - case PORT_D: + case HPD_PORT_D: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); - case PORT_E: + case HPD_PORT_E: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); - case PORT_F: + case HPD_PORT_F: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); default: return false; } } -static bool bxt_port_hotplug_long_detect(enum port port, u32 val) +static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_A: + switch (pin) { + case HPD_PORT_A: return val & PORTA_HOTPLUG_LONG_DETECT; - case PORT_B: + case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; - case PORT_C: + case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; default: return false; } } -static bool spt_port_hotplug2_long_detect(enum port port, u32 val) +static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_E: + switch (pin) { + case HPD_PORT_A: + return val & ICP_DDIA_HPD_LONG_DETECT; + case HPD_PORT_B: + return val & ICP_DDIB_HPD_LONG_DETECT; + default: + return false; + } +} + +static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_C: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); + case HPD_PORT_D: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); + case HPD_PORT_E: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); + case HPD_PORT_F: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); + default: + return false; + } +} + +static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_E: return val & PORTE_HOTPLUG_LONG_DETECT; default: return false; } } -static bool spt_port_hotplug_long_detect(enum port port, u32 val) +static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_A: + switch (pin) { + case HPD_PORT_A: return val & PORTA_HOTPLUG_LONG_DETECT; - case PORT_B: + case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; - case PORT_C: + case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; - case PORT_D: + case HPD_PORT_D: return val & PORTD_HOTPLUG_LONG_DETECT; default: return false; } } -static bool ilk_port_hotplug_long_detect(enum port port, u32 val) +static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_A: + switch (pin) { + case HPD_PORT_A: return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; default: return false; } } -static bool pch_port_hotplug_long_detect(enum port port, u32 val) +static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_B: + switch (pin) { + case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; - case PORT_C: + case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; - case PORT_D: + case HPD_PORT_D: return val & PORTD_HOTPLUG_LONG_DETECT; default: return false; } } -static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) +static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_B: + switch (pin) { + case HPD_PORT_B: return val & PORTB_HOTPLUG_INT_LONG_PULSE; - case PORT_C: + case HPD_PORT_C: return val & 
PORTC_HOTPLUG_INT_LONG_PULSE; - case PORT_D: + case HPD_PORT_D: return val & PORTD_HOTPLUG_INT_LONG_PULSE; default: return false; @@ -1661,27 +1709,22 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, u32 *pin_mask, u32 *long_mask, u32 hotplug_trigger, u32 dig_hotplug_reg, const u32 hpd[HPD_NUM_PINS], - bool long_pulse_detect(enum port port, u32 val)) + bool long_pulse_detect(enum hpd_pin pin, u32 val)) { - enum port port; - int i; + enum hpd_pin pin; - for_each_hpd_pin(i) { - if ((hpd[i] & hotplug_trigger) == 0) + for_each_hpd_pin(pin) { + if ((hpd[pin] & hotplug_trigger) == 0) continue; - *pin_mask |= BIT(i); - - port = intel_hpd_pin_to_port(dev_priv, i); - if (port == PORT_NONE) - continue; + *pin_mask |= BIT(pin); - if (long_pulse_detect(port, dig_hotplug_reg)) - *long_mask |= BIT(i); + if (long_pulse_detect(pin, dig_hotplug_reg)) + *long_mask |= BIT(pin); } - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", - hotplug_trigger, dig_hotplug_reg, *pin_mask); + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); } @@ -1703,69 +1746,34 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, uint32_t crc4) { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - struct intel_pipe_crc_entry *entry; struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - struct drm_driver *driver = dev_priv->drm.driver; uint32_t crcs[5]; - int head, tail; spin_lock(&pipe_crc->lock); - if (pipe_crc->source && !crtc->base.crc.opened) { - if (!pipe_crc->entries) { - spin_unlock(&pipe_crc->lock); - DRM_DEBUG_KMS("spurious interrupt\n"); - return; - } - - head = pipe_crc->head; - tail = pipe_crc->tail; - - if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { - spin_unlock(&pipe_crc->lock); - DRM_ERROR("CRC buffer overflowing\n"); - return; - } - - entry = &pipe_crc->entries[head]; - - entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); - entry->crc[0] = crc0; - entry->crc[1] = crc1; - entry->crc[2] = crc2; - entry->crc[3] = crc3; - entry->crc[4] = crc4; - - head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); - pipe_crc->head = head; - - spin_unlock(&pipe_crc->lock); - - wake_up_interruptible(&pipe_crc->wq); - } else { - /* - * For some not yet identified reason, the first CRC is - * bonkers. So let's just wait for the next vblank and read - * out the buggy result. - * - * On GEN8+ sometimes the second CRC is bonkers as well, so - * don't trust that one either. - */ - if (pipe_crc->skipped <= 0 || - (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { - pipe_crc->skipped++; - spin_unlock(&pipe_crc->lock); - return; - } + /* + * For some not yet identified reason, the first CRC is + * bonkers. So let's just wait for the next vblank and read + * out the buggy result. + * + * On GEN8+ sometimes the second CRC is bonkers as well, so + * don't trust that one either. 
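/*
 * Illustrative sketch (not part of the patch): intel_get_hpd_pins() above
 * now iterates hpd_pin values directly (no pin -> port translation) and
 * builds two bitmasks, one for any event and one for long pulses, via a
 * per-platform long_pulse_detect() callback. Standalone analogue with
 * invented trigger and long-pulse bit layouts:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_PINS 6

static const uint32_t demo_hpd[DEMO_NUM_PINS] = {
	1u << 0, 1u << 1, 1u << 4, 1u << 5, 1u << 6, 1u << 7,
};

static int demo_long_detect(int pin, uint32_t dig_reg)
{
	return !!(dig_reg & (1u << (16 + pin)));
}

static void demo_get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
			      uint32_t trigger, uint32_t dig_reg)
{
	for (int pin = 0; pin < DEMO_NUM_PINS; pin++) {
		if (!(demo_hpd[pin] & trigger))
			continue;

		*pin_mask |= 1u << pin;
		if (demo_long_detect(pin, dig_reg))
			*long_mask |= 1u << pin;
	}
}

int main(void)
{
	uint32_t pins = 0, longs = 0;

	demo_get_hpd_pins(&pins, &longs, 1u << 4, 1u << 18);
	printf("pins 0x%x, long 0x%x\n", pins, longs);	/* pins 0x4, long 0x4 */
	return 0;
}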
+ */ + if (pipe_crc->skipped <= 0 || + (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { + pipe_crc->skipped++; spin_unlock(&pipe_crc->lock); - crcs[0] = crc0; - crcs[1] = crc1; - crcs[2] = crc2; - crcs[3] = crc3; - crcs[4] = crc4; - drm_crtc_add_crc_entry(&crtc->base, true, - drm_crtc_accurate_vblank_count(&crtc->base), - crcs); + return; } + spin_unlock(&pipe_crc->lock); + + crcs[0] = crc0; + crcs[1] = crc1; + crcs[2] = crc2; + crcs[3] = crc3; + crcs[4] = crc4; + drm_crtc_add_crc_entry(&crtc->base, true, + drm_crtc_accurate_vblank_count(&crtc->base), + crcs); } #else static inline void @@ -2021,10 +2029,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 hotplug_status = 0, hotplug_status_mask; + int i; + + if (IS_G4X(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | + DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; + else + hotplug_status_mask = HOTPLUG_INT_STATUS_I915; + + /* + * We absolutely have to clear all the pending interrupt + * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port + * interrupt bit won't have an edge, and the i965/g4x + * edge triggered IIR will not notice that an interrupt + * is still pending. We can't use PORT_HOTPLUG_EN to + * guarantee the edge as the act of toggling the enable + * bits can itself generate a new hotplug interrupt :( + */ + for (i = 0; i < 10; i++) { + u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; + + if (tmp == 0) + return hotplug_status; - if (hotplug_status) + hotplug_status |= tmp; I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + } + + WARN_ONCE(1, + "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", + I915_READ(PORT_HOTPLUG_STAT)); return hotplug_status; } @@ -2131,7 +2167,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) I915_WRITE(VLV_IER, ier); I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); - POSTING_READ(VLV_MASTER_IER); if (gt_iir) snb_gt_irq_handler(dev_priv, gt_iir); @@ -2216,7 +2251,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) I915_WRITE(VLV_IER, ier); I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); - POSTING_READ(GEN8_MASTER_IRQ); gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); @@ -2385,6 +2419,43 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) cpt_serr_int_handler(dev_priv); } +static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +{ + u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; + u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; + u32 pin_mask = 0, long_mask = 0; + + if (ddi_hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); + I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + ddi_hotplug_trigger, + dig_hotplug_reg, hpd_icp, + icp_ddi_port_hotplug_long_detect); + } + + if (tc_hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); + I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + tc_hotplug_trigger, + dig_hotplug_reg, hpd_icp, + icp_tc_port_hotplug_long_detect); + } + + if (pin_mask) + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + + if (pch_iir & SDE_GMBUS_ICP) + gmbus_irq_handler(dev_priv); +} + static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { u32 hotplug_trigger 
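/*
 * Illustrative sketch (not part of the patch): i9xx_hpd_irq_ack() above
 * re-reads PORT_HOTPLUG_STAT in a bounded loop because a new event can
 * latch between the read and the write-to-clear; the register must read
 * back zero or the edge-triggered IIR bit never re-arms. Standalone model
 * with a fake latched register (all values invented):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_stat = 0x5;
static int racing_event = 1;

static uint32_t read_stat(void)
{
	if (racing_event) {	/* a new event latches mid-sequence */
		racing_event = 0;
		fake_stat |= 0x8;
	}
	return fake_stat;
}

static void write_stat(uint32_t val)
{
	fake_stat &= ~val;	/* write-one-to-clear */
}

int main(void)
{
	uint32_t handled = 0;

	for (int i = 0; i < 10; i++) {
		uint32_t tmp = read_stat();

		if (!tmp)
			break;		/* quiesced: IIR can see a fresh edge */
		handled |= tmp;
		write_stat(tmp);
	}

	printf("handled 0x%x\n", handled);	/* 0xd */
	return 0;
}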
= pch_iir & SDE_HOTPLUG_MASK_SPT & @@ -2548,7 +2619,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - POSTING_READ(DEIER); /* Disable south interrupts. We'll only write to SDEIIR once, so further * interrupts will will be stored on its back queue, and then we'll be @@ -2558,7 +2628,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (!HAS_PCH_NOP(dev_priv)) { sde_ier = I915_READ(SDEIER); I915_WRITE(SDEIER, 0); - POSTING_READ(SDEIER); } /* Find, clear, then process each source of interrupt */ @@ -2593,11 +2662,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) } I915_WRITE(DEIER, de_ier); - POSTING_READ(DEIER); - if (!HAS_PCH_NOP(dev_priv)) { + if (!HAS_PCH_NOP(dev_priv)) I915_WRITE(SDEIER, sde_ier); - POSTING_READ(SDEIER); - } /* IRQs are synced during runtime_suspend, we don't require a wakeref */ enable_rpm_wakeref_asserts(dev_priv); @@ -2804,8 +2870,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || - HAS_PCH_CNP(dev_priv)) + if (HAS_PCH_ICP(dev_priv)) + icp_irq_handler(dev_priv, iir); + else if (HAS_PCH_SPT(dev_priv) || + HAS_PCH_KBP(dev_priv) || + HAS_PCH_CNP(dev_priv)) spt_irq_handler(dev_priv, iir); else cpt_irq_handler(dev_priv, iir); @@ -3170,7 +3239,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) */ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); I915_WRITE(EMR, I915_READ(EMR) | eir); - I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); } } @@ -3584,6 +3653,9 @@ static void gen11_irq_reset(struct drm_device *dev) GEN3_IRQ_RESET(GEN11_DE_HPD_); GEN3_IRQ_RESET(GEN11_GU_MISC_); GEN3_IRQ_RESET(GEN8_PCU_); + + if (HAS_PCH_ICP(dev_priv)) + GEN3_IRQ_RESET(SDE); } void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, @@ -3700,6 +3772,35 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_hpd_detection_setup(dev_priv); } +static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug; + + hotplug = I915_READ(SHOTPLUG_CTL_DDI); + hotplug |= ICP_DDIA_HPD_ENABLE | + ICP_DDIB_HPD_ENABLE; + I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); + + hotplug = I915_READ(SHOTPLUG_CTL_TC); + hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | + ICP_TC_HPD_ENABLE(PORT_TC2) | + ICP_TC_HPD_ENABLE(PORT_TC3) | + ICP_TC_HPD_ENABLE(PORT_TC4); + I915_WRITE(SHOTPLUG_CTL_TC, hotplug); +} + +static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); + + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + + icp_hpd_detection_setup(dev_priv); +} + static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; @@ -3733,6 +3834,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) POSTING_READ(GEN11_DE_HPD_IMR); gen11_hpd_detection_setup(dev_priv); + + if (HAS_PCH_ICP(dev_priv)) + icp_hpd_irq_setup(dev_priv); } static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) @@ -4168,11 +4272,29 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); } +static void icp_irq_postinstall(struct 
drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + u32 mask = SDE_GMBUS_ICP; + + WARN_ON(I915_READ(SDEIER) != 0); + I915_WRITE(SDEIER, 0xffffffff); + POSTING_READ(SDEIER); + + gen3_assert_iir_is_zero(dev_priv, SDEIIR); + I915_WRITE(SDEIMR, ~mask); + + icp_hpd_detection_setup(dev_priv); +} + static int gen11_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 gu_misc_masked = GEN11_GU_MISC_GSE; + if (HAS_PCH_ICP(dev_priv)) + icp_irq_postinstall(dev); + gen11_gt_irq_postinstall(dev_priv); gen8_de_irq_postinstall(dev_priv); @@ -4225,11 +4347,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev) /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT); enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); @@ -4244,6 +4368,81 @@ static int i8xx_irq_postinstall(struct drm_device *dev) return 0; } +static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, + u16 *eir, u16 *eir_stuck) +{ + u16 emr; + + *eir = I915_READ16(EIR); + + if (*eir) + I915_WRITE16(EIR, *eir); + + *eir_stuck = I915_READ16(EIR); + if (*eir_stuck == 0) + return; + + /* + * Toggle all EMR bits to make sure we get an edge + * in the ISR master error bit if we don't clear + * all the EIR bits. Otherwise the edge triggered + * IIR on i965/g4x wouldn't notice that an interrupt + * is still pending. Also some EIR bits can't be + * cleared except by handling the underlying error + * (or by a GPU reset) so we mask any bit that + * remains set. + */ + emr = I915_READ16(EMR); + I915_WRITE16(EMR, 0xffff); + I915_WRITE16(EMR, emr | *eir_stuck); +} + +static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, + u16 eir, u16 eir_stuck) +{ + DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); + + if (eir_stuck) + DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); +} + +static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, + u32 *eir, u32 *eir_stuck) +{ + u32 emr; + + *eir = I915_READ(EIR); + + I915_WRITE(EIR, *eir); + + *eir_stuck = I915_READ(EIR); + if (*eir_stuck == 0) + return; + + /* + * Toggle all EMR bits to make sure we get an edge + * in the ISR master error bit if we don't clear + * all the EIR bits. Otherwise the edge triggered + * IIR on i965/g4x wouldn't notice that an interrupt + * is still pending. Also some EIR bits can't be + * cleared except by handling the underlying error + * (or by a GPU reset) so we mask any bit that + * remains set. 
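/*
 * Illustrative sketch (not part of the patch): i9xx_error_irq_ack() above
 * reads EIR twice; whatever survives the first write-back is "stuck".
 * Toggling EMR to all-ones and back masks the stuck bits and forces a
 * fresh edge on the master error bit even though EIR never reached zero.
 * Standalone model with fake registers (bit meanings invented):
 */
#include <stdint.h>
#include <stdio.h>

#define STUCK_BIT 0x0004	/* pretend this bit cannot be cleared */

static uint16_t fake_eir = 0x0005;
static uint16_t fake_emr;

static uint16_t read_eir(void) { return fake_eir; }
static void write_eir(uint16_t v) { fake_eir &= ~(v & ~STUCK_BIT); }
static void write_emr(uint16_t v) { fake_emr = v; }

int main(void)
{
	uint16_t eir, eir_stuck;

	eir = read_eir();
	if (eir)
		write_eir(eir);		/* normal write-to-clear */

	eir_stuck = read_eir();		/* anything left is stuck */
	if (eir_stuck) {
		uint16_t emr = fake_emr;

		write_emr(0xffff);		/* mask all: forces an edge */
		write_emr(emr | eir_stuck);	/* unmask all but stuck bits */
	}

	printf("eir 0x%04x stuck 0x%04x emr 0x%04x\n",
	       eir, eir_stuck, fake_emr);
	return 0;
}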
+ */ + emr = I915_READ(EMR); + I915_WRITE(EMR, 0xffffffff); + I915_WRITE(EMR, emr | *eir_stuck); +} + +static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, + u32 eir, u32 eir_stuck) +{ + DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); + + if (eir_stuck) + DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); +} + static irqreturn_t i8xx_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; @@ -4258,6 +4457,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) do { u32 pipe_stats[I915_MAX_PIPES] = {}; + u16 eir = 0, eir_stuck = 0; u16 iir; iir = I915_READ16(IIR); @@ -4270,13 +4470,16 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) * signalled in iir */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); + I915_WRITE16(IIR, iir); if (iir & I915_USER_INTERRUPT) notify_ring(dev_priv->engine[RCS]); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i8xx_error_irq_handler(dev_priv, eir, eir_stuck); i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); @@ -4314,12 +4517,14 @@ static int i915_irq_postinstall(struct drm_device *dev) dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if (I915_HAS_HOTPLUG(dev_priv)) { @@ -4357,6 +4562,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) do { u32 pipe_stats[I915_MAX_PIPES] = {}; + u32 eir = 0, eir_stuck = 0; u32 hotplug_status = 0; u32 iir; @@ -4374,13 +4580,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) * signalled in iir */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); + I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) notify_ring(dev_priv->engine[RCS]); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i9xx_error_irq_handler(dev_priv, eir, eir_stuck); if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); @@ -4434,14 +4643,14 @@ static int i965_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_MASTER_ERROR_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if (IS_G4X(dev_priv)) @@ -4501,6 +4710,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) do { u32 pipe_stats[I915_MAX_PIPES] = {}; + u32 eir = 0, eir_stuck = 0; u32 hotplug_status = 0; u32 iir; @@ -4517,6 +4727,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) * signalled in iir */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); + I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) @@ -4525,8 +4738,8 @@ static irqreturn_t i965_irq_handler(int irq, 
void *arg) if (iir & I915_BSD_USER_INTERRUPT) notify_ring(dev_priv->engine[VCS]); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i9xx_error_irq_handler(dev_priv, eir, eir_stuck); if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 49fcc4679db6..295e981e4a39 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -44,10 +44,6 @@ i915_param_named(modeset, int, 0400, "Use kernel modesetting [KMS] (0=disable, " "1=on, -1=force vga console preference [default])"); -i915_param_named_unsafe(panel_ignore_lid, int, 0600, - "Override lid status (0=autodetect, 1=autodetect disabled [default], " - "-1=force lid closed, -2=force lid open)"); - i915_param_named_unsafe(enable_dc, int, 0400, "Enable power-saving display C-states. " "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); @@ -92,7 +88,7 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400, i915_param_named_unsafe(enable_psr, int, 0600, "Enable PSR " - "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " + "(0=disabled, 1=enabled) " "Default: -1 (use per-chip default)"); i915_param_named_unsafe(alpha_support, bool, 0400, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index aebe0469ddaa..6c4d4a21474b 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -36,7 +36,6 @@ struct drm_printer; #define I915_PARAMS_FOR_EACH(param) \ param(char *, vbt_firmware, NULL) \ param(int, modeset, -1) \ - param(int, panel_ignore_lid, 1) \ param(int, lvds_channel_mode, 0) \ param(int, panel_use_ssc, -1) \ param(int, vbt_sdvo_panel_type, -1) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 55543f1b0236..6a4d1388ad2d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -674,10 +674,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static void i915_pci_remove(struct pci_dev *pdev) { - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev; + + dev = pci_get_drvdata(pdev); + if (!dev) /* driver load aborted, nothing to cleanup */ + return; i915_driver_unload(dev); drm_dev_put(dev); + + pci_set_drvdata(pdev, NULL); } static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -712,6 +718,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; + if (i915_inject_load_failure()) { + i915_pci_remove(pdev); + return -ENODEV; + } + err = i915_live_selftests(pdev); if (err) { i915_pci_remove(pdev); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 447407fee3b8..6bf10952c724 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1836,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, * So far the best way to work around this issue seems to be draining * the GPU from any submitted work. 
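/*
 * Illustrative sketch (not part of the patch): i915_pci_remove() above is
 * hardened to tolerate being called after an aborted load (NULL drvdata)
 * and clears drvdata afterwards, making a second invocation a no-op. The
 * same shape lets the new i915_inject_load_failure() path in probe reuse
 * the remove routine safely. Userspace analogue (invented names):
 */
#include <stdlib.h>

static void *demo_drvdata;	/* stands in for pci_{get,set}_drvdata() */

static void demo_remove(void)
{
	void *dev = demo_drvdata;

	if (!dev)		/* load aborted: nothing to clean up */
		return;

	free(dev);
	demo_drvdata = NULL;	/* second call becomes a no-op */
}

int main(void)
{
	demo_drvdata = malloc(16);
	demo_remove();
	demo_remove();		/* safe */
	return 0;
}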
*/ - ret = i915_gem_wait_for_idle(dev_priv, wait_flags); + ret = i915_gem_wait_for_idle(dev_priv, + wait_flags, + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4bfd7a9bd75f..8af945d8a995 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -139,19 +139,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); } +/* + * Given the first two numbers __a and __b of arbitrarily many evenly spaced + * numbers, pick the 0-based __index'th value. + * + * Always prefer this over _PICK() if the numbers are evenly spaced. + */ +#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a))) + +/* + * Given the arbitrary numbers in varargs, pick the 0-based __index'th number. + * + * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced. + */ #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index]) -#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a))) +/* + * Named helper wrappers around _PICK_EVEN() and _PICK(). + */ +#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) -#define _PLANE(plane, a, b) _PIPE(plane, a, b) +#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) #define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b) -#define _TRANS(tran, a, b) ((a) + (tran) * ((b) - (a))) +#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b) #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) -#define _PORT(port, a, b) ((a) + (port) * ((b) - (a))) +#define _PORT(port, a, b) _PICK_EVEN(port, a, b) #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) -#define _PLL(pll, a, b) ((a) + (pll) * ((b) - (a))) +#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b) #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) #define _PHY3(phy, ...) 
_PICK(phy, __VA_ARGS__) #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) @@ -396,6 +412,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_STOLEN_RESERVED_4M (2 << 7) #define GEN8_STOLEN_RESERVED_8M (3 << 7) #define GEN6_STOLEN_RESERVED_ENABLE (1 << 0) +#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20) /* VGA stuff */ @@ -1045,13 +1062,13 @@ enum i915_power_well_id { /* * HSW/BDW - * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1) + * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) */ HSW_DISP_PW_GLOBAL = 15, /* * GEN9+ - * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1) + * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) */ SKL_DISP_PW_MISC_IO = 0, SKL_DISP_PW_DDI_A_E, @@ -1075,17 +1092,54 @@ enum i915_power_well_id { SKL_DISP_PW_2, /* - custom power wells */ - SKL_DISP_PW_DC_OFF, BXT_DPIO_CMN_A, BXT_DPIO_CMN_BC, - GLK_DPIO_CMN_C, /* 19 */ + GLK_DPIO_CMN_C, /* 18 */ + + /* + * GEN11+ + * - _HSW_PWR_WELL_CTL1-4 + * (status bit: (id&15)*2, req bit:(id&15)*2+1) + */ + ICL_DISP_PW_1 = 0, + ICL_DISP_PW_2, + ICL_DISP_PW_3, + ICL_DISP_PW_4, + + /* + * - _HSW_PWR_WELL_CTL_AUX1/2/4 + * (status bit: (id&15)*2, req bit:(id&15)*2+1) + */ + ICL_DISP_PW_AUX_A = 16, + ICL_DISP_PW_AUX_B, + ICL_DISP_PW_AUX_C, + ICL_DISP_PW_AUX_D, + ICL_DISP_PW_AUX_E, + ICL_DISP_PW_AUX_F, + + ICL_DISP_PW_AUX_TBT1 = 24, + ICL_DISP_PW_AUX_TBT2, + ICL_DISP_PW_AUX_TBT3, + ICL_DISP_PW_AUX_TBT4, + + /* + * - _HSW_PWR_WELL_CTL_DDI1/2/4 + * (status bit: (id&15)*2, req bit:(id&15)*2+1) + */ + ICL_DISP_PW_DDI_A = 32, + ICL_DISP_PW_DDI_B, + ICL_DISP_PW_DDI_C, + ICL_DISP_PW_DDI_D, + ICL_DISP_PW_DDI_E, + ICL_DISP_PW_DDI_F, /* 37 */ /* * Multiple platforms. * Must start following the highest ID of any platform. 
* - custom power wells */ - I915_DISP_PW_ALWAYS_ON = 20, + SKL_DISP_PW_DC_OFF = 38, + I915_DISP_PW_ALWAYS_ON, }; #define PUNIT_REG_PWRGT_CTRL 0x60 @@ -1667,6 +1721,26 @@ enum i915_power_well_id { #define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \ _ICL_PORT_CL_DW5_B) +#define _CNL_PORT_CL_DW10_A 0x162028 +#define _ICL_PORT_CL_DW10_B 0x6c028 +#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \ + _CNL_PORT_CL_DW10_A, \ + _ICL_PORT_CL_DW10_B) +#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) +#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 +#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) +#define PWR_UP_ALL_LANES (0x0 << 4) +#define PWR_DOWN_LN_3_2_1 (0xe << 4) +#define PWR_DOWN_LN_3_2 (0xc << 4) +#define PWR_DOWN_LN_3 (0x8 << 4) +#define PWR_DOWN_LN_2_1_0 (0x7 << 4) +#define PWR_DOWN_LN_1_0 (0x3 << 4) +#define PWR_DOWN_LN_1 (0x2 << 4) +#define PWR_DOWN_LN_3_1 (0xa << 4) +#define PWR_DOWN_LN_3_1_0 (0xb << 4) +#define PWR_DOWN_LN_MASK (0xf << 4) +#define PWR_DOWN_LN_SHIFT 4 + #define _PORT_CL1CM_DW9_A 0x162024 #define _PORT_CL1CM_DW9_BC 0x6C024 #define IREF0RC_OFFSET_SHIFT 8 @@ -1679,6 +1753,13 @@ enum i915_power_well_id { #define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT) #define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) +#define _ICL_PORT_CL_DW12_A 0x162030 +#define _ICL_PORT_CL_DW12_B 0x6C030 +#define ICL_LANE_ENABLE_AUX (1 << 0) +#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \ + _ICL_PORT_CL_DW12_A, \ + _ICL_PORT_CL_DW12_B) + #define _PORT_CL1CM_DW28_A 0x162070 #define _PORT_CL1CM_DW28_BC 0x6C070 #define OCL1_POWER_DOWN_EN (1 << 23) @@ -1716,16 +1797,22 @@ enum i915_power_well_id { _CNL_PORT_PCS_DW1_LN0_D, \ _CNL_PORT_PCS_DW1_LN0_AE, \ _CNL_PORT_PCS_DW1_LN0_F)) + #define _ICL_PORT_PCS_DW1_GRP_A 0x162604 #define _ICL_PORT_PCS_DW1_GRP_B 0x6C604 #define _ICL_PORT_PCS_DW1_LN0_A 0x162804 #define _ICL_PORT_PCS_DW1_LN0_B 0x6C804 +#define _ICL_PORT_PCS_DW1_AUX_A 0x162304 +#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304 #define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\ _ICL_PORT_PCS_DW1_GRP_A, \ _ICL_PORT_PCS_DW1_GRP_B) #define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \ _ICL_PORT_PCS_DW1_LN0_A, \ _ICL_PORT_PCS_DW1_LN0_B) +#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \ + _ICL_PORT_PCS_DW1_AUX_A, \ + _ICL_PORT_PCS_DW1_AUX_B) #define COMMON_KEEPER_EN (1 << 26) /* CNL Port TX registers */ @@ -1762,16 +1849,23 @@ enum i915_power_well_id { #define _ICL_PORT_TX_DW2_GRP_B 0x6C688 #define _ICL_PORT_TX_DW2_LN0_A 0x162888 #define _ICL_PORT_TX_DW2_LN0_B 0x6C888 +#define _ICL_PORT_TX_DW2_AUX_A 0x162388 +#define _ICL_PORT_TX_DW2_AUX_B 0x6c388 #define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW2_GRP_A, \ _ICL_PORT_TX_DW2_GRP_B) #define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW2_LN0_A, \ _ICL_PORT_TX_DW2_LN0_B) +#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW2_AUX_A, \ + _ICL_PORT_TX_DW2_AUX_B) #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) #define SWING_SEL_UPPER_MASK (1 << 15) #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) #define SWING_SEL_LOWER_MASK (0x7 << 11) +#define FRC_LATENCY_OPTIM_MASK (0x7 << 8) +#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8) #define RCOMP_SCALAR(x) ((x) << 0) #define RCOMP_SCALAR_MASK (0xFF << 0) @@ -1787,6 +1881,8 @@ enum i915_power_well_id { #define _ICL_PORT_TX_DW4_LN0_A 0x162890 #define _ICL_PORT_TX_DW4_LN1_A 0x162990 #define _ICL_PORT_TX_DW4_LN0_B 0x6C890 +#define _ICL_PORT_TX_DW4_AUX_A 0x162390 +#define _ICL_PORT_TX_DW4_AUX_B 0x6c390 #define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \ 
_ICL_PORT_TX_DW4_GRP_A, \ _ICL_PORT_TX_DW4_GRP_B) @@ -1795,6 +1891,9 @@ enum i915_power_well_id { _ICL_PORT_TX_DW4_LN0_B) + \ ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \ _ICL_PORT_TX_DW4_LN0_A))) +#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW4_AUX_A, \ + _ICL_PORT_TX_DW4_AUX_B) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) #define POST_CURSOR_1_MASK (0x3F << 12) @@ -1809,12 +1908,17 @@ enum i915_power_well_id { #define _ICL_PORT_TX_DW5_GRP_B 0x6C694 #define _ICL_PORT_TX_DW5_LN0_A 0x162894 #define _ICL_PORT_TX_DW5_LN0_B 0x6C894 +#define _ICL_PORT_TX_DW5_AUX_A 0x162394 +#define _ICL_PORT_TX_DW5_AUX_B 0x6c394 #define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW5_GRP_A, \ _ICL_PORT_TX_DW5_GRP_B) #define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW5_LN0_A, \ _ICL_PORT_TX_DW5_LN0_B) +#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW5_AUX_A, \ + _ICL_PORT_TX_DW5_AUX_B) #define TX_TRAINING_EN (1 << 31) #define TAP2_DISABLE (1 << 30) #define TAP3_DISABLE (1 << 29) @@ -2811,7 +2915,6 @@ enum i915_power_well_id { #define I915_DISPLAY_PORT_INTERRUPT (1 << 17) #define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16) #define I915_MASTER_ERROR_INTERRUPT (1 << 15) -#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1 << 15) #define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14) #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */ #define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13) @@ -3020,6 +3123,7 @@ enum i915_power_well_id { #define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */ #define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */ #define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */ +#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6) #define GMBUS_PIN_DISABLED 0 #define GMBUS_PIN_SSC 1 #define GMBUS_PIN_VGADDC 2 @@ -3049,6 +3153,7 @@ enum i915_power_well_id { #define GMBUS_CYCLE_STOP (4 << 25) #define GMBUS_BYTE_COUNT_SHIFT 16 #define GMBUS_BYTE_COUNT_MAX 256U +#define GEN9_GMBUS_BYTE_COUNT_MAX 511U #define GMBUS_SLAVE_INDEX_SHIFT 8 #define GMBUS_SLAVE_ADDR_SHIFT 1 #define GMBUS_SLAVE_READ (1 << 0) @@ -4044,6 +4149,7 @@ enum { #define EDP_PSR_SKIP_AUX_EXIT (1 << 12) #define EDP_PSR_TP1_TP2_SEL (0 << 11) #define EDP_PSR_TP1_TP3_SEL (1 << 11) +#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */ #define EDP_PSR_TP2_TP3_TIME_500us (0 << 8) #define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) #define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) @@ -4072,6 +4178,7 @@ enum { #define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40) #define EDP_PSR_STATUS_STATE_MASK (7 << 29) +#define EDP_PSR_STATUS_STATE_SHIFT 29 #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) #define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29) #define EDP_PSR_STATUS_STATE_SRDENT (2 << 29) @@ -4498,6 +4605,16 @@ enum { #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) +#define DRM_DIP_ENABLE (1 << 28) +#define PSR_VSC_BIT_7_SET (1 << 27) +#define VSC_SELECT_MASK (0x3 << 26) +#define VSC_SELECT_SHIFT 26 +#define VSC_DIP_HW_HEA_DATA (0 << 26) +#define VSC_DIP_HW_HEA_SW_DATA (1 << 26) +#define VSC_DIP_HW_DATA_SW_HEA (2 << 26) +#define VSC_DIP_SW_HEA_DATA (3 << 26) +#define VDIP_ENABLE_PPS (1 << 24) + /* Panel power sequencing */ #define PPS_BASE 0x61200 #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) @@ -6829,7 +6946,7 @@ enum { #define _PS_ECC_STAT_2B 0x68AD0 #define _PS_ECC_STAT_1C 0x691D0 -#define _ID(id, a, b) ((a) + (id) * ((b) - (a))) +#define _ID(id, a, b) _PICK_EVEN(id, a, b) #define 
SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) @@ -7366,6 +7483,14 @@ enum { #define BDW_SCRATCH1 _MMIO(0xb11c) #define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2) +/*GEN11 chicken */ +#define _PIPEA_CHICKEN 0x70038 +#define _PIPEB_CHICKEN 0x71038 +#define _PIPEC_CHICKEN 0x72038 +#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) +#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\ + _PIPEB_CHICKEN) + /* PCH */ /* south display engine interrupt: IBX */ @@ -7409,7 +7534,7 @@ enum { #define SDE_TRANSA_FIFO_UNDER (1 << 0) #define SDE_TRANS_MASK (0x3f) -/* south display engine interrupt: CPT/PPT */ +/* south display engine interrupt: CPT - CNP */ #define SDE_AUDIO_POWER_D_CPT (1 << 31) #define SDE_AUDIO_POWER_C_CPT (1 << 30) #define SDE_AUDIO_POWER_B_CPT (1 << 29) @@ -7457,6 +7582,21 @@ enum { SDE_FDI_RXB_CPT | \ SDE_FDI_RXA_CPT) +/* south display engine interrupt: ICP */ +#define SDE_TC4_HOTPLUG_ICP (1 << 27) +#define SDE_TC3_HOTPLUG_ICP (1 << 26) +#define SDE_TC2_HOTPLUG_ICP (1 << 25) +#define SDE_TC1_HOTPLUG_ICP (1 << 24) +#define SDE_GMBUS_ICP (1 << 23) +#define SDE_DDIB_HOTPLUG_ICP (1 << 17) +#define SDE_DDIA_HOTPLUG_ICP (1 << 16) +#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \ + SDE_DDIA_HOTPLUG_ICP) +#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \ + SDE_TC3_HOTPLUG_ICP | \ + SDE_TC2_HOTPLUG_ICP | \ + SDE_TC1_HOTPLUG_ICP) + #define SDEISR _MMIO(0xc4000) #define SDEIMR _MMIO(0xc4004) #define SDEIIR _MMIO(0xc4008) @@ -7517,6 +7657,134 @@ enum { #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) #define PORTE_HOTPLUG_LONG_DETECT (2 << 0) +/* This register is a reuse of PCH_PORT_HOTPLUG register. The + * functionality covered in PCH_PORT_HOTPLUG is split into + * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC. 
+ */ + +#define SHOTPLUG_CTL_DDI _MMIO(0xc4030) +#define ICP_DDIB_HPD_ENABLE (1 << 7) +#define ICP_DDIB_HPD_STATUS_MASK (3 << 4) +#define ICP_DDIB_HPD_NO_DETECT (0 << 4) +#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4) +#define ICP_DDIB_HPD_LONG_DETECT (2 << 4) +#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4) +#define ICP_DDIA_HPD_ENABLE (1 << 3) +#define ICP_DDIA_HPD_STATUS_MASK (3 << 0) +#define ICP_DDIA_HPD_NO_DETECT (0 << 0) +#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0) +#define ICP_DDIA_HPD_LONG_DETECT (2 << 0) +#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0) + +#define SHOTPLUG_CTL_TC _MMIO(0xc4034) +#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4) +/* Icelake DSC Rate Control Range Parameter Registers */ +#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) +#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) +#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) +#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) +#define RC_BPG_OFFSET_SHIFT 10 +#define RC_MAX_QP_SHIFT 5 +#define RC_MIN_QP_SHIFT 0 + +#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) +#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) +#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) +#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) +#define DSCA_RC_RANGE_PARAMETERS_2_UDW 
_MMIO(0x6B250 + 4) +#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) +#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) +#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) +#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) +#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) + +#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) +#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) + #define PCH_GPIOA _MMIO(0xc5010) #define PCH_GPIOB _MMIO(0xc5014) #define PCH_GPIOC _MMIO(0xc5018) @@ -7689,12 +7957,25 @@ enum { #define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344 #define _HSW_VIDEO_DIP_GCP_B 0x61210 +/* Icelake PPS_DATA and _ECC DIP Registers. + * These are available for transcoders B,C and eDP. + * Adding the _A so as to reuse the _MMIO_TRANS2 + * definition, with which it offsets to the right location. 
+ */ + +#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350 +#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350 +#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4 +#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4 + #define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A) #define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A) #define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) +#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) +#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) #define _HSW_STEREO_3D_CTL_A 0x70020 #define S3D_ENABLE (1 << 31) @@ -8555,6 +8836,14 @@ enum { #define _HSW_PWR_WELL_CTL3 0x45408 #define _HSW_PWR_WELL_CTL4 0x4540C +#define _ICL_PWR_WELL_CTL_AUX1 0x45440 +#define _ICL_PWR_WELL_CTL_AUX2 0x45444 +#define _ICL_PWR_WELL_CTL_AUX4 0x4544C + +#define _ICL_PWR_WELL_CTL_DDI1 0x45450 +#define _ICL_PWR_WELL_CTL_DDI2 0x45454 +#define _ICL_PWR_WELL_CTL_DDI4 0x4545C + /* * Each power well control register contains up to 16 (request, status) HW * flag tuples. The register index and HW flag shift is determined by the @@ -8564,14 +8853,20 @@ enum { */ #define _HSW_PW_REG_IDX(pw) ((pw) >> 4) #define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2) -/* TODO: Add all PWR_WELL_CTL registers below for new platforms */ #define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL1)) + _HSW_PWR_WELL_CTL1, \ + _ICL_PWR_WELL_CTL_AUX1, \ + _ICL_PWR_WELL_CTL_DDI1)) #define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL2)) + _HSW_PWR_WELL_CTL2, \ + _ICL_PWR_WELL_CTL_AUX2, \ + _ICL_PWR_WELL_CTL_DDI2)) +/* KVMR doesn't have a reg for AUX or DDI power well control */ #define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3) #define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL4)) + _HSW_PWR_WELL_CTL4, \ + _ICL_PWR_WELL_CTL_AUX4, \ + _ICL_PWR_WELL_CTL_DDI4)) #define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1)) #define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw)) @@ -8592,6 +8887,8 @@ enum skl_power_gate { #define SKL_FUSE_DOWNLOAD_STATUS (1 << 31) /* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */ #define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) +/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */ +#define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + SKL_PG1) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) #define _CNL_AUX_REG_IDX(pw) ((pw) - 9) @@ -9047,6 +9344,7 @@ enum skl_power_gate { #define _MG_REFCLKIN_CTL_PORT3 0x16A92C #define _MG_REFCLKIN_CTL_PORT4 0x16B92C #define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) +#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) #define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \ _MG_REFCLKIN_CTL_PORT1, \ _MG_REFCLKIN_CTL_PORT2) @@ -9056,7 +9354,9 @@ enum skl_power_gate { #define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8 #define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8 #define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) +#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) #define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) +#define 
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8) #define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \ _MG_CLKTOP2_CORECLKCTL1_PORT1, \ _MG_CLKTOP2_CORECLKCTL1_PORT2) @@ -9066,9 +9366,13 @@ enum skl_power_gate { #define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4 #define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4 #define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) +#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) +#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \ _MG_CLKTOP2_HSCLKCTL_PORT1, \ _MG_CLKTOP2_HSCLKCTL_PORT2) @@ -9142,12 +9446,18 @@ enum skl_power_gate { #define _MG_PLL_BIAS_PORT3 0x16AA14 #define _MG_PLL_BIAS_PORT4 0x16BA14 #define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) +#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30) #define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) +#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24) #define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16) +#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16) #define MG_PLL_BIAS_BIASCAL_EN (1 << 15) #define MG_PLL_BIAS_CTRIM(x) ((x) << 8) +#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8) #define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5) +#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5) #define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) +#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0) #define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \ _MG_PLL_BIAS_PORT2) @@ -9401,6 +9711,22 @@ enum skl_power_gate { #define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) #define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF +#define _ICL_DSI_ESC_CLK_DIV0 0x6b090 +#define _ICL_DSI_ESC_CLK_DIV1 0x6b890 +#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \ + _ICL_DSI_ESC_CLK_DIV0, \ + _ICL_DSI_ESC_CLK_DIV1) +#define _ICL_DPHY_ESC_CLK_DIV0 0x162190 +#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190 +#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \ + _ICL_DPHY_ESC_CLK_DIV0, \ + _ICL_DPHY_ESC_CLK_DIV1) +#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16) +#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16 +#define ICL_ESC_CLK_DIV_MASK 0x1ff +#define ICL_ESC_CLK_DIV_SHIFT 0 +#define DSI_MAX_ESC_CLK 20000 /* in KHz */ + /* Gen4+ Timestamp and Pipe Frame time stamp registers */ #define GEN4_TIMESTAMP _MMIO(0x2358) #define ILK_TIMESTAMP_HI _MMIO(0x70070) @@ -9535,6 +9861,14 @@ enum skl_power_gate { #define _BXT_MIPIC_PORT_CTRL 0x6B8C0 #define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) +/* ICL DSI MODE control */ +#define _ICL_DSI_IO_MODECTL_0 0x6B094 +#define _ICL_DSI_IO_MODECTL_1 0x6B894 +#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \ + _ICL_DSI_IO_MODECTL_0, \ + _ICL_DSI_IO_MODECTL_1) +#define COMBO_PHY_MODE_DSI (1 << 0) + #define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) #define STAP_SELECT (1 << 0) @@ -10014,4 +10348,310 @@ enum skl_power_gate { _ICL_PHY_MISC_B) #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) +/* Icelake Display Stream Compression Registers */ +#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200 +#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570 +#define 
ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) +#define DSC_VBR_ENABLE (1 << 19) +#define DSC_422_ENABLE (1 << 18) +#define DSC_COLOR_SPACE_CONVERSION (1 << 17) +#define DSC_BLOCK_PREDICTION (1 << 16) +#define DSC_LINE_BUF_DEPTH_SHIFT 12 +#define DSC_BPC_SHIFT 8 +#define DSC_VER_MIN_SHIFT 4 +#define DSC_VER_MAJ (0x1 << 0) + +#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204 +#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574 +#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) +#define DSC_BPP(bpp) ((bpp) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208 +#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578 +#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC) +#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) +#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C +#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C +#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC) +#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) +#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210 +#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580 +#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) +#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) +#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214
+#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584 +#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) +#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) +#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218 +#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588 +#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) +#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) +#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) +#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) +#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C +#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C +#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC) +#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) +#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220 +#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590 +#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC) +#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) +#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224 +#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594 +#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC) +#define
ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC) +#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) +#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228 +#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598 +#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC) +#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20) +#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16) +#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) +#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C +#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C +#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) + +#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260 +#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0 +#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) + +#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264 +#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4 +#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) + +#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268 +#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8 +#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \ + 
_ICL_DSC0_PICTURE_PARAMETER_SET_14_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) + +#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C +#define DSCC_PICTURE_PARAMETER_SET_15 0x6BA6C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC +#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC +#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC +#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC +#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) + +#define DSCA_PICTURE_PARAMETER_SET_16 0x6B270 +#define DSCC_PICTURE_PARAMETER_SET_16 0x6BA70 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0 +#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) +#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) +#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) + +/* Icelake Rate Control Buffer Threshold Registers */ +#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) +#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4) +#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30) +#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254) +#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4) +#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354) +#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454) +#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4) +#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554) +#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4) +#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_0_PB, \ + _ICL_DSC0_RC_BUF_THRESH_0_PC) +#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \ + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC) +#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_0_PB, \ + _ICL_DSC1_RC_BUF_THRESH_0_PC) +#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \ + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC) + +#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238) +#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4) +#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38) +#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C) +#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4) +#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C) +#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4) +#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C) +#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4) +#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C) +#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4) +#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ +
_ICL_DSC0_RC_BUF_THRESH_1_PB, \ + _ICL_DSC0_RC_BUF_THRESH_1_PC) +#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \ + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC) +#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_1_PB, \ + _ICL_DSC1_RC_BUF_THRESH_1_PC) +#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index e1dbb544046f..5c2c93cbab12 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -206,7 +206,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) /* Carefully retire all requests without writing to the rings */ ret = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -503,7 +504,7 @@ static void move_to_timeline(struct i915_request *request, GEM_BUG_ON(request->timeline == &request->engine->timeline); lockdep_assert_held(&request->engine->timeline.lock); - spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING); + spin_lock(&request->timeline->lock); list_move_tail(&request->link, &timeline->requests); spin_unlock(&request->timeline->lock); } @@ -735,7 +736,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) /* Ratelimit ourselves to prevent oom from malicious clients */ ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED | - I915_WAIT_INTERRUPTIBLE); + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (ret) goto err_unreserve; @@ -1013,6 +1015,27 @@ i915_request_await_object(struct i915_request *to, return ret; } +void i915_request_skip(struct i915_request *rq, int error) +{ + void *vaddr = rq->ring->vaddr; + u32 head; + + GEM_BUG_ON(!IS_ERR_VALUE((long)error)); + dma_fence_set_error(&rq->fence, error); + + /* + * As this request likely depends on state from the lost + * context, clear out all the user operations leaving the + * breadcrumb at the end (so we get the fence notifications). + */ + head = rq->infix; + if (rq->postfix < head) { + memset(vaddr + head, 0, rq->ring->size - head); + head = 0; + } + memset(vaddr + head, 0, rq->postfix - head); +} + /* * NB: This function is not allowed to fail. Doing so would mean the * request is not being tracked for completion but the work itself is @@ -1196,7 +1219,7 @@ static bool __i915_spin_request(const struct i915_request *rq, * takes to sleep on a request, on the order of a microsecond. */ - irq = atomic_read(&engine->irq_count); + irq = READ_ONCE(engine->breadcrumbs.irq_count); timeout_us += local_clock_us(&cpu); do { if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) @@ -1208,7 +1231,7 @@ static bool __i915_spin_request(const struct i915_request *rq, * assume we won't see one in the near future but require * the engine->seqno_barrier() to fixup coherency.
*/ - if (atomic_read(&engine->irq_count) != irq) + if (READ_ONCE(engine->breadcrumbs.irq_count) != irq) break; if (signal_pending_state(state, current)) @@ -1285,7 +1308,7 @@ long i915_request_wait(struct i915_request *rq, if (flags & I915_WAIT_LOCKED) add_wait_queue(errq, &reset); - intel_wait_init(&wait, rq); + intel_wait_init(&wait); restart: do { diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 7ee220ded9c9..e1c9365dfefb 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -258,6 +258,8 @@ void i915_request_add(struct i915_request *rq); void __i915_request_submit(struct i915_request *request); void i915_request_submit(struct i915_request *request); +void i915_request_skip(struct i915_request *request, int error); + void __i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request); @@ -378,6 +380,7 @@ static inline void init_request_active(struct i915_gem_active *active, i915_gem_retire_fn retire) { + RCU_INIT_POINTER(active->request, NULL); INIT_LIST_HEAD(&active->link); active->retire = retire ?: i915_gem_retire_noop; } diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h index 9766e806dce6..a73472dd12fd 100644 --- a/drivers/gpu/drm/i915/i915_selftest.h +++ b/drivers/gpu/drm/i915/i915_selftest.h @@ -99,6 +99,6 @@ __printf(2, 3) bool __igt_timeout(unsigned long timeout, const char *fmt, ...); #define igt_timeout(t, fmt, ...) \ - __igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) + __igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* !__I915_SELFTEST_H__ */ diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index dc2a4632faa7..a2c2c3ab5fb0 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -37,6 +37,8 @@ struct i915_timeline { u32 seqno; spinlock_t lock; +#define TIMELINE_CLIENT 0 /* default subclass */ +#define TIMELINE_ENGINE 1 /** * List of breadcrumbs associated with GPU requests currently diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index e82aa804cdba..11d834f94220 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -21,7 +21,7 @@ * IN THE SOFTWARE. 
* */ - + #include "i915_vma.h" #include "i915_drv.h" @@ -30,18 +30,53 @@ #include <drm/drm_gem.h> +#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) + +#include <linux/stackdepot.h> + +static void vma_print_allocator(struct i915_vma *vma, const char *reason) +{ + unsigned long entries[12]; + struct stack_trace trace = { + .entries = entries, + .max_entries = ARRAY_SIZE(entries), + }; + char buf[512]; + + if (!vma->node.stack) { + DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n", + vma->node.start, vma->node.size, reason); + return; + } + + depot_fetch_stack(vma->node.stack, &trace); + snprint_stack_trace(buf, sizeof(buf), &trace, 0); + DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", + vma->node.start, vma->node.size, reason, buf); +} + +#else + +static void vma_print_allocator(struct i915_vma *vma, const char *reason) +{ +} + +#endif + +struct i915_vma_active { + struct i915_gem_active base; + struct i915_vma *vma; + struct rb_node node; + u64 timeline; +}; + static void -i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) +__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq) { - const unsigned int idx = rq->engine->id; - struct i915_vma *vma = - container_of(active, struct i915_vma, last_read[idx]); struct drm_i915_gem_object *obj = vma->obj; - GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx)); - - i915_vma_clear_active(vma, idx); - if (i915_vma_is_active(vma)) + GEM_BUG_ON(!i915_vma_is_active(vma)); + if (--vma->active_count) return; GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); @@ -75,6 +110,21 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) } } +static void +i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq) +{ + struct i915_vma_active *active = + container_of(base, typeof(*active), base); + + __i915_vma_retire(active->vma, rq); +} + +static void +i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq) +{ + __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq); +} + static struct i915_vma * vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, @@ -82,7 +132,6 @@ vma_create(struct drm_i915_gem_object *obj, { struct i915_vma *vma; struct rb_node *rb, **p; - int i; /* The aliasing_ppgtt should never be used directly! 
*/ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); @@ -91,8 +140,9 @@ vma_create(struct drm_i915_gem_object *obj, if (vma == NULL) return ERR_PTR(-ENOMEM); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - init_request_active(&vma->last_read[i], i915_vma_retire); + vma->active = RB_ROOT; + + init_request_active(&vma->last_active, i915_vma_last_retire); init_request_active(&vma->last_fence, NULL); vma->vm = vm; vma->ops = &vm->vma_ops; @@ -110,7 +160,7 @@ vma_create(struct drm_i915_gem_object *obj, obj->base.size >> PAGE_SHIFT)); vma->size = view->partial.size; vma->size <<= PAGE_SHIFT; - GEM_BUG_ON(vma->size >= obj->base.size); + GEM_BUG_ON(vma->size > obj->base.size); } else if (view->type == I915_GGTT_VIEW_ROTATED) { vma->size = intel_rotation_info_size(&view->rotated); vma->size <<= PAGE_SHIFT; @@ -745,13 +795,11 @@ void i915_vma_reopen(struct i915_vma *vma) static void __i915_vma_destroy(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; - int i; + struct i915_vma_active *iter, *n; GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(vma->fence); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i])); GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); list_del(&vma->obj_link); @@ -762,6 +810,11 @@ static void __i915_vma_destroy(struct i915_vma *vma) if (!i915_vma_is_ggtt(vma)) i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); + rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { + GEM_BUG_ON(i915_gem_active_isset(&iter->base)); + kfree(iter); + } + kmem_cache_free(i915->vmas, vma); } @@ -826,9 +879,159 @@ void i915_vma_revoke_mmap(struct i915_vma *vma) list_del(&vma->obj->userfault_link); } +static void export_fence(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) +{ + struct reservation_object *resv = vma->resv; + + /* + * Ignore errors from failing to allocate the new fence, we can't + * handle an error right now. Worst case should be missed + * synchronisation leading to rendering corruption. + */ + reservation_object_lock(resv, NULL); + if (flags & EXEC_OBJECT_WRITE) + reservation_object_add_excl_fence(resv, &rq->fence); + else if (reservation_object_reserve_shared(resv) == 0) + reservation_object_add_shared_fence(resv, &rq->fence); + reservation_object_unlock(resv); +} + +static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx) +{ + struct i915_vma_active *active; + struct rb_node **p, *parent; + struct i915_request *old; + + /* + * We track the most recently used timeline to skip a rbtree search + * for the common case, under typical loads we never need the rbtree + * at all. We can reuse the last_active slot if it is empty, that is + * after the previous activity has been retired, or if the active + * matches the current timeline. + * + * Note that we allow the timeline to be active simultaneously in + * the rbtree and the last_active cache. We do this to avoid having + * to search and replace the rbtree element for a new timeline, with + * the cost being that we must be aware that the vma may be retired + * twice for the same timeline (as the older rbtree element will be + * retired before the new request added to last_active). 
+ */ + old = i915_gem_active_raw(&vma->last_active, + &vma->vm->i915->drm.struct_mutex); + if (!old || old->fence.context == idx) + goto out; + + /* Move the currently active fence into the rbtree */ + idx = old->fence.context; + + parent = NULL; + p = &vma->active.rb_node; + while (*p) { + parent = *p; + + active = rb_entry(parent, struct i915_vma_active, node); + if (active->timeline == idx) + goto replace; + + if (active->timeline < idx) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + + active = kmalloc(sizeof(*active), GFP_KERNEL); + + /* kmalloc may retire the vma->last_active request (thanks shrinker)! */ + if (unlikely(!i915_gem_active_raw(&vma->last_active, + &vma->vm->i915->drm.struct_mutex))) { + kfree(active); + goto out; + } + + if (unlikely(!active)) + return ERR_PTR(-ENOMEM); + + init_request_active(&active->base, i915_vma_retire); + active->vma = vma; + active->timeline = idx; + + rb_link_node(&active->node, parent, p); + rb_insert_color(&active->node, &vma->active); + +replace: + /* + * Overwrite the previous active slot in the rbtree with last_active, + * leaving last_active zeroed. If the previous slot is still active, + * we must be careful as we now only expect to receive one retire + * callback not two, and so must undo the active counting for the + * overwritten slot. + */ + if (i915_gem_active_isset(&active->base)) { + /* Retire ourselves from the old rq->active_list */ + __list_del_entry(&active->base.link); + vma->active_count--; + GEM_BUG_ON(!vma->active_count); + } + GEM_BUG_ON(list_empty(&vma->last_active.link)); + list_replace_init(&vma->last_active.link, &active->base.link); + active->base.request = fetch_and_zero(&vma->last_active.request); + +out: + return &vma->last_active; +} + +int i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + struct i915_gem_active *active; + + lockdep_assert_held(&rq->i915->drm.struct_mutex); + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + + active = active_instance(vma, rq->fence.context); + if (IS_ERR(active)) + return PTR_ERR(active); + + /* + * Add a reference if we're newly entering the active list. + * The order in which we add operations to the retirement queue is + * vital here: mark_active adds to the start of the callback list, + * such that subsequent callbacks are called first. Therefore we + * add the active reference first and queue for it to be dropped + * *last*. + */ + if (!i915_gem_active_isset(active) && !vma->active_count++) { + list_move_tail(&vma->vm_link, &vma->vm->active_list); + obj->active_count++; + } + i915_gem_active_set(active, rq); + GEM_BUG_ON(!i915_vma_is_active(vma)); + GEM_BUG_ON(!obj->active_count); + + obj->write_domain = 0; + if (flags & EXEC_OBJECT_WRITE) { + obj->write_domain = I915_GEM_DOMAIN_RENDER; + + if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) + i915_gem_active_set(&obj->frontbuffer_write, rq); + + obj->read_domains = 0; + } + obj->read_domains |= I915_GEM_GPU_DOMAINS; + + if (flags & EXEC_OBJECT_NEEDS_FENCE) + i915_gem_active_set(&vma->last_fence, rq); + + export_fence(vma, rq, flags); + return 0; +} + int i915_vma_unbind(struct i915_vma *vma) { - unsigned long active; int ret; lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); @@ -838,9 +1041,8 @@ int i915_vma_unbind(struct i915_vma *vma) * have side-effects such as unpinning or even unbinding this vma.
*/ might_sleep(); - active = i915_vma_get_active(vma); - if (active) { - int idx; + if (i915_vma_is_active(vma)) { + struct i915_vma_active *active, *n; /* * When a closed VMA is retired, it is unbound - eek. @@ -857,26 +1059,32 @@ int i915_vma_unbind(struct i915_vma *vma) */ __i915_vma_pin(vma); - for_each_active(active, idx) { - ret = i915_gem_active_retire(&vma->last_read[idx], - &vma->vm->i915->drm.struct_mutex); - if (ret) - break; - } + ret = i915_gem_active_retire(&vma->last_active, + &vma->vm->i915->drm.struct_mutex); + if (ret) + goto unpin; - if (!ret) { - ret = i915_gem_active_retire(&vma->last_fence, + rbtree_postorder_for_each_entry_safe(active, n, + &vma->active, node) { + ret = i915_gem_active_retire(&active->base, &vma->vm->i915->drm.struct_mutex); + if (ret) + goto unpin; } + ret = i915_gem_active_retire(&vma->last_fence, + &vma->vm->i915->drm.struct_mutex); +unpin: __i915_vma_unpin(vma); if (ret) return ret; } GEM_BUG_ON(i915_vma_is_active(vma)); - if (i915_vma_is_pinned(vma)) + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); return -EBUSY; + } if (!drm_mm_node_allocated(&vma->node)) return 0; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 66a228931517..f06d66377107 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -26,6 +26,7 @@ #define __I915_VMA_H__ #include <linux/io-mapping.h> +#include <linux/rbtree.h> #include <drm/drm_mm.h> @@ -94,8 +95,9 @@ struct i915_vma { #define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) #define I915_VMA_GGTT_WRITE BIT(12) - unsigned int active; - struct i915_gem_active last_read[I915_NUM_ENGINES]; + unsigned int active_count; + struct rb_root active; + struct i915_gem_active last_active; struct i915_gem_active last_fence; /** @@ -138,6 +140,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj, void i915_vma_unpin_and_release(struct i915_vma **p_vma); +static inline bool i915_vma_is_active(struct i915_vma *vma) +{ + return vma->active_count; +} + +int __must_check i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags); + static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) { return vma->flags & I915_VMA_GGTT; @@ -187,34 +198,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma) return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags); } -static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) -{ - return vma->active; -} - -static inline bool i915_vma_is_active(const struct i915_vma *vma) -{ - return i915_vma_get_active(vma); -} - -static inline void i915_vma_set_active(struct i915_vma *vma, - unsigned int engine) -{ - vma->active |= BIT(engine); -} - -static inline void i915_vma_clear_active(struct i915_vma *vma, - unsigned int engine) -{ - vma->active &= ~BIT(engine); -} - -static inline bool i915_vma_has_active_engine(const struct i915_vma *vma, - unsigned int engine) -{ - return vma->active & BIT(engine); -} - static inline u32 i915_ggtt_offset(const struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_ggtt(vma)); diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c new file mode 100644 index 000000000000..13830e43a4d1 --- /dev/null +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -0,0 +1,127 @@ +/* + * Copyright © 2018 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, 
including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Madhav Chauhan <madhav.chauhan@intel.com> + * Jani Nikula <jani.nikula@intel.com> + */ + +#include "intel_dsi.h" + +static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 afe_clk_khz; /* 8X Clock */ + u32 esc_clk_div_m; + + afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, + intel_dsi->lane_count); + + esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK); + + for_each_dsi_port(port, intel_dsi->ports) { + I915_WRITE(ICL_DSI_ESC_CLK_DIV(port), + esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); + POSTING_READ(ICL_DSI_ESC_CLK_DIV(port)); + } + + for_each_dsi_port(port, intel_dsi->ports) { + I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port), + esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); + POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port)); + } +} + +static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); + tmp |= COMBO_PHY_MODE_DSI; + I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); + } + + for_each_dsi_port(port, intel_dsi->ports) { + intel_display_power_get(dev_priv, port == PORT_A ? 
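gen11_dsi_program_esc_clk_div() above derives the 8X AFE clock from the pixel clock, bpp and lane count, then picks the smallest divider that keeps the escape clock in range. A standalone sketch of that arithmetic; the 20 MHz DSI_MAX_ESC_CLK ceiling is an assumption for illustration, not taken from this diff:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define DIV_ROUND_UP(x, d)	(((x) + (d) - 1) / (d))
#define DSI_MAX_ESC_CLK		20000	/* kHz, assumed ceiling */

int main(void)
{
	unsigned int pclk = 148500;	/* kHz, example DSI pixel clock */
	unsigned int bpp = 24;		/* RGB888 */
	unsigned int lane_count = 4;

	/* 8X (byte) clock: pixel bandwidth spread across the lanes */
	unsigned int afe_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
	/* smallest divisor that keeps the escape clock within spec */
	unsigned int esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);

	printf("afe_clk = %u kHz, esc divider M = %u\n",
	       afe_clk_khz, esc_clk_div_m);
	return 0;
}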
+ POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO); + } +} + +static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + u32 lane_mask; + + switch (intel_dsi->lane_count) { + case 1: + lane_mask = PWR_DOWN_LN_3_1_0; + break; + case 2: + lane_mask = PWR_DOWN_LN_3_1; + break; + case 3: + lane_mask = PWR_DOWN_LN_3; + break; + case 4: + default: + lane_mask = PWR_UP_ALL_LANES; + break; + } + + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_CL_DW10(port)); + tmp &= ~PWR_DOWN_LN_MASK; + I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask); + } +} + +static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder) +{ + /* step 4a: power up all lanes of the DDI used by DSI */ + gen11_dsi_power_up_lanes(encoder); +} + +static void __attribute__((unused)) +gen11_dsi_pre_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + /* step2: enable IO power */ + gen11_dsi_enable_io_power(encoder); + + /* step3: enable DSI PLL */ + gen11_dsi_program_esc_clk_div(encoder); + + /* step4: enable DSI port and DPHY */ + gen11_dsi_enable_port_and_phy(encoder); +} diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 86a987b8ac66..1db6ba7d926e 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -98,12 +98,14 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t) struct intel_engine_cs *engine = from_timer(engine, t, breadcrumbs.hangcheck); struct intel_breadcrumbs *b = &engine->breadcrumbs; + unsigned int irq_count; if (!b->irq_armed) return; - if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) { - b->hangcheck_interrupts = atomic_read(&engine->irq_count); + irq_count = READ_ONCE(b->irq_count); + if (b->hangcheck_interrupts != irq_count) { + b->hangcheck_interrupts = irq_count; mod_timer(&b->hangcheck, wait_timeout()); return; } @@ -272,13 +274,14 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b) if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) return false; - /* Only start with the heavy weight fake irq timer if we have not + /* + * Only start with the heavy weight fake irq timer if we have not * seen any interrupts since enabling it the first time. If the * interrupts are still arriving, it means we made a mistake in our * engine->seqno_barrier(), a timing error that should be transient * and unlikely to reoccur. 
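The hangcheck change above replaces two atomic_read() calls with a single READ_ONCE() snapshot, so the comparison and the store cannot observe different values of a counter the IRQ path keeps bumping. A toy model of the pattern, with READ_ONCE approximated by a volatile cast as the kernel does:

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned int irq_count;		/* bumped by the IRQ path */
static unsigned int hangcheck_interrupts; /* timer-side bookkeeping */

static int hangcheck_saw_progress(void)
{
	unsigned int snapshot = READ_ONCE(irq_count);	/* one racy read */

	if (hangcheck_interrupts != snapshot) {
		hangcheck_interrupts = snapshot;	/* same value we compared */
		return 1;	/* interrupts arrived since last check */
	}
	return 0;
}

int main(void)
{
	irq_count = 3;	/* pretend three interrupts fired */
	printf("progress? %d\n", hangcheck_saw_progress());	/* 1 */
	printf("progress? %d\n", hangcheck_saw_progress());	/* 0: no new irqs */
	return 0;
}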
*/ - return atomic_read(&engine->irq_count) == b->hangcheck_interrupts; + return READ_ONCE(b->irq_count) == b->hangcheck_interrupts; } static void enable_fake_irq(struct intel_breadcrumbs *b) diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index bf9433d7964d..29075c763428 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -316,6 +316,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, break; default: DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); + /* fall through */ case GC_DISPLAY_CLOCK_133_MHZ_PNV: cdclk_state->cdclk = 133333; break; @@ -1797,6 +1798,7 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref) switch (ref) { default: MISSING_CASE(ref); + /* fall through */ case 24000: ranges = ranges_24; break; @@ -1824,6 +1826,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) switch (cdclk) { default: MISSING_CASE(cdclk); + /* fall through */ case 307200: case 556800: case 652800: @@ -1896,6 +1899,7 @@ static u8 icl_calc_voltage_level(int cdclk) return 1; default: MISSING_CASE(cdclk); + /* fall through */ case 652800: case 648000: return 2; @@ -1913,6 +1917,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) { default: MISSING_CASE(val); + /* fall through */ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: cdclk_state->ref = 24000; break; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 044fe1fb9872..39d66f8493fa 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1069,6 +1069,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, switch (id) { default: MISSING_CASE(id); + /* fall through */ case DPLL_ID_ICL_DPLL0: case DPLL_ID_ICL_DPLL1: return DDI_CLK_SEL_NONE; @@ -1807,15 +1808,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } -void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) +void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); uint32_t val = I915_READ(reg); val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); val |= TRANS_DDI_PORT_NONE; I915_WRITE(reg, val); + + if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n"); + /* Quirk time at 100ms for reliable operation */ + msleep(100); + } } int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, @@ -1983,15 +1993,50 @@ out: return ret; } -static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder) +static inline enum intel_display_power_domain +intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) +{ + /* CNL HW requires corresponding AUX IOs to be powered up for PSR with + * DC states enabled at the same time, while for driver initiated AUX + * transfers we need the same AUX IOs to be powered but with DC states + * disabled. Accordingly use the AUX power domain here which leaves DC + * states enabled. 
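Several hunks here only add /* fall through */ markers where a default: label intentionally continues into the next case (log or MISSING_CASE(), then use a sane fallback). A compilable sketch of the convention, which GCC's -Wimplicit-fallthrough recognizes:

#include <stdio.h>

static int cdclk_for(unsigned int sel)
{
	int cdclk;

	switch (sel) {
	default:
		fprintf(stderr, "unknown selector %u, assuming 133 MHz\n", sel);
		/* fall through */
	case 0:
		cdclk = 133333;
		break;
	case 1:
		cdclk = 320000;
		break;
	}
	return cdclk;
}

int main(void)
{
	printf("%d\n", cdclk_for(0));
	printf("%d\n", cdclk_for(7));	/* default falls through to 133333 */
	return 0;
}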
+ * However, for non-A AUX ports the corresponding non-EDP transcoders + * would have already enabled power well 2 and DC_OFF. This means we can + * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a + * specific AUX_IO reference without powering up any extra wells. + * Note that PSR is enabled only on Port A even though this function + * returns the correct domain for other ports too. + */ + return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : + intel_dp->aux_power_domain; +} + +static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - enum pipe pipe; + struct intel_digital_port *dig_port; + u64 domains; - if (intel_ddi_get_hw_state(encoder, &pipe)) - return BIT_ULL(dig_port->ddi_io_power_domain); + /* + * TODO: Add support for MST encoders. Atm, the following should never + * happen since fake-MST encoders don't set their get_power_domains() + * hook. + */ + if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) + return 0; - return 0; + dig_port = enc_to_dig_port(&encoder->base); + domains = BIT_ULL(dig_port->ddi_io_power_domain); + + /* AUX power is only needed for (e)DP mode, not for HDMI. */ + if (intel_crtc_has_dp_encoder(crtc_state)) { + struct intel_dp *intel_dp = &dig_port->dp; + + domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp)); + } + + return domains; } void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) @@ -2631,6 +2676,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(intel_dp)); + intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); @@ -2775,6 +2823,9 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); + + intel_display_power_put(dev_priv, + intel_ddi_main_link_aux_domain(intel_dp)); } static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, @@ -3588,7 +3639,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) goto err; intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; - dev_priv->hotplug.irq_port[port] = intel_dig_port; } /* In theory we don't need the encoder->type check, but leave it just in diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 0fd13df424cf..0ef0c6448d53 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -858,6 +858,8 @@ void intel_device_info_runtime_init(struct intel_device_info *info) void intel_driver_caps_print(const struct intel_driver_caps *caps, struct drm_printer *p) { + drm_printf(p, "Has logical contexts? 
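intel_ddi_get_power_domains() above returns a u64 with one bit per power domain, adding the AUX domain only for (e)DP and preferring the AUX_IO domain on port A. A self-contained model of that bitmask idiom; the domain names and numbering below are an illustrative subset, not the driver's enum:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

enum power_domain {	/* illustrative subset */
	POWER_DOMAIN_PORT_DDI_A_IO,
	POWER_DOMAIN_AUX_IO_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_NUM,
};

static void power_get(enum power_domain d)
{
	printf("get domain %d\n", d);
}

int main(void)
{
	uint64_t domains = BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
	int is_edp_on_port_a = 1;	/* pretend: (e)DP on AUX channel A */

	/* AUX power only matters for (e)DP; port A wants the IO domain */
	if (is_edp_on_port_a)
		domains |= BIT_ULL(POWER_DOMAIN_AUX_IO_A);

	for (int d = 0; d < POWER_DOMAIN_NUM; d++)
		if (domains & BIT_ULL(d))
			power_get(d);
	return 0;
}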
%s\n", + yesno(caps->has_logical_contexts)); drm_printf(p, "scheduler: %x\n", caps->scheduler); } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 933e31669557..633f9fbf72ea 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -186,6 +186,7 @@ struct intel_device_info { struct intel_driver_caps { unsigned int scheduler; + bool has_logical_contexts:1; }; static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 694a4703042f..87e4cfbfd096 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3657,7 +3657,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format); - if (intel_format_is_yuv(fb->format->format)) { + if (fb->format->is_yuv) { if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; else @@ -5632,6 +5632,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); bool psl_clkgate_wa; + u32 pipe_chicken; if (WARN_ON(intel_crtc->active)) return; @@ -5691,6 +5692,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, */ intel_color_load_luts(&pipe_config->base); + /* + * Display WA #1153: enable hardware to bypass the alpha math + * and rounding for per-pixel values 00 and 0xff + */ + if (INTEL_GEN(dev_priv) >= 11) { + pipe_chicken = I915_READ(PIPE_CHICKEN(pipe)); + if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN)) + I915_WRITE_FW(PIPE_CHICKEN(pipe), + pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN); + } + intel_ddi_set_pipe_settings(pipe_config); if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_transcoder_func(pipe_config); @@ -5825,7 +5837,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_ddi_set_vc_payload_alloc(old_crtc_state, false); if (!transcoder_is_dsi(cpu_transcoder)) - intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); + intel_ddi_disable_transcoder_func(old_crtc_state); if (INTEL_GEN(dev_priv) >= 9) skylake_scaler_disable(intel_crtc); @@ -9347,6 +9359,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: WARN(1, "unknown pipe linked to edp transcoder\n"); + /* fall through */ case TRANS_DDI_EDP_INPUT_A_ONOFF: case TRANS_DDI_EDP_INPUT_A_ON: trans_edp_pipe = PIPE_A; @@ -9402,7 +9415,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, * registers/MIPI[BXT]. We can break out here early, since we * need the same DSI PLL to be enabled for both DSI ports. 
*/ - if (!intel_dsi_pll_is_enabled(dev_priv)) + if (!bxt_dsi_pll_is_enabled(dev_priv)) break; /* XXX: this works for video mode only */ @@ -10724,7 +10737,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->base.state->crtc) - drm_connector_unreference(&connector->base); + drm_connector_put(&connector->base); if (connector->base.encoder) { connector->base.state->best_encoder = @@ -10732,7 +10745,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) connector->base.state->crtc = connector->base.encoder->crtc; - drm_connector_reference(&connector->base); + drm_connector_get(&connector->base); } else { connector->base.state->best_encoder = NULL; connector->base.state->crtc = NULL; @@ -11011,6 +11024,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) case INTEL_OUTPUT_DDI: if (WARN_ON(!HAS_DDI(to_i915(dev)))) break; + /* else: fall through */ case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: @@ -12542,6 +12556,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset); } +static void intel_atomic_cleanup_work(struct work_struct *work) +{ + struct drm_atomic_state *state = + container_of(work, struct drm_atomic_state, commit_work); + struct drm_i915_private *i915 = to_i915(state->dev); + + drm_atomic_helper_cleanup_planes(&i915->drm, state); + drm_atomic_helper_commit_cleanup_done(state); + drm_atomic_state_put(state); + + intel_atomic_helper_free_state(i915); +} + static void intel_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; @@ -12702,13 +12729,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); } - drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_helper_commit_cleanup_done(state); - - drm_atomic_state_put(state); - - intel_atomic_helper_free_state(dev_priv); + /* + * Defer the cleanup of the old state to a separate worker to not + * impede the current task (userspace for blocking modesets) that + * are executed inline. For out-of-line asynchronous modesets/flips, + * deferring to a new worker seems overkill, but we would place a + * schedule point (cond_resched()) here anyway to keep latencies + * down. 
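intel_atomic_cleanup_work() above, scheduled just below via INIT_WORK()/schedule_work(), moves the old-state teardown out of the commit path into a worker. A userspace sketch of the same handoff using a plain pthread in place of schedule_work(); build with -pthread:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct state {
	int id;
};

static void *cleanup_work(void *arg)
{
	struct state *state = arg;

	printf("worker: cleaning up state %d\n", state->id);
	free(state);	/* expensive teardown happens off the hot path */
	return NULL;
}

int main(void)
{
	struct state *old = malloc(sizeof(*old));
	pthread_t worker;

	if (!old)
		return 1;
	old->id = 42;
	/* commit tail: hand off the old state and return immediately */
	if (pthread_create(&worker, NULL, cleanup_work, old))
		return 1;
	printf("commit: done, cleanup still pending\n");
	pthread_join(worker, NULL);
	return 0;
}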
+ */ + INIT_WORK(&state->commit_work, intel_atomic_cleanup_work); + schedule_work(&state->commit_work); } static void intel_atomic_commit_work(struct work_struct *work) @@ -14105,7 +14135,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); - intel_dsi_init(dev_priv); + vlv_dsi_init(dev_priv); } else if (HAS_DDI(dev_priv)) { int found; @@ -14211,7 +14241,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); } - intel_dsi_init(dev_priv); + vlv_dsi_init(dev_priv); } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { bool found = false; @@ -14493,11 +14523,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, } break; case DRM_FORMAT_NV12: - if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS || - mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) { - DRM_DEBUG_KMS("RC not to be enabled with NV12\n"); - goto err; - } if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) { DRM_DEBUG_KMS("unsupported pixel format: %s\n", @@ -14824,6 +14849,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev) DRM_INFO("Applying T12 delay quirk\n"); } +/* + * GeminiLake NUC HDMI outputs require additional off time + * this allows the onboard retimer to correctly sync to signal + */ +static void quirk_increase_ddi_disabled_time(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME; + DRM_INFO("Applying Increase DDI Disabled quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; @@ -14910,6 +14947,13 @@ static struct intel_quirk intel_quirks[] = { /* Toshiba Satellite P50-C-18C */ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, + + /* GeminiLake NUC */ + { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, + /* ASRock ITX*/ + { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, }; static void intel_init_quirks(struct drm_device *dev) @@ -15676,11 +15720,20 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) for_each_intel_encoder(&dev_priv->drm, encoder) { u64 get_domains; enum intel_display_power_domain domain; + struct intel_crtc_state *crtc_state; if (!encoder->get_power_domains) continue; - get_domains = encoder->get_power_domains(encoder); + /* + * MST-primary and inactive encoders don't have a crtc state + * and neither of these require any power domain references. 
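The new GeminiLake/ASRock entries extend the quirk table: each row is a PCI device plus subsystem vendor/device triple and a hook to run on match. A standalone model of that lookup, reusing the NUC IDs from the hunk:

#include <stdio.h>

struct quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(void);
};

static void quirk_increase_ddi_disabled_time(void)
{
	printf("Applying Increase DDI Disabled quirk\n");
}

static const struct quirk quirks[] = {
	/* GeminiLake NUC IDs taken from the table above */
	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
};

int main(void)
{
	int device = 0x3185, sub_ven = 0x8086, sub_dev = 0x2072;

	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->device == device &&
		    q->subsystem_vendor == sub_ven &&
		    q->subsystem_device == sub_dev)
			q->hook();
	}
	return 0;
}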
+ */ + if (!encoder->base.crtc) + continue; + + crtc_state = to_intel_crtc_state(encoder->base.crtc->state); + get_domains = encoder->get_power_domains(encoder, crtc_state); for_each_power_domain(domain, get_domains) intel_display_power_get(dev_priv, domain); } @@ -15856,6 +15909,8 @@ void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + flush_workqueue(dev_priv->modeset_wq); + flush_work(&dev_priv->atomic_helper.free_work); WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); @@ -15899,8 +15954,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder) { connector->encoder = encoder; - drm_mode_connector_attach_encoder(&connector->base, - &encoder->base); + drm_connector_attach_encoder(&connector->base, &encoder->base); } /* diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index dd30cae5eb00..9292001cdd14 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -199,6 +199,10 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F, POWER_DOMAIN_AUX_IO_A, + POWER_DOMAIN_AUX_TBT1, + POWER_DOMAIN_AUX_TBT2, + POWER_DOMAIN_AUX_TBT3, + POWER_DOMAIN_AUX_TBT4, POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, @@ -285,6 +289,10 @@ struct intel_link_m_n { &(dev)->mode_config.encoder_list, \ base.head) +#define for_each_intel_dp(dev, intel_encoder) \ + for_each_intel_encoder(dev, intel_encoder) \ + for_each_if(intel_encoder_is_dp(intel_encoder)) + #define for_each_intel_connector_iter(intel_connector, iter) \ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter)))) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6ac6c8787dcf..cd0f649b57a5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -600,14 +600,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) * We don't have power sequencer currently. * Pick one that's not used by other ports. */ - for_each_intel_encoder(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp; - - if (encoder->type != INTEL_OUTPUT_DP && - encoder->type != INTEL_OUTPUT_EDP) - continue; - - intel_dp = enc_to_intel_dp(&encoder->base); + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); if (encoder->type == INTEL_OUTPUT_EDP) { WARN_ON(intel_dp->active_pipe != INVALID_PIPE && @@ -799,19 +793,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) * should use them always. 
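for_each_intel_dp() above is built by composing the existing encoder iterator with a predicate via for_each_if(), which is what lets the intel_dp.c hunks that follow delete their open-coded type checks. A compilable miniature of that macro composition (the names and types here are stand-ins for the drm ones):

#include <stdio.h>

enum type { OUTPUT_DP, OUTPUT_EDP, OUTPUT_HDMI };

struct encoder {
	enum type type;
	const char *name;
};

/* same trick as drm's for_each_if(): filter without breaking else-pairing */
#define for_each_if(cond) if (!(cond)) {} else

#define for_each_encoder(e, arr, n) \
	for ((e) = (arr); (e) < (arr) + (n); (e)++)

#define for_each_dp_encoder(e, arr, n) \
	for_each_encoder(e, arr, n) \
		for_each_if((e)->type == OUTPUT_DP || (e)->type == OUTPUT_EDP)

int main(void)
{
	struct encoder encoders[] = {
		{ OUTPUT_DP, "DP-1" },
		{ OUTPUT_HDMI, "HDMI-1" },
		{ OUTPUT_EDP, "eDP-1" },
	};
	const struct encoder *e;

	for_each_dp_encoder(e, encoders, 3)
		printf("%s\n", e->name);	/* skips HDMI-1 */
	return 0;
}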
*/ - for_each_intel_encoder(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp; - - if (encoder->type != INTEL_OUTPUT_DP && - encoder->type != INTEL_OUTPUT_EDP && - encoder->type != INTEL_OUTPUT_DDI) - continue; - - intel_dp = enc_to_intel_dp(&encoder->base); - - /* Skip pure DVI/HDMI DDI encoders */ - if (!i915_mmio_reg_valid(intel_dp->output_reg)) - continue; + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); WARN_ON(intel_dp->active_pipe != INVALID_PIPE); @@ -953,7 +936,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp) } static uint32_t -intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) +intel_dp_aux_wait_done(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); @@ -961,14 +944,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) bool done; #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) - if (has_aux_irq) - done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, - msecs_to_jiffies_timeout(10)); - else - done = wait_for(C, 10) == 0; + done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, + msecs_to_jiffies_timeout(10)); if (!done) - DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", - has_aux_irq); + DRM_ERROR("dp aux hw did not signal timeout!\n"); #undef C return status; @@ -1033,7 +1012,6 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) } static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, - bool has_aux_irq, int send_bytes, uint32_t aux_clock_divider) { @@ -1054,7 +1032,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, return DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | - (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_INTERRUPT | DP_AUX_CH_CTL_TIME_OUT_ERROR | timeout | DP_AUX_CH_CTL_RECEIVE_ERROR | @@ -1064,13 +1042,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, } static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, - bool has_aux_irq, int send_bytes, uint32_t unused) { return DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | - (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_INTERRUPT | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_TIME_OUT_MAX | DP_AUX_CH_CTL_RECEIVE_ERROR | @@ -1093,7 +1070,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, int i, ret, recv_bytes; uint32_t status; int try, clock = 0; - bool has_aux_irq = HAS_AUX_IRQ(dev_priv); bool vdd; ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); @@ -1148,7 +1124,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, - has_aux_irq, send_bytes, aux_clock_divider); @@ -1165,7 +1140,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, /* Send the command and wait for it to complete */ I915_WRITE(ch_ctl, send_ctl); - status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); + status = intel_dp_aux_wait_done(intel_dp); /* Clear done status and any errors */ I915_WRITE(ch_ctl, @@ -2838,10 +2813,6 @@ static void vlv_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - - intel_psr_disable(intel_dp, old_crtc_state); - intel_disable_dp(encoder, old_crtc_state, old_conn_state); } @@ -3054,10 +3025,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - intel_edp_backlight_on(pipe_config, conn_state); - intel_psr_enable(intel_dp, pipe_config); } static void g4x_pre_enable_dp(struct intel_encoder *encoder, @@ -3112,16 +3080,9 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->pps_mutex); - for_each_intel_encoder(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp; - enum port port; - - if (encoder->type != INTEL_OUTPUT_DP && - encoder->type != INTEL_OUTPUT_EDP) - continue; - - intel_dp = enc_to_intel_dp(&encoder->base); - port = dp_to_dig_port(intel_dp)->base.port; + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + enum port port = encoder->port; WARN(intel_dp->active_pipe == pipe, "stealing pipe %c power sequencer from active (e)DP port %c\n", @@ -3913,129 +3874,6 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) intel_dp->is_mst); } -static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state, bool disable_wa) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - u8 buf; - int ret = 0; - int count = 0; - int attempts = 10; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) { - DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); - ret = -EIO; - goto out; - } - - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, - buf & ~DP_TEST_SINK_START) < 0) { - DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); - ret = -EIO; - goto out; - } - - do { - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); - - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_TEST_SINK_MISC, &buf) < 0) { - ret = -EIO; - goto out; - } - count = buf & DP_TEST_COUNT_MASK; - } while (--attempts && count); - - if (attempts == 0) { - DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n"); - ret = -ETIMEDOUT; 
- } - - out: - if (disable_wa) - hsw_enable_ips(crtc_state); - return ret; -} - -static int intel_dp_sink_crc_start(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - u8 buf; - int ret; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) - return -EIO; - - if (!(buf & DP_TEST_CRC_SUPPORTED)) - return -ENOTTY; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) - return -EIO; - - if (buf & DP_TEST_SINK_START) { - ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false); - if (ret) - return ret; - } - - hsw_disable_ips(crtc_state); - - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, - buf | DP_TEST_SINK_START) < 0) { - hsw_enable_ips(crtc_state); - return -EIO; - } - - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); - return 0; -} - -int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - u8 buf; - int count, ret; - int attempts = 6; - - ret = intel_dp_sink_crc_start(intel_dp, crtc_state); - if (ret) - return ret; - - do { - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); - - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_TEST_SINK_MISC, &buf) < 0) { - ret = -EIO; - goto stop; - } - count = buf & DP_TEST_COUNT_MASK; - - } while (--attempts && count == 0); - - if (attempts == 0) { - DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n"); - ret = -ETIMEDOUT; - goto stop; - } - - if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) { - ret = -EIO; - goto stop; - } - -stop: - intel_dp_sink_crc_stop(intel_dp, crtc_state, true); - return ret; -} - static bool intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) { @@ -4495,10 +4333,15 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); } + /* Handle CEC interrupts, if any */ + drm_dp_cec_irq(&intel_dp->aux); + /* defer to the hotplug work for link retraining if needed */ if (intel_dp_needs_link_retrain(intel_dp)) return false; + intel_psr_short_pulse(intel_dp); + if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); /* Send a Hotplug Uevent to userspace to start modeset */ @@ -4566,14 +4409,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) static enum drm_connector_status edp_detect(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); - enum drm_connector_status status; - - status = intel_panel_detect(dev_priv); - if (status == connector_status_unknown) - status = connector_status_connected; - - return status; + return connector_status_connected; } static bool ibx_digital_port_connected(struct intel_encoder *encoder) @@ -4809,6 +4645,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp) intel_connector->detect_edid = edid; intel_dp->has_audio = drm_detect_monitor_audio(edid); + drm_dp_cec_set_edid(&intel_dp->aux, edid); } static void @@ -4816,6 +4653,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) { struct intel_connector *intel_connector = intel_dp->attached_connector; + drm_dp_cec_unset_edid(&intel_dp->aux); 
kfree(intel_connector->detect_edid); intel_connector->detect_edid = NULL; @@ -4834,7 +4672,7 @@ intel_dp_long_pulse(struct intel_connector *connector) intel_display_power_get(dev_priv, intel_dp->aux_power_domain); - /* Can't disconnect eDP, but you can close the lid... */ + /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) @@ -5004,6 +4842,7 @@ static int intel_dp_connector_register(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_device *dev = connector->dev; int ret; ret = intel_connector_register(connector); @@ -5016,13 +4855,20 @@ intel_dp_connector_register(struct drm_connector *connector) intel_dp->aux.name, connector->kdev->kobj.name); intel_dp->aux.dev = connector->kdev; - return drm_dp_aux_register(&intel_dp->aux); + ret = drm_dp_aux_register(&intel_dp->aux); + if (!ret) + drm_dp_cec_register_connector(&intel_dp->aux, + connector->name, dev->dev); + return ret; } static void intel_dp_connector_unregister(struct drm_connector *connector) { - drm_dp_aux_unregister(&intel_attached_dp(connector)->aux); + struct intel_dp *intel_dp = intel_attached_dp(connector); + + drm_dp_cec_unregister_connector(&intel_dp->aux); + drm_dp_aux_unregister(&intel_dp->aux); intel_connector_unregister(connector); } @@ -6218,7 +6064,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, edid = drm_get_edid(connector, &intel_dp->aux.ddc); if (edid) { if (drm_add_edid_modes(connector, edid)) { - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, edid); } else { kfree(edid); @@ -6307,8 +6153,8 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) /* Set connector link status to BAD and send a Uevent to notify * userspace to do a modeset. 
*/ - drm_mode_connector_set_link_status_property(connector, - DRM_MODE_LINK_STATUS_BAD); + drm_connector_set_link_status_property(connector, + DRM_MODE_LINK_STATUS_BAD); mutex_unlock(&connector->dev->mode_config.mutex); /* Send Hotplug uevent so userspace can reprobe */ drm_kms_helper_hotplug_event(connector->dev); @@ -6501,7 +6347,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->port = port; intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; - dev_priv->hotplug.irq_port[port] = intel_dig_port; if (port != PORT_A) intel_infoframe_init(intel_dig_port); @@ -6520,37 +6365,44 @@ err_connector_alloc: return false; } -void intel_dp_mst_suspend(struct drm_device *dev) +void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - int i; + struct intel_encoder *encoder; - /* disable MST */ - for (i = 0; i < I915_MAX_PORTS; i++) { - struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; + for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp; + + if (encoder->type != INTEL_OUTPUT_DDI) + continue; - if (!intel_dig_port || !intel_dig_port->dp.can_mst) + intel_dp = enc_to_intel_dp(&encoder->base); + + if (!intel_dp->can_mst) continue; - if (intel_dig_port->dp.is_mst) - drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); + if (intel_dp->is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); } } -void intel_dp_mst_resume(struct drm_device *dev) +void intel_dp_mst_resume(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - int i; + struct intel_encoder *encoder; - for (i = 0; i < I915_MAX_PORTS; i++) { - struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; + for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp; int ret; - if (!intel_dig_port || !intel_dig_port->dp.can_mst) + if (encoder->type != INTEL_OUTPUT_DDI) + continue; + + intel_dp = enc_to_intel_dp(&encoder->base); + + if (!intel_dp->can_mst) continue; - ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); + ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); if (ret) - intel_dp_check_mst_status(&intel_dig_port->dp); + intel_dp_check_mst_status(intel_dp); } } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 5890500a3a8b..7e3e01607643 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -403,20 +403,10 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c return &intel_dp->mst_encoders[crtc->pipe]->base.base; } -static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_dp *intel_dp = intel_connector->mst_port; - if (!intel_dp) - return NULL; - return &intel_dp->mst_encoders[0]->base.base; -} - static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, .atomic_best_encoder = intel_mst_atomic_best_encoder, - .best_encoder = intel_mst_best_encoder, .atomic_check = intel_dp_mst_atomic_check, }; @@ -476,8 +466,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo struct drm_encoder *enc = &intel_dp->mst_encoders[pipe]->base.base; - ret = drm_mode_connector_attach_encoder(&intel_connector->base, - enc); + ret = 
drm_connector_attach_encoder(&intel_connector->base, enc); if (ret) goto err; } @@ -485,7 +474,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); - ret = drm_mode_connector_set_path_property(connector, pathprop); + ret = drm_connector_set_path_property(connector, pathprop); if (ret) goto err; @@ -524,7 +513,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, intel_connector->mst_port = NULL; drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); - drm_connector_unreference(connector); + drm_connector_put(connector); } static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 156f8e4cbe4c..b51ad2917dbe 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -2566,6 +2566,7 @@ int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, switch (index) { default: MISSING_CASE(index); + /* fall through */ case 0: link_clock = 540000; break; @@ -2639,6 +2640,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, switch (div1) { default: MISSING_CASE(div1); + /* fall through */ case 2: hsdiv = 0; break; @@ -2812,25 +2814,31 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, MG_PLL_SSC_FLLEN | MG_PLL_SSC_STEPSIZE(ssc_stepsize); - pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART; - - if (refclk_khz != 38400) { - pll_state->mg_pll_tdc_coldst_bias |= - MG_PLL_TDC_COLDST_IREFINT_EN | - MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | - MG_PLL_TDC_COLDST_COLDSTART | - MG_PLL_TDC_TDCOVCCORR_EN | - MG_PLL_TDC_TDCSEL(3); - - pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | - MG_PLL_BIAS_INIT_DCOAMP(0x3F) | - MG_PLL_BIAS_BIAS_BONUS(10) | - MG_PLL_BIAS_BIASCAL_EN | - MG_PLL_BIAS_CTRIM(12) | - MG_PLL_BIAS_VREF_RDAC(4) | - MG_PLL_BIAS_IREFTRIM(iref_trim); + pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART | + MG_PLL_TDC_COLDST_IREFINT_EN | + MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | + MG_PLL_TDC_TDCOVCCORR_EN | + MG_PLL_TDC_TDCSEL(3); + + pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | + MG_PLL_BIAS_INIT_DCOAMP(0x3F) | + MG_PLL_BIAS_BIAS_BONUS(10) | + MG_PLL_BIAS_BIASCAL_EN | + MG_PLL_BIAS_CTRIM(12) | + MG_PLL_BIAS_VREF_RDAC(4) | + MG_PLL_BIAS_IREFTRIM(iref_trim); + + if (refclk_khz == 38400) { + pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; + pll_state->mg_pll_bias_mask = 0; + } else { + pll_state->mg_pll_tdc_coldst_bias_mask = -1U; + pll_state->mg_pll_bias_mask = -1U; } + pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask; + pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; + return true; } @@ -2897,6 +2905,7 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) switch (id) { default: MISSING_CASE(id); + /* fall through */ case DPLL_ID_ICL_DPLL0: case DPLL_ID_ICL_DPLL1: return CNL_DPLL_ENABLE(id); @@ -2939,18 +2948,41 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, case DPLL_ID_ICL_MGPLL4: port = icl_mg_pll_id_to_port(id); hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); + hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; + hw_state->mg_clktop2_coreclkctl1 = 
I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); + hw_state->mg_clktop2_coreclkctl1 &= + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + hw_state->mg_clktop2_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); + hw_state->mg_clktop2_hsclkctl &= + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; + hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port)); hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port)); hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port)); hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port)); hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port)); + hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port)); hw_state->mg_pll_tdc_coldst_bias = I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); + + if (dev_priv->cdclk.hw.ref == 38400) { + hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; + hw_state->mg_pll_bias_mask = 0; + } else { + hw_state->mg_pll_tdc_coldst_bias_mask = -1U; + hw_state->mg_pll_bias_mask = -1U; + } + + hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; + hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; break; default: MISSING_CASE(id); @@ -2978,19 +3010,48 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; enum port port = icl_mg_pll_id_to_port(pll->info->id); + u32 val; + + /* + * Some of the following registers have reserved fields, so program + * these with RMW based on a mask. The mask can be fixed or generated + * during the calc/readout phase if the mask depends on some other HW + * state like refclk, see icl_calc_mg_pll_state(). + */ + val = I915_READ(MG_REFCLKIN_CTL(port)); + val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; + val |= hw_state->mg_refclkin_ctl; + I915_WRITE(MG_REFCLKIN_CTL(port), val); + + val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); + val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + val |= hw_state->mg_clktop2_coreclkctl1; + I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val); + + val = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); + val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); + val |= hw_state->mg_clktop2_hsclkctl; + I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val); - I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl); - I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), - hw_state->mg_clktop2_coreclkctl1); - I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl); I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0); I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1); I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf); I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock); I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc); - I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias); - I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), - hw_state->mg_pll_tdc_coldst_bias); + + val = I915_READ(MG_PLL_BIAS(port)); + val &= ~hw_state->mg_pll_bias_mask; + val |= hw_state->mg_pll_bias; + I915_WRITE(MG_PLL_BIAS(port), val); + + val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); + val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; + val |= hw_state->mg_pll_tdc_coldst_bias; + I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val); + POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port)); } diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index ba925c7ee482..7e522cf4f13f 100644 --- 
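icl_mg_pll_write() now programs several MG PLL registers by read-modify-write, and the readout/calculation paths keep a mask alongside each value (for a 38.4 MHz refclk the bias mask collapses to nothing, per icl_calc_mg_pll_state()). A simulated-register sketch of the value+mask scheme:

#include <stdint.h>
#include <stdio.h>

struct pll_field {
	uint32_t value;
	uint32_t mask;	/* bits software owns; may depend on refclk */
};

static uint32_t mg_pll_bias = 0xdead0000;	/* reserved bits live here */

static void pll_write(uint32_t *reg, const struct pll_field *f)
{
	uint32_t val = *reg;		/* I915_READ  */

	val &= ~f->mask;		/* clear only the bits we own */
	val |= f->value & f->mask;
	*reg = val;			/* I915_WRITE */
}

int main(void)
{
	struct pll_field bias = { .value = 0x1234, .mask = 0x0000ffff };

	pll_write(&mg_pll_bias, &bias);
	printf("MG_PLL_BIAS = 0x%08x\n", mg_pll_bias);	/* 0xdead1234 */
	return 0;
}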
a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -180,6 +180,8 @@ struct intel_dpll_hw_state { uint32_t mg_pll_ssc; uint32_t mg_pll_bias; uint32_t mg_pll_tdc_coldst_bias; + uint32_t mg_pll_bias_mask; + uint32_t mg_pll_tdc_coldst_bias_mask; }; /** diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0c3ac0eafde0..c275f91244a6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -254,7 +254,8 @@ struct intel_encoder { struct intel_crtc_state *pipe_config); /* Returns a mask of power domains that need to be referenced as part * of the hardware state readout code. */ - u64 (*get_power_domains)(struct intel_encoder *encoder); + u64 (*get_power_domains)(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); /* * Called during system suspend after all pending requests for the * encoder are flushed (for example for DP AUX transactions) and @@ -303,6 +304,8 @@ struct intel_panel { } backlight; }; +struct intel_digital_port; + /* * This structure serves as a translation layer between the generic HDCP code * and the bus-specific code. What that means is that HDCP over HDMI differs @@ -1133,7 +1136,6 @@ struct intel_dp { * register with to kick off an AUX transaction. */ uint32_t (*get_aux_send_ctl)(struct intel_dp *dp, - bool has_aux_irq, int send_bytes, uint32_t aux_clock_divider); @@ -1246,22 +1248,29 @@ intel_attached_encoder(struct drm_connector *connector) return to_intel_connector(connector)->encoder; } -static inline struct intel_digital_port * -enc_to_dig_port(struct drm_encoder *encoder) +static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - - switch (intel_encoder->type) { + switch (encoder->type) { case INTEL_OUTPUT_DDI: - WARN_ON(!HAS_DDI(to_i915(encoder->dev))); case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: case INTEL_OUTPUT_HDMI: + return true; + default: + return false; + } +} + +static inline struct intel_digital_port * +enc_to_dig_port(struct drm_encoder *encoder) +{ + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + + if (intel_encoder_is_dig_port(intel_encoder)) return container_of(encoder, struct intel_digital_port, base.base); - default: + else return NULL; - } } static inline struct intel_dp_mst_encoder * @@ -1275,6 +1284,20 @@ static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) return &enc_to_dig_port(encoder)->dp; } +static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) +{ + switch (encoder->type) { + case INTEL_OUTPUT_DP: + case INTEL_OUTPUT_EDP: + return true; + case INTEL_OUTPUT_DDI: + /* Skip pure HDMI/DVI DDI encoders */ + return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg); + default: + return false; + } +} + static inline struct intel_digital_port * dp_to_dig_port(struct intel_dp *intel_dp) { @@ -1331,9 +1354,6 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv); void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv); /* i915_irq.c */ -bool gen11_reset_one_iir(struct drm_i915_private * const i915, - const unsigned int bank, - const unsigned int bit); void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); @@ -1384,8 +1404,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc, void 
intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state); -void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder); +void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state); @@ -1664,8 +1683,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); void intel_dp_encoder_reset(struct drm_encoder *encoder); void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); void intel_dp_encoder_destroy(struct drm_encoder *encoder); -int intel_dp_sink_crc(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); @@ -1679,8 +1696,8 @@ void intel_edp_backlight_off(const struct drm_connector_state *conn_state); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); void intel_edp_panel_on(struct intel_dp *intel_dp); void intel_edp_panel_off(struct intel_dp *intel_dp); -void intel_dp_mst_suspend(struct drm_device *dev); -void intel_dp_mst_resume(struct drm_device *dev); +void intel_dp_mst_suspend(struct drm_i915_private *dev_priv); +void intel_dp_mst_resume(struct drm_i915_private *dev_priv); int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_max_lane_count(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); @@ -1730,8 +1747,8 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); /* intel_dp_mst.c */ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); -/* intel_dsi.c */ -void intel_dsi_init(struct drm_i915_private *dev_priv); +/* vlv_dsi.c */ +void vlv_dsi_init(struct drm_i915_private *dev_priv); /* intel_dsi_dcs_backlight.c */ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); @@ -1873,7 +1890,6 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); void intel_panel_destroy_backlight(struct drm_connector *connector); -enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv); extern struct drm_display_mode *intel_find_panel_downclock( struct drm_i915_private *dev_priv, struct drm_display_mode *fixed_mode, @@ -1921,6 +1937,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug); void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); +void intel_psr_short_pulse(struct intel_dp *intel_dp); +int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state); /* intel_runtime_pm.c */ int intel_power_domains_init(struct drm_i915_private *); @@ -2069,7 +2087,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, /* intel_sprite.c */ -bool intel_format_is_yuv(u32 format); int intel_usecs_to_scanlines(const struct 
drm_display_mode *adjusted_mode, int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, @@ -2085,7 +2102,6 @@ void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc); bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe); bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id); -bool intel_format_is_yuv(uint32_t format); bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id); @@ -2151,7 +2167,6 @@ void lspcon_resume(struct intel_lspcon *lspcon); void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); /* intel_pipe_crc.c */ -int intel_pipe_crc_create(struct drm_minor *minor); #ifdef CONFIG_DEBUG_FS int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, size_t *values_cnt); @@ -2167,5 +2182,4 @@ static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc) { } #endif -extern const struct file_operations i915_display_crc_ctl_fops; #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 7afeb9580f41..ad7c1cb32983 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -129,21 +129,29 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) return container_of(encoder, struct intel_dsi, base.base); } -/* intel_dsi.c */ -void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port); +/* vlv_dsi.c */ +void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); -/* intel_dsi_pll.c */ -bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); -int intel_compute_dsi_pll(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void intel_enable_dsi_pll(struct intel_encoder *encoder, - const struct intel_crtc_state *config); -void intel_disable_dsi_pll(struct intel_encoder *encoder); -u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, - struct intel_crtc_state *config); -void intel_dsi_reset_clocks(struct intel_encoder *encoder, - enum port port); +/* vlv_dsi_pll.c */ +int vlv_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void vlv_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void vlv_dsi_pll_disable(struct intel_encoder *encoder); +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, + struct intel_crtc_state *config); +void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); + +bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); +int bxt_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void bxt_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void bxt_dsi_pll_disable(struct intel_encoder *encoder); +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, + struct intel_crtc_state *config); +void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); /* intel_dsi_vbt.c */ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 4d6ffa7b3e7b..ac83d6b89ae0 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -181,7 +181,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, break; 
} - wait_for_dsi_fifo_empty(intel_dsi, port); + vlv_dsi_wait_for_fifo_empty(intel_dsi, port); out: data += len; diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 32bf3a408d46..2d1952849d69 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -25,7 +25,6 @@ #include <drm/drm_print.h> #include "i915_drv.h" -#include "i915_vgpu.h" #include "intel_ringbuffer.h" #include "intel_lrc.h" @@ -230,6 +229,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) break; default: MISSING_CASE(class); + /* fall through */ case VIDEO_DECODE_CLASS: case VIDEO_ENHANCEMENT_CLASS: case COPY_ENGINE_CLASS: @@ -302,6 +302,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->class); if (WARN_ON(engine->context_size > BIT(20))) engine->context_size = 0; + if (engine->context_size) + DRIVER_CAPS(dev_priv)->has_logical_contexts = true; /* Nothing to do here, execute in order of dependencies */ engine->schedule = NULL; @@ -456,28 +458,16 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) i915_gem_batch_pool_init(&engine->batch_pool, engine); } -static bool csb_force_mmio(struct drm_i915_private *i915) -{ - /* Older GVT emulation depends upon intercepting CSB mmio */ - if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915)) - return true; - - return false; -} - static void intel_engine_init_execlist(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - execlists->csb_use_mmio = csb_force_mmio(engine->i915); - execlists->port_mask = 1; BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); execlists->queue_priority = INT_MIN; - execlists->queue = RB_ROOT; - execlists->first = NULL; + execlists->queue = RB_ROOT_CACHED; } /** @@ -492,6 +482,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) void intel_engine_setup_common(struct intel_engine_cs *engine) { i915_timeline_init(engine->i915, &engine->timeline, engine->name); + lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE); intel_engine_init_execlist(engine); intel_engine_init_hangcheck(engine); @@ -998,19 +989,23 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) /* Waiting to drain ELSP? */ if (READ_ONCE(engine->execlists.active)) { - struct intel_engine_execlists *execlists = &engine->execlists; - - if (tasklet_trylock(&execlists->tasklet)) { - execlists->tasklet.func(execlists->tasklet.data); - tasklet_unlock(&execlists->tasklet); + struct tasklet_struct *t = &engine->execlists.tasklet; + + local_bh_disable(); + if (tasklet_trylock(t)) { + /* Must wait for any GPU reset in progress. */ + if (__tasklet_is_enabled(t)) + t->func(t->data); + tasklet_unlock(t); } + local_bh_enable(); - if (READ_ONCE(execlists->active)) + if (READ_ONCE(engine->execlists.active)) return false; } /* ELSP is empty, but there are ready requests? E.g. after reset */ - if (READ_ONCE(engine->execlists.first)) + if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) return false; /* Ring stopped? */ @@ -1363,12 +1358,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); read = GEN8_CSB_READ_PTR(ptr); write = GEN8_CSB_WRITE_PTR(ptr); - drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? 
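Replacing execlists->first plus RB_ROOT with RB_ROOT_CACHED means the leftmost (highest-priority) node is cached inside the root and maintained on insert, so peeking at the best request is O(1) via rb_first_cached(). A toy analogue of the cached-leftmost idea over a linked list rather than an rbtree:

#include <stdio.h>

struct node {
	int prio;
	struct node *next;
};

struct cached_root {
	struct node *head;	/* the container itself */
	struct node *leftmost;	/* cached best node, like rb_first_cached() */
};

static void insert(struct cached_root *r, struct node *n)
{
	n->next = r->head;
	r->head = n;
	/* maintain the cache on insert instead of rescanning on peek */
	if (!r->leftmost || n->prio < r->leftmost->prio)
		r->leftmost = n;
}

int main(void)
{
	struct cached_root root = { NULL, NULL };
	struct node a = { .prio = 5 }, b = { .prio = 1 }, c = { .prio = 3 };

	insert(&root, &a);
	insert(&root, &b);
	insert(&root, &c);
	printf("best prio = %d\n", root.leftmost->prio);	/* 1, found in O(1) */
	return 0;
}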
%s (%s)\n", + drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n", read, execlists->csb_head, write, intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), - yesno(test_bit(ENGINE_IRQ_EXECLIST, - &engine->irq_posted)), yesno(test_bit(TASKLET_STATE_SCHED, &engine->execlists.tasklet.state)), enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); @@ -1548,7 +1541,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, last = NULL; count = 0; drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); - for (rb = execlists->first; rb; rb = rb_next(rb)) { + for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); @@ -1580,11 +1573,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, spin_unlock(&b->rb_lock); local_irq_restore(flags); - drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n", + drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n", engine->irq_posted, yesno(test_bit(ENGINE_IRQ_BREADCRUMB, - &engine->irq_posted)), - yesno(test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))); drm_printf(m, "HWSP:\n"); @@ -1633,8 +1624,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) if (!intel_engine_supports_stats(engine)) return -ENODEV; - tasklet_disable(&execlists->tasklet); - write_seqlock_irqsave(&engine->stats.lock, flags); + spin_lock_irqsave(&engine->timeline.lock, flags); + write_seqlock(&engine->stats.lock); if (unlikely(engine->stats.enabled == ~0)) { err = -EBUSY; @@ -1658,8 +1649,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) } unlock: - write_sequnlock_irqrestore(&engine->stats.lock, flags); - tasklet_enable(&execlists->tasklet); + write_sequnlock(&engine->stats.lock); + spin_unlock_irqrestore(&engine->timeline.lock, flags); return err; } diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index b431b6733cc1..01d1d2088f04 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -399,89 +399,6 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv) return dev_priv->fbc.active; } -static void intel_fbc_work_fn(struct work_struct *__work) -{ - struct drm_i915_private *dev_priv = - container_of(__work, struct drm_i915_private, fbc.work.work); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_work *work = &fbc->work; - struct intel_crtc *crtc = fbc->crtc; - struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe]; - - if (drm_crtc_vblank_get(&crtc->base)) { - /* CRTC is now off, leave FBC deactivated */ - mutex_lock(&fbc->lock); - work->scheduled = false; - mutex_unlock(&fbc->lock); - return; - } - -retry: - /* Delay the actual enabling to let pageflipping cease and the - * display to settle before starting the compression. Note that - * this delay also serves a second purpose: it allows for a - * vblank to pass after disabling the FBC before we attempt - * to modify the control registers. - * - * WaFbcWaitForVBlankBeforeEnable:ilk,snb - * - * It is also worth mentioning that since work->scheduled_vblank can be - * updated multiple times by the other threads, hitting the timeout is - * not an error condition. We'll just end up hitting the "goto retry" - * case below. - */ - wait_event_timeout(vblank->queue, - drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank, - msecs_to_jiffies(50)); - - mutex_lock(&fbc->lock); - - /* Were we cancelled? 
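A note on the intel_enable_engine_stats() rework above: since the submission tasklet now runs under engine->timeline.lock, taking that lock excludes the tasklet without having to disable it, and the stats seqlock simply nests inside. A reduced sketch of the locking shape, with the bookkeeping elided:

static int enable_stats_sketch(struct intel_engine_cs *engine)
{
        unsigned long flags;
        int err = 0;

        /* excludes the tasklet, which also runs under timeline.lock */
        spin_lock_irqsave(&engine->timeline.lock, flags);
        write_seqlock(&engine->stats.lock);
        /* ... sample the ports and mark stats enabled ... */
        write_sequnlock(&engine->stats.lock);
        spin_unlock_irqrestore(&engine->timeline.lock, flags);

        return err;
}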
*/ - if (!work->scheduled) - goto out; - - /* Were we delayed again while this function was sleeping? */ - if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) { - mutex_unlock(&fbc->lock); - goto retry; - } - - intel_fbc_hw_activate(dev_priv); - - work->scheduled = false; - -out: - mutex_unlock(&fbc->lock); - drm_crtc_vblank_put(&crtc->base); -} - -static void intel_fbc_schedule_activation(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_work *work = &fbc->work; - - WARN_ON(!mutex_is_locked(&fbc->lock)); - if (WARN_ON(!fbc->enabled)) - return; - - if (drm_crtc_vblank_get(&crtc->base)) { - DRM_ERROR("vblank not available for FBC on pipe %c\n", - pipe_name(crtc->pipe)); - return; - } - - /* It is useless to call intel_fbc_cancel_work() or cancel_work() in - * this function since we're not releasing fbc.lock, so it won't have an - * opportunity to grab it to discover that it was cancelled. So we just - * update the expected jiffy count. */ - work->scheduled = true; - work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base); - drm_crtc_vblank_put(&crtc->base); - - schedule_work(&work->work); -} - static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, const char *reason) { @@ -489,11 +406,6 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, WARN_ON(!mutex_is_locked(&fbc->lock)); - /* Calling cancel_work() here won't help due to the fact that the work - * function grabs fbc->lock. Just set scheduled to false so the work - * function can know it was cancelled. */ - fbc->work.scheduled = false; - if (fbc->active) intel_fbc_hw_deactivate(dev_priv); @@ -924,13 +836,6 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, 32 * fbc->threshold) * 8; } -static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, - struct intel_fbc_reg_params *params2) -{ - /* We can use this since intel_fbc_get_reg_params() does a memset. */ - return memcmp(params1, params2, sizeof(*params1)) == 0; -} - void intel_fbc_pre_update(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) @@ -953,6 +858,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc, goto unlock; intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->flip_pending = true; deactivate: intel_fbc_deactivate(dev_priv, reason); @@ -988,13 +894,15 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_reg_params old_params; WARN_ON(!mutex_is_locked(&fbc->lock)); if (!fbc->enabled || fbc->crtc != crtc) return; + fbc->flip_pending = false; + WARN_ON(fbc->active); + if (!i915_modparams.enable_fbc) { intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); __intel_fbc_disable(dev_priv); @@ -1002,25 +910,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) return; } - if (!intel_fbc_can_activate(crtc)) { - WARN_ON(fbc->active); - return; - } - - old_params = fbc->params; intel_fbc_get_reg_params(crtc, &fbc->params); - /* If the scanout has not changed, don't modify the FBC settings. - * Note that we make the fundamental assumption that the fb->obj - * cannot be unpinned (and have its GTT offset and fence revoked) - * without first being decoupled from the scanout and FBC disabled. 
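The deleted worker above is replaced by a flip_pending flag and synchronous activation once the flip has landed. The resulting state flow, condensed from the pre/post update hunks in this file (guards shortened for illustration):

        /* intel_fbc_pre_update(): a flip is about to happen */
        fbc->flip_pending = true;
        intel_fbc_deactivate(dev_priv, reason);

        /* __intel_fbc_post_update(): the flip has landed */
        fbc->flip_pending = false;
        if (intel_fbc_can_activate(crtc) && !fbc->busy_bits)
                intel_fbc_hw_activate(dev_priv);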
- */ - if (fbc->active && - intel_fbc_reg_params_equal(&old_params, &fbc->params)) + if (!intel_fbc_can_activate(crtc)) return; - intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)"); - intel_fbc_schedule_activation(crtc); + if (!fbc->busy_bits) { + intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)"); + intel_fbc_hw_activate(dev_priv); + } else + intel_fbc_deactivate(dev_priv, "frontbuffer write"); } void intel_fbc_post_update(struct intel_crtc *crtc) @@ -1085,7 +984,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) intel_fbc_recompress(dev_priv); - else + else if (!fbc->flip_pending) __intel_fbc_post_update(fbc->crtc); } @@ -1225,8 +1124,6 @@ void intel_fbc_disable(struct intel_crtc *crtc) if (fbc->crtc == crtc) __intel_fbc_disable(dev_priv); mutex_unlock(&fbc->lock); - - cancel_work_sync(&fbc->work.work); } /** @@ -1248,8 +1145,6 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv) __intel_fbc_disable(dev_priv); } mutex_unlock(&fbc->lock); - - cancel_work_sync(&fbc->work.work); } static void intel_fbc_underrun_work_fn(struct work_struct *work) @@ -1400,12 +1295,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; - INIT_WORK(&fbc->work.work, intel_fbc_work_fn); INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); mutex_init(&fbc->lock); fbc->enabled = false; fbc->active = false; - fbc->work.scheduled = false; if (need_fbc_vtd_wa(dev_priv)) mkwrite_device_info(dev_priv)->has_fbc = false; diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 1aff30b0870c..560c7406ae40 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -27,6 +27,8 @@ #include "intel_guc_submission.h" #include "i915_drv.h" +static void guc_init_ggtt_pin_bias(struct intel_guc *guc); + static void gen8_guc_raise_irq(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -73,7 +75,7 @@ void intel_guc_init_early(struct intel_guc *guc) guc->notify = gen8_guc_raise_irq; } -int intel_guc_init_wq(struct intel_guc *guc) +static int guc_init_wq(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -124,7 +126,7 @@ int intel_guc_init_wq(struct intel_guc *guc) return 0; } -void intel_guc_fini_wq(struct intel_guc *guc) +static void guc_fini_wq(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -135,6 +137,28 @@ void intel_guc_fini_wq(struct intel_guc *guc) destroy_workqueue(guc->log.relay.flush_wq); } +int intel_guc_init_misc(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_i915(guc); + int ret; + + guc_init_ggtt_pin_bias(guc); + + ret = guc_init_wq(guc); + if (ret) + return ret; + + intel_uc_fw_fetch(i915, &guc->fw); + + return 0; +} + +void intel_guc_fini_misc(struct intel_guc *guc) +{ + intel_uc_fw_fini(&guc->fw); + guc_fini_wq(guc); +} + static int guc_shared_data_create(struct intel_guc *guc) { struct i915_vma *vma; @@ -169,7 +193,7 @@ int intel_guc_init(struct intel_guc *guc) ret = guc_shared_data_create(guc); if (ret) - return ret; + goto err_fetch; GEM_BUG_ON(!guc->shared_data); ret = intel_guc_log_create(&guc->log); @@ -190,6 +214,8 @@ err_log: intel_guc_log_destroy(&guc->log); err_shared: guc_shared_data_destroy(guc); +err_fetch: + intel_uc_fw_fini(&guc->fw); return ret; } @@ -201,12 +227,17 @@ void intel_guc_fini(struct intel_guc *guc) intel_guc_ads_destroy(guc); 
intel_guc_log_destroy(&guc->log); guc_shared_data_destroy(guc); + intel_uc_fw_fini(&guc->fw); } static u32 guc_ctl_debug_flags(struct intel_guc *guc) { u32 level = intel_guc_log_get_level(&guc->log); - u32 flags = 0; + u32 flags; + u32 ads; + + ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; + flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED; if (!GUC_LOG_LEVEL_IS_ENABLED(level)) flags |= GUC_LOG_DEFAULT_DISABLED; @@ -217,13 +248,6 @@ static u32 guc_ctl_debug_flags(struct intel_guc *guc) flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) << GUC_LOG_VERBOSITY_SHIFT; - if (USES_GUC_SUBMISSION(guc_to_i915(guc))) { - u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) - >> PAGE_SHIFT; - - flags |= ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED; - } - return flags; } @@ -327,6 +351,9 @@ void intel_guc_init_params(struct intel_guc *guc) params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); + for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) + DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); + /* * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and * they are power context saved so it's ok to release forcewake @@ -439,11 +466,13 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc) * could happen that GuC sets the bit for 2nd interrupt but Host * clears out the bit on handling the 1st interrupt. */ + disable_rpm_wakeref_asserts(dev_priv); spin_lock(&guc->irq_lock); val = I915_READ(SOFT_SCRATCH(15)); msg = val & guc->msg_enabled_mask; I915_WRITE(SOFT_SCRATCH(15), val & ~msg); spin_unlock(&guc->irq_lock); + enable_rpm_wakeref_asserts(dev_priv); intel_guc_to_host_process_recv_msg(guc, msg); } @@ -585,13 +614,13 @@ int intel_guc_resume(struct intel_guc *guc) */ /** - * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. + * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. * @guc: intel_guc structure. * * This function will calculate and initialize the ggtt_pin_bias value based on * overall WOPCM size and GuC WOPCM size. 
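intel_guc_init_misc() groups the ggtt_pin_bias setup, workqueue creation and firmware fetch behind a single entry point, mirrored by intel_guc_fini_misc(); intel_huc_init_misc(), added further down in intel_huc.c, plays the same role for HuC. A sketch of the presumed caller-side pairing (the actual call sites live in the common uC init path, outside these hunks):

int uc_init_misc_sketch(struct drm_i915_private *i915)
{
        int ret;

        ret = intel_guc_init_misc(&i915->guc);  /* pin bias, wq, fw fetch */
        if (ret)
                return ret;

        ret = intel_huc_init_misc(&i915->huc);  /* fw fetch */
        if (ret)
                intel_guc_fini_misc(&i915->guc); /* unwind on failure */

        return ret;
}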
*/ -void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc) +static void guc_init_ggtt_pin_bias(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_i915(guc); diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index f1265e122d30..4121928a495e 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -151,11 +151,10 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, void intel_guc_init_early(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc); void intel_guc_init_params(struct intel_guc *guc); -void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc); -int intel_guc_init_wq(struct intel_guc *guc); -void intel_guc_fini_wq(struct intel_guc *guc); +int intel_guc_init_misc(struct intel_guc *guc); int intel_guc_init(struct intel_guc *guc); void intel_guc_fini(struct intel_guc *guc); +void intel_guc_fini_misc(struct intel_guc *guc); int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index f3945258fe1b..4aa5e6463e7b 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -628,13 +628,14 @@ static void complete_preempt_context(struct intel_engine_cs *engine) GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); + if (inject_preempt_hang(execlists)) + return; + execlists_cancel_port_requests(execlists); execlists_unwind_incomplete_requests(execlists); wait_for_guc_preempt_report(engine); intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0); - - execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT); } /** @@ -695,9 +696,6 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) lockdep_assert_held(&engine->timeline.lock); - rb = execlists->first; - GEM_BUG_ON(rb_first(&execlists->queue) != rb); - if (port_isset(port)) { if (intel_engine_has_preemption(engine)) { struct guc_preempt_work *preempt_work = @@ -719,7 +717,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) } GEM_BUG_ON(port_isset(port)); - while (rb) { + while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); struct i915_request *rq, *rn; @@ -744,15 +742,13 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) submit = true; } - rb = rb_next(rb); - rb_erase(&p->node, &execlists->queue); + rb_erase_cached(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } done: execlists->queue_priority = rb ? 
to_priolist(rb)->priority : INT_MIN; - execlists->first = rb; if (submit) port_assign(port, last); if (last) @@ -761,7 +757,8 @@ done: /* We must always keep the beast fed if we have work piled up */ GEM_BUG_ON(port_isset(execlists->port) && !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); - GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); + GEM_BUG_ON(rb_first_cached(&execlists->queue) && + !port_isset(execlists->port)); return submit; } @@ -914,8 +911,12 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc) __update_doorbell_desc(guc->preempt_client, GUC_DOORBELL_INVALID); } - __destroy_doorbell(guc->execbuf_client); - __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID); + + if (guc->execbuf_client) { + __destroy_doorbell(guc->execbuf_client); + __update_doorbell_desc(guc->execbuf_client, + GUC_DOORBELL_INVALID); + } } /** @@ -1128,7 +1129,8 @@ static void guc_clients_destroy(struct intel_guc *guc) guc_client_free(client); client = fetch_and_zero(&guc->execbuf_client); - guc_client_free(client); + if (client) + guc_client_free(client); } /* @@ -1183,7 +1185,8 @@ void intel_guc_submission_fini(struct intel_guc *guc) guc_clients_destroy(guc); WARN_ON(!guc_verify_doorbells(guc)); - guc_stage_desc_pool_destroy(guc); + if (guc->stage_desc_pool) + guc_stage_desc_pool_destroy(guc); } static void guc_interrupts_capture(struct drm_i915_private *dev_priv) @@ -1266,6 +1269,31 @@ static void guc_submission_unpark(struct intel_engine_cs *engine) intel_engine_pin_breadcrumbs_irq(engine); } +static void guc_set_default_submission(struct intel_engine_cs *engine) +{ + /* + * We inherit a bunch of functions from execlists that we'd like + * to keep using: + * + * engine->submit_request = execlists_submit_request; + * engine->cancel_requests = execlists_cancel_requests; + * engine->schedule = execlists_schedule; + * + * But we need to override the actual submission backend in order + * to talk to the GuC. 
+ */ + intel_execlists_set_default_submission(engine); + + engine->execlists.tasklet.func = guc_submission_tasklet; + + engine->park = guc_submission_park; + engine->unpark = guc_submission_unpark; + + engine->reset.prepare = guc_reset_prepare; + + engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; +} + int intel_guc_submission_enable(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -1304,17 +1332,8 @@ int intel_guc_submission_enable(struct intel_guc *guc) guc_interrupts_capture(dev_priv); for_each_engine(engine, dev_priv, id) { - struct intel_engine_execlists * const execlists = - &engine->execlists; - - execlists->tasklet.func = guc_submission_tasklet; - - engine->reset.prepare = guc_reset_prepare; - - engine->park = guc_submission_park; - engine->unpark = guc_submission_unpark; - - engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; + engine->set_default_submission = guc_set_default_submission; + engine->set_default_submission(engine); } return 0; @@ -1328,9 +1347,6 @@ void intel_guc_submission_disable(struct intel_guc *guc) guc_interrupts_release(dev_priv); guc_clients_doorbell_fini(guc); - - /* Revert back to manual ELSP submission */ - intel_engines_reset_default_submission(dev_priv); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index a6291f60545b..c22b3e18a0f5 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -92,6 +92,9 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) { int ret; + if (i915_inject_load_failure()) + return -ENODEV; + if (!i915_modparams.enable_gvt) { DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); return 0; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 43aa92beff2a..648a13c6043c 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -77,37 +77,6 @@ */ /** - * intel_hpd_port - return port hard associated with certain pin. - * @dev_priv: private driver data pointer - * @pin: the hpd pin to get associated port - * - * Return port that is associatade with @pin and PORT_NONE if no port is - * hard associated with that @pin. - */ -enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv, - enum hpd_pin pin) -{ - switch (pin) { - case HPD_PORT_A: - return PORT_A; - case HPD_PORT_B: - return PORT_B; - case HPD_PORT_C: - return PORT_C; - case HPD_PORT_D: - return PORT_D; - case HPD_PORT_E: - if (IS_CNL_WITH_PORT_F(dev_priv)) - return PORT_F; - return PORT_E; - case HPD_PORT_F: - return PORT_F; - default: - return PORT_NONE; /* no port for this pin */ - } -} - -/** * intel_hpd_pin_default - return default pin associated with certain port. 
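With intel_hpd_pin_to_port() removed above, the hpd handlers below stop mapping pins back to ports and instead walk the encoders, matching on encoder->hpd_pin. An illustrative helper showing the new lookup direction; the helper itself is not part of the patch:

static struct intel_encoder *
encoder_for_hpd_pin(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder)
                if (encoder->hpd_pin == pin)
                        return encoder;

        return NULL;
}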
* @dev_priv: private driver data pointer * @port: the hpd port to get associated pin @@ -241,25 +210,25 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) container_of(work, typeof(*dev_priv), hotplug.reenable_work.work); struct drm_device *dev = &dev_priv->drm; - int i; + enum hpd_pin pin; intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->irq_lock); - for_each_hpd_pin(i) { + for_each_hpd_pin(pin) { struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - if (dev_priv->hotplug.stats[i].state != HPD_DISABLED) + if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED) continue; - dev_priv->hotplug.stats[i].state = HPD_ENABLED; + dev_priv->hotplug.stats[pin].state = HPD_ENABLED; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct intel_connector *intel_connector = to_intel_connector(connector); - if (intel_connector->encoder->hpd_pin == i) { + if (intel_connector->encoder->hpd_pin == pin) { if (connector->polled != intel_connector->polled) DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", connector->name); @@ -301,13 +270,18 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder, return true; } +static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) +{ + return intel_encoder_is_dig_port(encoder) && + enc_to_dig_port(&encoder->base)->hpd_pulse != NULL; +} + static void i915_digport_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, hotplug.dig_port_work); u32 long_port_mask, short_port_mask; - struct intel_digital_port *intel_dig_port; - int i; + struct intel_encoder *encoder; u32 old_bits = 0; spin_lock_irq(&dev_priv->irq_lock); @@ -317,27 +291,27 @@ static void i915_digport_work_func(struct work_struct *work) dev_priv->hotplug.short_port_mask = 0; spin_unlock_irq(&dev_priv->irq_lock); - for (i = 0; i < I915_MAX_PORTS; i++) { - bool valid = false; - bool long_hpd = false; - intel_dig_port = dev_priv->hotplug.irq_port[i]; - if (!intel_dig_port || !intel_dig_port->hpd_pulse) + for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_digital_port *dig_port; + enum port port = encoder->port; + bool long_hpd, short_hpd; + enum irqreturn ret; + + if (!intel_encoder_has_hpd_pulse(encoder)) continue; - if (long_port_mask & (1 << i)) { - valid = true; - long_hpd = true; - } else if (short_port_mask & (1 << i)) - valid = true; + long_hpd = long_port_mask & BIT(port); + short_hpd = short_port_mask & BIT(port); - if (valid) { - enum irqreturn ret; + if (!long_hpd && !short_hpd) + continue; - ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); - if (ret == IRQ_NONE) { - /* fall back to old school hpd */ - old_bits |= (1 << intel_dig_port->base.hpd_pin); - } + dig_port = enc_to_dig_port(&encoder->base); + + ret = dig_port->hpd_pulse(dig_port, long_hpd); + if (ret == IRQ_NONE) { + /* fall back to old school hpd */ + old_bits |= BIT(encoder->hpd_pin); } } @@ -418,26 +392,24 @@ static void i915_hotplug_work_func(struct work_struct *work) void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask) { - int i; - enum port port; + struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; - bool is_dig_port; if (!pin_mask) return; spin_lock(&dev_priv->irq_lock); - for_each_hpd_pin(i) { - if (!(BIT(i) & pin_mask)) - continue; + for_each_intel_encoder(&dev_priv->drm, encoder) { + enum hpd_pin pin = encoder->hpd_pin; + bool 
has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); - port = intel_hpd_pin_to_port(dev_priv, i); - is_dig_port = port != PORT_NONE && - dev_priv->hotplug.irq_port[port]; + if (!(BIT(pin) & pin_mask)) + continue; - if (is_dig_port) { - bool long_hpd = long_mask & BIT(i); + if (has_hpd_pulse) { + bool long_hpd = long_mask & BIT(pin); + enum port port = encoder->port; DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), long_hpd ? "long" : "short"); @@ -455,7 +427,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, } } - if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) { + if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { /* * On GMCH platforms the interrupt mask bits only * prevent irq generation, not the setting of the @@ -463,20 +435,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * interrupts on saner platforms. */ WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv), - "Received HPD interrupt on pin %d although disabled\n", i); + "Received HPD interrupt on pin %d although disabled\n", pin); continue; } - if (dev_priv->hotplug.stats[i].state != HPD_ENABLED) + if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) continue; - if (!is_dig_port) { - dev_priv->hotplug.event_bits |= BIT(i); + if (!has_hpd_pulse) { + dev_priv->hotplug.event_bits |= BIT(pin); queue_hp = true; } - if (intel_hpd_irq_storm_detect(dev_priv, i)) { - dev_priv->hotplug.event_bits &= ~BIT(i); + if (intel_hpd_irq_storm_detect(dev_priv, pin)) { + dev_priv->hotplug.event_bits &= ~BIT(pin); storm_detected = true; } } diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 291285277403..ffcad5fad6a7 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -32,6 +32,14 @@ void intel_huc_init_early(struct intel_huc *huc) intel_huc_fw_init_early(huc); } +int intel_huc_init_misc(struct intel_huc *huc) +{ + struct drm_i915_private *i915 = huc_to_i915(huc); + + intel_uc_fw_fetch(i915, &huc->fw); + return 0; +} + /** * intel_huc_auth() - Authenticate HuC uCode * @huc: intel_huc structure diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h index aa854907abac..7e41d870b509 100644 --- a/drivers/gpu/drm/i915/intel_huc.h +++ b/drivers/gpu/drm/i915/intel_huc.h @@ -36,9 +36,15 @@ struct intel_huc { }; void intel_huc_init_early(struct intel_huc *huc); +int intel_huc_init_misc(struct intel_huc *huc); int intel_huc_auth(struct intel_huc *huc); int intel_huc_check_status(struct intel_huc *huc); +static inline void intel_huc_fini_misc(struct intel_huc *huc) +{ + intel_uc_fw_fini(&huc->fw); +} + static inline int intel_huc_sanitize(struct intel_huc *huc) { intel_uc_fw_sanitize(&huc->fw); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 97606c1be70d..bef32b7c248e 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -361,15 +361,39 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) return ret; } +static inline +unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv) +{ + return INTEL_GEN(dev_priv) >= 9 ? 
GEN9_GMBUS_BYTE_COUNT_MAX : + GMBUS_BYTE_COUNT_MAX; +} + static int gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, unsigned short addr, u8 *buf, unsigned int len, - u32 gmbus1_index) + u32 gmbus0_reg, u32 gmbus1_index) { + unsigned int size = len; + bool burst_read = len > gmbus_max_xfer_size(dev_priv); + bool extra_byte_added = false; + + if (burst_read) { + /* + * As per the HW spec, a 512-byte burst read needs one extra + * byte to be read, which is then ignored. + */ + if (len == 512) { + extra_byte_added = true; + len++; + } + size = len % 256 + 256; + I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); + } + I915_WRITE_FW(GMBUS1, gmbus1_index | GMBUS_CYCLE_WAIT | - (len << GMBUS_BYTE_COUNT_SHIFT) | + (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { @@ -382,17 +406,34 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, val = I915_READ_FW(GMBUS3); do { + if (extra_byte_added && len == 1) + break; + *buf++ = val & 0xff; val >>= 8; } while (--len && ++loop < 4); + + if (burst_read && len == size - 4) + /* Reset the override bit */ + I915_WRITE_FW(GMBUS0, gmbus0_reg); } return 0; } +/* + * The HW spec says that 512-byte burst reads need special treatment, but is + * silent on other multiples of 256 bytes, and no I2C slave supporting such a + * lengthy burst read was available for experiments. + * + * So until HW support is clarified, avoid burst read lengths that are a + * multiple of 256 bytes (other than 512) by capping the max burst read + * length at 767 bytes. + */ +#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U + static int gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, - u32 gmbus1_index) + u32 gmbus0_reg, u32 gmbus1_index) { u8 *buf = msg->buf; unsigned int rx_size = msg->len; @@ -400,10 +441,13 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, int ret; do { - len = min(rx_size, GMBUS_BYTE_COUNT_MAX); + if (HAS_GMBUS_BURST_READ(dev_priv)) + len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN); + else + len = min(rx_size, gmbus_max_xfer_size(dev_priv)); - ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, - buf, len, gmbus1_index); + ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len, + gmbus0_reg, gmbus1_index); if (ret) return ret; @@ -462,7 +506,7 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, int ret; do { - len = min(tx_size, GMBUS_BYTE_COUNT_MAX); + len = min(tx_size, gmbus_max_xfer_size(dev_priv)); ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len, gmbus1_index); @@ -491,7 +535,8 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) } static int -gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) +gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs, + u32 gmbus0_reg) { u32 gmbus1_index = 0; u32 gmbus5 = 0; @@ -509,7 +554,8 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) I915_WRITE_FW(GMBUS5, gmbus5); if (msgs[1].flags & I2C_M_RD) - ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg, + gmbus1_index); else ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index); @@ -544,10 +590,12 @@ retry: for (; i < num; i += inc) { inc = 1; if (gmbus_is_index_xfer(msgs, i, num)) { - ret = gmbus_index_xfer(dev_priv, &msgs[i]); + ret = gmbus_index_xfer(dev_priv, &msgs[i], + gmbus0_source | bus->reg0); inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags &
I2C_M_RD) { - ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); + ret = gmbus_xfer_read(dev_priv, &msgs[i], + gmbus0_source | bus->reg0, 0); } else { ret = gmbus_xfer_write(dev_priv, &msgs[i], 0); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 33bc914c2ef5..174479232e94 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -137,6 +137,7 @@ #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_gem_render_state.h" +#include "i915_vgpu.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" #include "intel_workarounds.h" @@ -272,7 +273,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio) find_priolist: /* most positive priority is scheduled first, equal priorities fifo */ rb = NULL; - parent = &execlists->queue.rb_node; + parent = &execlists->queue.rb_root.rb_node; while (*parent) { rb = *parent; p = to_priolist(rb); @@ -310,10 +311,7 @@ find_priolist: p->priority = prio; INIT_LIST_HEAD(&p->requests); rb_link_node(&p->node, rb, parent); - rb_insert_color(&p->node, &execlists->queue); - - if (first) - execlists->first = &p->node; + rb_insert_color_cached(&p->node, &execlists->queue, first); return p; } @@ -455,6 +453,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) unsigned int n; /* + * We can skip acquiring intel_runtime_pm_get() here as it was taken + * on our behalf by the request (see i915_gem_mark_busy()) and it will + * not be relinquished until the device is idle (see + * i915_gem_idle_work_handler()). As a precaution, we make sure + * that all ELSP are drained i.e. we have processed the CSB, + * before allowing ourselves to idle and calling intel_runtime_pm_put(). + */ + GEM_BUG_ON(!engine->i915->gt.awake); + + /* * ELSQ note: the submit queue is not cleared after being submitted * to the HW so we need to make sure we always clean it up. This is * currently ensured by the fact that we always write the same number @@ -561,13 +569,16 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists) { GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); - execlists_cancel_port_requests(execlists); - execlists_unwind_incomplete_requests(execlists); + if (inject_preempt_hang(execlists)) + return; - execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT); + execlists_cancel_port_requests(execlists); + __unwind_incomplete_requests(container_of(execlists, + struct intel_engine_cs, + execlists)); } -static bool __execlists_dequeue(struct intel_engine_cs *engine) +static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; struct execlist_port *port = execlists->port; @@ -577,9 +588,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) struct rb_node *rb; bool submit = false; - lockdep_assert_held(&engine->timeline.lock); - - /* Hardware submission is through 2 ports. Conceptually each port + /* + * Hardware submission is through 2 ports. Conceptually each port * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is * static for a context, and unique to each, so we only execute * requests belonging to a single context from each ring. RING_HEAD @@ -600,9 +610,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * and context switches) submission. 
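The queue switches from rb_root to rb_root_cached throughout, making the leftmost (highest priority) node an O(1) lookup via rb_first_cached(). A standalone sketch of the insert pattern used by lookup_priolist() above, with the types reduced for illustration:

#include <linux/rbtree.h>

struct prio_node {
        struct rb_node node;
        int prio;
};

static void prio_insert(struct rb_root_cached *root, struct prio_node *pn)
{
        struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*p) {
                parent = *p;
                /* most positive priority is scheduled first, to the left */
                if (pn->prio > rb_entry(parent, struct prio_node, node)->prio) {
                        p = &parent->rb_left;
                } else {
                        p = &parent->rb_right;
                        leftmost = false;
                }
        }
        rb_link_node(&pn->node, parent, p);
        /* the cached leftmost pointer is maintained by the helper */
        rb_insert_color_cached(&pn->node, root, leftmost);
}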
*/ - rb = execlists->first; - GEM_BUG_ON(rb_first(&execlists->queue) != rb); - if (last) { /* * Don't resubmit or switch until all outstanding @@ -622,11 +629,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * the HW to indicate that it has had a chance to respond. */ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) - return false; + return; if (need_preempt(engine, last, execlists->queue_priority)) { inject_preempt_context(engine); - return false; + return; } /* @@ -651,7 +658,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * priorities of the ports haven't been switch. */ if (port_count(&port[1])) - return false; + return; /* * WaIdleLiteRestore:bdw,skl @@ -664,7 +671,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) last->tail = last->wa_tail; } - while (rb) { + while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); struct i915_request *rq, *rn; @@ -723,8 +730,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) submit = true; } - rb = rb_next(rb); - rb_erase(&p->node, &execlists->queue); + rb_erase_cached(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); @@ -750,35 +756,23 @@ done: execlists->queue_priority = port != execlists->port ? rq_prio(last) : INT_MIN; - execlists->first = rb; - if (submit) + if (submit) { port_assign(port, last); + execlists_submit_ports(engine); + } /* We must always keep the beast fed if we have work piled up */ - GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); + GEM_BUG_ON(rb_first_cached(&execlists->queue) && + !port_isset(execlists->port)); /* Re-evaluate the executing context setup after each preemptive kick */ if (last) execlists_user_begin(execlists, execlists->port); - return submit; -} - -static void execlists_dequeue(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - unsigned long flags; - bool submit; - - spin_lock_irqsave(&engine->timeline.lock, flags); - submit = __execlists_dequeue(engine); - spin_unlock_irqrestore(&engine->timeline.lock, flags); - - if (submit) - execlists_submit_ports(engine); - - GEM_BUG_ON(port_isset(execlists->port) && - !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); + /* If the engine is now idle, so should be the flag; and vice versa. */ + GEM_BUG_ON(execlists_is_active(&engine->execlists, + EXECLISTS_ACTIVE_USER) == + !port_isset(engine->execlists.port)); } void @@ -809,82 +803,27 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) port++; } - execlists_user_end(execlists); + execlists_clear_all_active(execlists); } -static void clear_gtiir(struct intel_engine_cs *engine) +static void reset_csb_pointers(struct intel_engine_execlists *execlists) { - struct drm_i915_private *dev_priv = engine->i915; - int i; - /* - * Clear any pending interrupt state. - * - * We do it twice out of paranoia that some of the IIR are - * double buffered, and so if we only reset it once there may - * still be an interrupt pending. + * After a reset, the HW starts writing into CSB entry [0]. We + * therefore have to set our HEAD pointer back one entry so that + * the *first* entry we check is entry 0. 
To complicate this further, + * as we don't wait for the first interrupt after reset, we have to + * fake the HW write to point back to the last entry so that our + * inline comparison of our cached head position against the last HW + * write works even before the first interrupt. */ - if (INTEL_GEN(dev_priv) >= 11) { - static const struct { - u8 bank; - u8 bit; - } gen11_gtiir[] = { - [RCS] = {0, GEN11_RCS0}, - [BCS] = {0, GEN11_BCS}, - [_VCS(0)] = {1, GEN11_VCS(0)}, - [_VCS(1)] = {1, GEN11_VCS(1)}, - [_VCS(2)] = {1, GEN11_VCS(2)}, - [_VCS(3)] = {1, GEN11_VCS(3)}, - [_VECS(0)] = {1, GEN11_VECS(0)}, - [_VECS(1)] = {1, GEN11_VECS(1)}, - }; - unsigned long irqflags; - - GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir)); - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - for (i = 0; i < 2; i++) { - gen11_reset_one_iir(dev_priv, - gen11_gtiir[engine->id].bank, - gen11_gtiir[engine->id].bit); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - } else { - static const u8 gtiir[] = { - [RCS] = 0, - [BCS] = 0, - [VCS] = 1, - [VCS2] = 1, - [VECS] = 3, - }; - - GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir)); - - for (i = 0; i < 2; i++) { - I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), - engine->irq_keep_mask); - POSTING_READ(GEN8_GT_IIR(gtiir[engine->id])); - } - GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) & - engine->irq_keep_mask); - } + execlists->csb_head = execlists->csb_write_reset; + WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset); } -static void reset_irq(struct intel_engine_cs *engine) +static void nop_submission_tasklet(unsigned long data) { - /* Mark all CS interrupts as complete */ - smp_store_mb(engine->execlists.active, 0); - synchronize_hardirq(engine->i915->drm.irq); - - clear_gtiir(engine); - - /* - * The port is checked prior to scheduling a tasklet, but - * just in case we have suspended the tasklet to do the - * wedging make sure that when it wakes, it decides there - * is no work to do by clearing the irq_posted bit. - */ - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + /* The driver is wedged; don't process any more events. */ } static void execlists_cancel_requests(struct intel_engine_cs *engine) @@ -911,13 +850,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) * submission's irq state, we also wish to remind ourselves that * it is irq state.) */ - local_irq_save(flags); + spin_lock_irqsave(&engine->timeline.lock, flags); /* Cancel the requests on the HW and clear the ELSP tracker. */ execlists_cancel_port_requests(execlists); - reset_irq(engine); - - spin_lock(&engine->timeline.lock); + execlists_user_end(execlists); /* Mark all executing requests as skipped. */ list_for_each_entry(rq, &engine->timeline.requests, link) { @@ -927,8 +864,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) } /* Flush the queued requests to the timeline list (for retiring). 
*/ - rb = execlists->first; - while (rb) { + while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { @@ -938,8 +874,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) __i915_request_submit(rq); } - rb = rb_next(rb); - rb_erase(&p->node, &execlists->queue); + rb_erase_cached(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); @@ -948,183 +883,179 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) /* Remaining _unready_ requests will be nop'ed when submitted */ execlists->queue_priority = INT_MIN; - execlists->queue = RB_ROOT; - execlists->first = NULL; + execlists->queue = RB_ROOT_CACHED; GEM_BUG_ON(port_isset(execlists->port)); - spin_unlock(&engine->timeline.lock); + GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); + execlists->tasklet.func = nop_submission_tasklet; - local_irq_restore(flags); + spin_unlock_irqrestore(&engine->timeline.lock, flags); +} + +static inline bool +reset_in_progress(const struct intel_engine_execlists *execlists) +{ + return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); } static void process_csb(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; struct execlist_port *port = execlists->port; - struct drm_i915_private *i915 = engine->i915; - bool fw = false; + const u32 * const buf = execlists->csb_status; + u8 head, tail; + + /* + * Note that csb_write, csb_status may be either in HWSP or mmio. + * When reading from the csb_write mmio register, we have to be + * careful to only use the GEN8_CSB_WRITE_PTR portion, which is + * the low 4 bits. As it happens we know the next 4 bits are always + * zero and so we can simply mask off the low u8 of the register + * and treat it identically to reading from the HWSP (without having + * to use explicit shifting and masking, and probably bifurcating + * the code to handle the legacy mmio read). + */ + head = execlists->csb_head; + tail = READ_ONCE(*execlists->csb_write); + GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail); + if (unlikely(head == tail)) + return; + + /* + * Hopefully paired with a wmb() in HW! + * + * We must complete the read of the write pointer before any reads + * from the CSB, so that we do not see stale values. Without an rmb + * (lfence) the HW may speculatively perform the CSB[] reads *before* + * we perform the READ_ONCE(*csb_write). + */ + rmb(); do { - /* The HWSP contains a (cacheable) mirror of the CSB */ - const u32 *buf = - &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; - unsigned int head, tail; + struct i915_request *rq; + unsigned int status; + unsigned int count; - /* Clear before reading to catch new interrupts */ - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - smp_mb__after_atomic(); + if (++head == GEN8_CSB_ENTRIES) + head = 0; - if (unlikely(execlists->csb_use_mmio)) { - if (!fw) { - intel_uncore_forcewake_get(i915, execlists->fw_domains); - fw = true; - } + /* + * We are flying near dragons again. + * + * We hold a reference to the request in execlist_port[] + * but no more than that. We are operating in softirq + * context and so cannot hold any mutex or sleep. That + * prevents us stopping the requests we are processing + * in port[] from being retired simultaneously (the + * breadcrumb will be complete before we see the + * context-switch).
As we only hold the reference to the + * request, any pointer chasing underneath the request + * is subject to a potential use-after-free. Thus we + * store all of the bookkeeping within port[] as + * required, and avoid using unguarded pointers beneath + * request itself. The same applies to the atomic + * status notifier. + */ - buf = (u32 * __force) - (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); + GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", + engine->name, head, + buf[2 * head + 0], buf[2 * head + 1], + execlists->active); + + status = buf[2 * head]; + if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | + GEN8_CTX_STATUS_PREEMPTED)) + execlists_set_active(execlists, + EXECLISTS_ACTIVE_HWACK); + if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) + execlists_clear_active(execlists, + EXECLISTS_ACTIVE_HWACK); + + if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) + continue; - head = readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); - tail = GEN8_CSB_WRITE_PTR(head); - head = GEN8_CSB_READ_PTR(head); - execlists->csb_head = head; - } else { - const int write_idx = - intel_hws_csb_write_index(i915) - - I915_HWS_CSB_BUF0_INDEX; + /* We should never get a COMPLETED | IDLE_ACTIVE! */ + GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); - head = execlists->csb_head; - tail = READ_ONCE(buf[write_idx]); - rmb(); /* Hopefully paired with a wmb() in HW */ + if (status & GEN8_CTX_STATUS_COMPLETE && + buf[2*head + 1] == execlists->preempt_complete_status) { + GEM_TRACE("%s preempt-idle\n", engine->name); + complete_preempt_context(execlists); + continue; } - GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n", - engine->name, - head, GEN8_CSB_READ_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?", - tail, GEN8_CSB_WRITE_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?"); - while (head != tail) { - struct i915_request *rq; - unsigned int status; - unsigned int count; + if (status & GEN8_CTX_STATUS_PREEMPTED && + execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT)) + continue; - if (++head == GEN8_CSB_ENTRIES) - head = 0; + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); + rq = port_unpack(port, &count); + GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", + engine->name, + port->context_id, count, + rq ? rq->global_seqno : 0, + rq ? rq->fence.context : 0, + rq ? rq->fence.seqno : 0, + intel_engine_get_seqno(engine), + rq ? rq_prio(rq) : 0); + + /* Check the context/desc id for this event matches */ + GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); + + GEM_BUG_ON(count == 0); + if (--count == 0) { /* - * We are flying near dragons again. - * - * We hold a reference to the request in execlist_port[] - * but no more than that. We are operating in softirq - * context and so cannot hold any mutex or sleep. That - * prevents us stopping the requests we are processing - * in port[] from being retired simultaneously (the - * breadcrumb will be complete before we see the - * context-switch). As we only hold the reference to the - * request, any pointer chasing underneath the request - * is subject to a potential use-after-free. Thus we - * store all of the bookkeeping within port[] as - * required, and avoid using unguarded pointers beneath - * request itself. The same applies to the atomic - * status notifier. 
+ * On the final event corresponding to the + * submission of this context, we expect either + * an element-switch event or a completion + * event (and on completion, the active-idle + * marker). No more preemptions, lite-restore + * or otherwise. */ + GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); + GEM_BUG_ON(port_isset(&port[1]) && + !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); + GEM_BUG_ON(!port_isset(&port[1]) && + !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); - status = READ_ONCE(buf[2 * head]); /* maybe mmio! */ - GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", - engine->name, head, - status, buf[2*head + 1], - execlists->active); - - if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | - GEN8_CTX_STATUS_PREEMPTED)) - execlists_set_active(execlists, - EXECLISTS_ACTIVE_HWACK); - if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) - execlists_clear_active(execlists, - EXECLISTS_ACTIVE_HWACK); - - if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) - continue; - - /* We should never get a COMPLETED | IDLE_ACTIVE! */ - GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); - - if (status & GEN8_CTX_STATUS_COMPLETE && - buf[2*head + 1] == execlists->preempt_complete_status) { - GEM_TRACE("%s preempt-idle\n", engine->name); - complete_preempt_context(execlists); - continue; - } - - if (status & GEN8_CTX_STATUS_PREEMPTED && - execlists_is_active(execlists, - EXECLISTS_ACTIVE_PREEMPT)) - continue; - - GEM_BUG_ON(!execlists_is_active(execlists, - EXECLISTS_ACTIVE_USER)); - - rq = port_unpack(port, &count); - GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", - engine->name, - port->context_id, count, - rq ? rq->global_seqno : 0, - rq ? rq->fence.context : 0, - rq ? rq->fence.seqno : 0, - intel_engine_get_seqno(engine), - rq ? rq_prio(rq) : 0); + /* + * We rely on the hardware being strongly + * ordered, that the breadcrumb write is + * coherent (visible from the CPU) before the + * user interrupt and CSB is processed. + */ + GEM_BUG_ON(!i915_request_completed(rq)); - /* Check the context/desc id for this event matches */ - GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); + execlists_context_schedule_out(rq, + INTEL_CONTEXT_SCHEDULE_OUT); + i915_request_put(rq); - GEM_BUG_ON(count == 0); - if (--count == 0) { - /* - * On the final event corresponding to the - * submission of this context, we expect either - * an element-switch event or a completion - * event (and on completion, the active-idle - * marker). No more preemptions, lite-restore - * or otherwise. - */ - GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); - GEM_BUG_ON(port_isset(&port[1]) && - !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); - GEM_BUG_ON(!port_isset(&port[1]) && - !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); + GEM_TRACE("%s completed ctx=%d\n", + engine->name, port->context_id); - /* - * We rely on the hardware being strongly - * ordered, that the breadcrumb write is - * coherent (visible from the CPU) before the - * user interrupt and CSB is processed. 
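Reduced to its ring-buffer core, the rewritten process_csb() is a plain head-chasing consumer over GEN8_CSB_ENTRIES pairs of dwords. The skeleton below keeps only that structure; process_one_event() is a hypothetical stand-in for the status decoding shown above:

static void process_one_event(u32 status, u32 ctx_id); /* hypothetical */

static void csb_consume_sketch(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists *el = &engine->execlists;
        const u32 *buf = el->csb_status;
        u8 head = el->csb_head;
        u8 tail = READ_ONCE(*el->csb_write);

        if (head == tail)
                return;         /* nothing new from the HW */

        rmb();                  /* read the entries only after the write pointer */

        do {
                if (++head == GEN8_CSB_ENTRIES)
                        head = 0;
                process_one_event(buf[2 * head], buf[2 * head + 1]);
        } while (head != tail);

        el->csb_head = head;    /* remember where we stopped */
}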
- */ - GEM_BUG_ON(!i915_request_completed(rq)); - - execlists_context_schedule_out(rq, - INTEL_CONTEXT_SCHEDULE_OUT); - i915_request_put(rq); - - GEM_TRACE("%s completed ctx=%d\n", - engine->name, port->context_id); - - port = execlists_port_complete(execlists, port); - if (port_isset(port)) - execlists_user_begin(execlists, port); - else - execlists_user_end(execlists); - } else { - port_set(port, port_pack(rq, count)); - } + port = execlists_port_complete(execlists, port); + if (port_isset(port)) + execlists_user_begin(execlists, port); + else + execlists_user_end(execlists); + } else { + port_set(port, port_pack(rq, count)); } + } while (head != tail); - if (head != execlists->csb_head) { - execlists->csb_head = head; - writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), - i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); - } - } while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)); + execlists->csb_head = head; +} - if (unlikely(fw)) - intel_uncore_forcewake_put(i915, execlists->fw_domains); +static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) +{ + lockdep_assert_held(&engine->timeline.lock); + + process_csb(engine); + if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) + execlists_dequeue(engine); } /* @@ -1134,38 +1065,16 @@ static void process_csb(struct intel_engine_cs *engine) static void execlists_submission_tasklet(unsigned long data) { struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + unsigned long flags; - GEM_TRACE("%s awake?=%d, active=%x, irq-posted?=%d\n", + GEM_TRACE("%s awake?=%d, active=%x\n", engine->name, engine->i915->gt.awake, - engine->execlists.active, - test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)); - - /* - * We can skip acquiring intel_runtime_pm_get() here as it was taken - * on our behalf by the request (see i915_gem_mark_busy()) and it will - * not be relinquished until the device is idle (see - * i915_gem_idle_work_handler()). As a precaution, we make sure - * that all ELSP are drained i.e. we have processed the CSB, - * before allowing ourselves to idle and calling intel_runtime_pm_put(). - */ - GEM_BUG_ON(!engine->i915->gt.awake); - - /* - * Prefer doing test_and_clear_bit() as a two stage operation to avoid - * imposing the cost of a locked atomic transaction when submitting a - * new request (outside of the context-switch interrupt). - */ - if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) - process_csb(engine); + engine->execlists.active); - if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) - execlists_dequeue(engine); - - /* If the engine is now idle, so should be the flag; and vice versa. 
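Because the tasklet body now runs entirely under engine->timeline.lock, a submitter that already holds that lock can run it inline rather than scheduling it; the GuC backend (a different tasklet func) and a reset in progress still defer to the tasklet. A condensed restatement of __submit_queue_imm() below, with a lockdep assert added here for clarity:

static void submit_queue_sketch(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists *el = &engine->execlists;

        lockdep_assert_held(&engine->timeline.lock);

        if (reset_in_progress(el))
                return; /* defer until the engine is restarted */

        if (el->tasklet.func == execlists_submission_tasklet)
                __execlists_submission_tasklet(engine); /* run inline */
        else
                tasklet_hi_schedule(&el->tasklet);      /* e.g. GuC override */
}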
*/ - GEM_BUG_ON(execlists_is_active(&engine->execlists, - EXECLISTS_ACTIVE_USER) == - !port_isset(engine->execlists.port)); + spin_lock_irqsave(&engine->timeline.lock, flags); + __execlists_submission_tasklet(engine); + spin_unlock_irqrestore(&engine->timeline.lock, flags); } static void queue_request(struct intel_engine_cs *engine, @@ -1176,16 +1085,30 @@ static void queue_request(struct intel_engine_cs *engine, &lookup_priolist(engine, prio)->requests); } -static void __submit_queue(struct intel_engine_cs *engine, int prio) +static void __update_queue(struct intel_engine_cs *engine, int prio) { engine->execlists.queue_priority = prio; - tasklet_hi_schedule(&engine->execlists.tasklet); +} + +static void __submit_queue_imm(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + if (reset_in_progress(execlists)) + return; /* defer until we restart the engine following reset */ + + if (execlists->tasklet.func == execlists_submission_tasklet) + __execlists_submission_tasklet(engine); + else + tasklet_hi_schedule(&execlists->tasklet); } static void submit_queue(struct intel_engine_cs *engine, int prio) { - if (prio > engine->execlists.queue_priority) - __submit_queue(engine, prio); + if (prio > engine->execlists.queue_priority) { + __update_queue(engine, prio); + __submit_queue_imm(engine); + } } static void execlists_submit_request(struct i915_request *request) @@ -1197,11 +1120,12 @@ static void execlists_submit_request(struct i915_request *request) spin_lock_irqsave(&engine->timeline.lock, flags); queue_request(engine, &request->sched, rq_prio(request)); - submit_queue(engine, rq_prio(request)); - GEM_BUG_ON(!engine->execlists.first); + GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); GEM_BUG_ON(list_empty(&request->sched.link)); + submit_queue(engine, rq_prio(request)); + spin_unlock_irqrestore(&engine->timeline.lock, flags); } @@ -1328,8 +1252,11 @@ static void execlists_schedule(struct i915_request *request, } if (prio > engine->execlists.queue_priority && - i915_sw_fence_done(&sched_to_request(node)->submit)) - __submit_queue(engine, prio); + i915_sw_fence_done(&sched_to_request(node)->submit)) { + /* defer submission until after all of our updates */ + __update_queue(engine, prio); + tasklet_hi_schedule(&engine->execlists.tasklet); + } } spin_unlock_irq(&engine->timeline.lock); @@ -1337,11 +1264,15 @@ static void execlists_schedule(struct i915_request *request, static void execlists_context_destroy(struct intel_context *ce) { - GEM_BUG_ON(!ce->state); GEM_BUG_ON(ce->pin_count); + if (!ce->state) + return; + intel_ring_free(ce->ring); - __i915_gem_object_release_unless_active(ce->state->obj); + + GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); + i915_gem_object_put(ce->state->obj); } static void execlists_context_unpin(struct intel_context *ce) @@ -1851,7 +1782,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) return ret; intel_engine_reset_breadcrumbs(engine); - intel_engine_init_hangcheck(engine); if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { struct drm_printer p = drm_debug_printer(__func__); @@ -1906,6 +1836,7 @@ execlists_reset_prepare(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_request *request, *active; + unsigned long flags; GEM_TRACE("%s\n", engine->name); @@ -1920,6 +1851,8 @@ execlists_reset_prepare(struct intel_engine_cs *engine) */ __tasklet_disable_sync_once(&execlists->tasklet); + 
spin_lock_irqsave(&engine->timeline.lock, flags); + /* * We want to flush the pending context switches, having disabled * the tasklet above, we can assume exclusive access to the execlists. @@ -1927,8 +1860,7 @@ execlists_reset_prepare(struct intel_engine_cs *engine) * and avoid blaming an innocent request if the stall was due to the * preemption itself. */ - if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) - process_csb(engine); + process_csb(engine); /* * The last active request can then be no later than the last request @@ -1938,15 +1870,12 @@ execlists_reset_prepare(struct intel_engine_cs *engine) active = NULL; request = port_request(execlists->port); if (request) { - unsigned long flags; - /* * Prevent the breadcrumb from advancing before we decide * which request is currently active. */ intel_engine_stop_cs(engine); - spin_lock_irqsave(&engine->timeline.lock, flags); list_for_each_entry_from_reverse(request, &engine->timeline.requests, link) { @@ -1956,9 +1885,10 @@ execlists_reset_prepare(struct intel_engine_cs *engine) active = request; } - spin_unlock_irqrestore(&engine->timeline.lock, flags); } + spin_unlock_irqrestore(&engine->timeline.lock, flags); + return active; } @@ -1973,8 +1903,7 @@ static void execlists_reset(struct intel_engine_cs *engine, engine->name, request ? request->global_seqno : 0, intel_engine_get_seqno(engine)); - /* See execlists_cancel_requests() for the irq/spinlock split. */ - local_irq_save(flags); + spin_lock_irqsave(&engine->timeline.lock, flags); /* * Catch up with any missed context-switch interrupts. @@ -1986,17 +1915,14 @@ static void execlists_reset(struct intel_engine_cs *engine, * requests were completed. */ execlists_cancel_port_requests(execlists); - reset_irq(engine); /* Push back any incomplete requests for replay after the reset. 
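execlists_reset() now does all of its work inside one spin_lock_irqsave() section on the timeline lock, instead of the old local_irq_save() plus nested spin_lock() split. Its shape, condensed from this hunk and the one that follows:

        spin_lock_irqsave(&engine->timeline.lock, flags);
        execlists_cancel_port_requests(execlists);      /* drop ELSP tracking */
        __unwind_incomplete_requests(engine);           /* replay after reset */
        reset_csb_pointers(&engine->execlists);         /* reload head/tail */
        spin_unlock_irqrestore(&engine->timeline.lock, flags);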
*/ - spin_lock(&engine->timeline.lock); __unwind_incomplete_requests(engine); - spin_unlock(&engine->timeline.lock); /* Following the reset, we need to reload the CSB read/write pointers */ - engine->execlists.csb_head = GEN8_CSB_ENTRIES - 1; + reset_csb_pointers(&engine->execlists); - local_irq_restore(flags); + spin_unlock_irqrestore(&engine->timeline.lock, flags); /* * If the request was innocent, we leave the request in the ELSP @@ -2046,7 +1972,7 @@ static void execlists_reset_finish(struct intel_engine_cs *engine) struct intel_engine_execlists * const execlists = &engine->execlists; /* After a GPU reset, we may have requests to replay */ - if (execlists->first) + if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) tasklet_schedule(&execlists->tasklet); /* @@ -2366,7 +2292,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) kfree(engine); } -static void execlists_set_default_submission(struct intel_engine_cs *engine) +void intel_execlists_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = execlists_submit_request; engine->cancel_requests = execlists_cancel_requests; @@ -2406,7 +2332,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_breadcrumb = gen8_emit_breadcrumb; engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz; - engine->set_default_submission = execlists_set_default_submission; + engine->set_default_submission = intel_execlists_set_default_submission; if (INTEL_GEN(engine->i915) < 11) { engine->irq_enable = gen8_logical_ring_enable_irq; @@ -2446,28 +2372,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) static void logical_ring_setup(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - enum forcewake_domains fw_domains; - intel_engine_setup_common(engine); /* Intentionally left blank. 
*/ engine->buffer = NULL; - fw_domains = intel_uncore_forcewake_for_reg(dev_priv, - RING_ELSP(engine), - FW_REG_WRITE); - - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, - RING_CONTEXT_STATUS_PTR(engine), - FW_REG_READ | FW_REG_WRITE); - - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, - RING_CONTEXT_STATUS_BUF_BASE(engine), - FW_REG_READ); - - engine->execlists.fw_domains = fw_domains; - tasklet_init(&engine->execlists.tasklet, execlists_submission_tasklet, (unsigned long)engine); @@ -2475,34 +2384,60 @@ logical_ring_setup(struct intel_engine_cs *engine) logical_ring_default_irqs(engine); } +static bool csb_force_mmio(struct drm_i915_private *i915) +{ + /* Older GVT emulation depends upon intercepting CSB mmio */ + return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915); +} + static int logical_ring_init(struct intel_engine_cs *engine) { + struct drm_i915_private *i915 = engine->i915; + struct intel_engine_execlists * const execlists = &engine->execlists; int ret; ret = intel_engine_init_common(engine); if (ret) goto error; - if (HAS_LOGICAL_RING_ELSQ(engine->i915)) { - engine->execlists.submit_reg = engine->i915->regs + + if (HAS_LOGICAL_RING_ELSQ(i915)) { + execlists->submit_reg = i915->regs + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); - engine->execlists.ctrl_reg = engine->i915->regs + + execlists->ctrl_reg = i915->regs + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine)); } else { - engine->execlists.submit_reg = engine->i915->regs + + execlists->submit_reg = i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); } - engine->execlists.preempt_complete_status = ~0u; - if (engine->i915->preempt_context) { + execlists->preempt_complete_status = ~0u; + if (i915->preempt_context) { struct intel_context *ce = - to_intel_context(engine->i915->preempt_context, engine); + to_intel_context(i915->preempt_context, engine); - engine->execlists.preempt_complete_status = + execlists->preempt_complete_status = upper_32_bits(ce->lrc_desc); } - engine->execlists.csb_head = GEN8_CSB_ENTRIES - 1; + execlists->csb_read = + i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); + if (csb_force_mmio(i915)) { + execlists->csb_status = (u32 __force *) + (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); + + execlists->csb_write = (u32 __force *)execlists->csb_read; + execlists->csb_write_reset = + _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK, + GEN8_CSB_ENTRIES - 1); + } else { + execlists->csb_status = + &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + + execlists->csb_write = + &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; + execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1; + } + reset_csb_pointers(execlists); return 0; diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 1593194e930c..4dfb78e3ec7e 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -104,4 +104,6 @@ struct i915_gem_context; void intel_lr_context_resume(struct drm_i915_private *dev_priv); +void intel_execlists_set_default_submission(struct intel_engine_cs *engine); + #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bb06744d28a4..f9f3b0885ba5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -44,8 +44,6 @@ /* Private structure for the integrated LVDS support */ struct intel_lvds_connector { struct intel_connector base; - - struct notifier_block lid_notifier; }; 
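The logical_ring_init() hunk above wires the CSB entry array and write pointer to either an mmio window (when old GVT emulation must intercept the accesses) or the HWSP shadow page, so that the interrupt-time consumer can use plain loads on the hot path. A rough, self-contained sketch of that init-time pointer selection — plain user-space C with hypothetical names, not the driver code (real mmio accesses would additionally need readl() and __iomem annotations):

/*
 * Standalone sketch of the pointer-selection idea used for the CSB
 * above: decide once at init where the status entries and write
 * pointer live, then consume entries with plain pointer loads.
 * All names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define CSB_ENTRIES 6

struct csb_ring {
	const uint32_t *status;	/* entry array: mmio window or HWSP shadow */
	const uint32_t *write;	/* write pointer published by "hardware" */
	unsigned int head;	/* last entry consumed by software */
};

static void csb_init(struct csb_ring *csb,
		     const uint32_t *entries, const uint32_t *write_ptr)
{
	csb->status = entries;
	csb->write = write_ptr;
	csb->head = CSB_ENTRIES - 1;	/* "one before" entry 0, as in the patch */
}

/* Consume every entry published since the last call. */
static void csb_process(struct csb_ring *csb)
{
	unsigned int tail = *csb->write;

	while (csb->head != tail) {
		csb->head = (csb->head + 1) % CSB_ENTRIES;
		printf("event[%u] = 0x%08x\n", csb->head,
		       (unsigned int)csb->status[csb->head]);
	}
}

int main(void)
{
	/* Simulated shadow buffer; "hardware" has published entries 0..2. */
	uint32_t shadow[CSB_ENTRIES] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	uint32_t write_ptr = 2;
	struct csb_ring csb;

	csb_init(&csb, shadow, &write_ptr);
	csb_process(&csb);	/* prints entries 0, 1 and 2 */
	return 0;
}

Fixing the backing store once at init keeps the consumer branch-free: the same head/tail walk works whether the entries live in a register window or in ordinary memory.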
struct intel_lvds_pps { @@ -452,26 +450,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, return true; } -/* - * Detect the LVDS connection. - * - * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means - * connected and closed means disconnected. We also send hotplug events as - * needed, using lid status notification from the input layer. - */ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector, bool force) { - struct drm_i915_private *dev_priv = to_i915(connector->dev); - enum drm_connector_status status; - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); - - status = intel_panel_detect(dev_priv); - if (status != connector_status_unknown) - return status; - return connector_status_connected; } @@ -496,117 +477,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector) return 1; } -static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) -{ - DRM_INFO("Skipping forced modeset for %s\n", id->ident); - return 1; -} - -/* The GPU hangs up on these systems if modeset is performed on LID open */ -static const struct dmi_system_id intel_no_modeset_on_lid[] = { - { - .callback = intel_no_modeset_on_lid_dmi_callback, - .ident = "Toshiba Tecra A11", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), - }, - }, - - { } /* terminating entry */ -}; - -/* - * Lid events. Note the use of 'modeset': - * - we set it to MODESET_ON_LID_OPEN on lid close, - * and set it to MODESET_DONE on open - * - we use it as a "only once" bit (ie we ignore - * duplicate events where it was already properly set) - * - the suspend/resume paths will set it to - * MODESET_SUSPENDED and ignore the lid open event, - * because they restore the mode ("lid open"). - */ -static int intel_lid_notify(struct notifier_block *nb, unsigned long val, - void *unused) -{ - struct intel_lvds_connector *lvds_connector = - container_of(nb, struct intel_lvds_connector, lid_notifier); - struct drm_connector *connector = &lvds_connector->base.base; - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - - if (dev->switch_power_state != DRM_SWITCH_POWER_ON) - return NOTIFY_OK; - - mutex_lock(&dev_priv->modeset_restore_lock); - if (dev_priv->modeset_restore == MODESET_SUSPENDED) - goto exit; - /* - * check and update the status of LVDS connector after receiving - * the LID nofication event. - */ - connector->status = connector->funcs->detect(connector, false); - - /* Don't force modeset on machines where it causes a GPU lockup */ - if (dmi_check_system(intel_no_modeset_on_lid)) - goto exit; - if (!acpi_lid_open()) { - /* do modeset on next lid open event */ - dev_priv->modeset_restore = MODESET_ON_LID_OPEN; - goto exit; - } - - if (dev_priv->modeset_restore == MODESET_DONE) - goto exit; - - /* - * Some old platform's BIOS love to wreak havoc while the lid is closed. - * We try to detect this here and undo any damage. The split for PCH - * platforms is rather conservative and a bit arbitrary expect that on - * those platforms VGA disabling requires actual legacy VGA I/O access, - * and as part of the cleanup in the hw state restore we also redisable - * the vga plane. 
- */ - if (!HAS_PCH_SPLIT(dev_priv)) - intel_display_resume(dev); - - dev_priv->modeset_restore = MODESET_DONE; - -exit: - mutex_unlock(&dev_priv->modeset_restore_lock); - return NOTIFY_OK; -} - -static int -intel_lvds_connector_register(struct drm_connector *connector) -{ - struct intel_lvds_connector *lvds = to_lvds_connector(connector); - int ret; - - ret = intel_connector_register(connector); - if (ret) - return ret; - - lvds->lid_notifier.notifier_call = intel_lid_notify; - if (acpi_lid_notifier_register(&lvds->lid_notifier)) { - DRM_DEBUG_KMS("lid notifier registration failed\n"); - lvds->lid_notifier.notifier_call = NULL; - } - - return 0; -} - -static void -intel_lvds_connector_unregister(struct drm_connector *connector) -{ - struct intel_lvds_connector *lvds = to_lvds_connector(connector); - - if (lvds->lid_notifier.notifier_call) - acpi_lid_notifier_unregister(&lvds->lid_notifier); - - intel_connector_unregister(connector); -} - /** * intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free @@ -639,8 +509,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, - .late_register = intel_lvds_connector_register, - .early_unregister = intel_lvds_connector_unregister, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, @@ -1114,8 +984,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) * 2) check for VBT data * 3) check to see if LVDS is already on * if none of the above, no panel - * 4) make sure lid is open - * if closed, act like it's not there for now */ /* @@ -1131,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) intel_gmbus_get_adapter(dev_priv, pin)); if (edid) { if (drm_add_edid_modes(connector, edid)) { - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, edid); } else { kfree(edid); diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index b39846613e3c..ca44bf368e24 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -40,7 +40,7 @@ int intel_connector_update_modes(struct drm_connector *connector, { int ret; - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); return ret; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 14b827ec5427..4a9f139e7b73 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -375,26 +375,6 @@ out: pipe_config->gmch_pfit.lvds_border_bits = border; } -enum drm_connector_status -intel_panel_detect(struct drm_i915_private *dev_priv) -{ - /* Assume that the BIOS does not lie through the OpRegion... */ - if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) { - return *dev_priv->opregion.lid_state & 0x1 ? 
- connector_status_connected : - connector_status_disconnected; - } - - switch (i915_modparams.panel_ignore_lid) { - case -2: - return connector_status_connected; - case -1: - return connector_status_disconnected; - default: - return connector_status_unknown; - } -} - /** * scale - scale values from one range to another * @source_val: value in range [@source_min..@source_max] diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index 39a4e4edda07..849e1b69ba73 100644 --- a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -30,160 +30,6 @@ #include <linux/debugfs.h> #include "intel_drv.h" -struct pipe_crc_info { - const char *name; - struct drm_i915_private *dev_priv; - enum pipe pipe; -}; - -static int i915_pipe_crc_open(struct inode *inode, struct file *filep) -{ - struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - - if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes) - return -ENODEV; - - spin_lock_irq(&pipe_crc->lock); - - if (pipe_crc->opened) { - spin_unlock_irq(&pipe_crc->lock); - return -EBUSY; /* already open */ - } - - pipe_crc->opened = true; - filep->private_data = inode->i_private; - - spin_unlock_irq(&pipe_crc->lock); - - return 0; -} - -static int i915_pipe_crc_release(struct inode *inode, struct file *filep) -{ - struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - - spin_lock_irq(&pipe_crc->lock); - pipe_crc->opened = false; - spin_unlock_irq(&pipe_crc->lock); - - return 0; -} - -/* (6 fields, 8 chars each, space separated (5) + '\n') */ -#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) -/* account for \'0' */ -#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) - -static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) -{ - lockdep_assert_held(&pipe_crc->lock); - return CIRC_CNT(pipe_crc->head, pipe_crc->tail, - INTEL_PIPE_CRC_ENTRIES_NR); -} - -static ssize_t -i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, - loff_t *pos) -{ - struct pipe_crc_info *info = filep->private_data; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - char buf[PIPE_CRC_BUFFER_LEN]; - int n_entries; - ssize_t bytes_read; - - /* - * Don't allow user space to provide buffers not big enough to hold - * a line of data. 
- */ - if (count < PIPE_CRC_LINE_LEN) - return -EINVAL; - - if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) - return 0; - - /* nothing to read */ - spin_lock_irq(&pipe_crc->lock); - while (pipe_crc_data_count(pipe_crc) == 0) { - int ret; - - if (filep->f_flags & O_NONBLOCK) { - spin_unlock_irq(&pipe_crc->lock); - return -EAGAIN; - } - - ret = wait_event_interruptible_lock_irq(pipe_crc->wq, - pipe_crc_data_count(pipe_crc), pipe_crc->lock); - if (ret) { - spin_unlock_irq(&pipe_crc->lock); - return ret; - } - } - - /* We now have one or more entries to read */ - n_entries = count / PIPE_CRC_LINE_LEN; - - bytes_read = 0; - while (n_entries > 0) { - struct intel_pipe_crc_entry *entry = - &pipe_crc->entries[pipe_crc->tail]; - - if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, - INTEL_PIPE_CRC_ENTRIES_NR) < 1) - break; - - BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); - pipe_crc->tail = (pipe_crc->tail + 1) & - (INTEL_PIPE_CRC_ENTRIES_NR - 1); - - bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, - "%8u %8x %8x %8x %8x %8x\n", - entry->frame, entry->crc[0], - entry->crc[1], entry->crc[2], - entry->crc[3], entry->crc[4]); - - spin_unlock_irq(&pipe_crc->lock); - - if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN)) - return -EFAULT; - - user_buf += PIPE_CRC_LINE_LEN; - n_entries--; - - spin_lock_irq(&pipe_crc->lock); - } - - spin_unlock_irq(&pipe_crc->lock); - - return bytes_read; -} - -static const struct file_operations i915_pipe_crc_fops = { - .owner = THIS_MODULE, - .open = i915_pipe_crc_open, - .read = i915_pipe_crc_read, - .release = i915_pipe_crc_release, -}; - -static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { - { - .name = "i915_pipe_A_crc", - .pipe = PIPE_A, - }, - { - .name = "i915_pipe_B_crc", - .pipe = PIPE_B, - }, - { - .name = "i915_pipe_C_crc", - .pipe = PIPE_C, - }, -}; - static const char * const pipe_crc_sources[] = { "none", "plane1", @@ -197,29 +43,6 @@ static const char * const pipe_crc_sources[] = { "auto", }; -static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) -{ - BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); - return pipe_crc_sources[source]; -} - -static int display_crc_ctl_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) - seq_printf(m, "%c %s\n", pipe_name(pipe), - pipe_crc_source_name(dev_priv->pipe_crc[pipe].source)); - - return 0; -} - -static int display_crc_ctl_open(struct inode *inode, struct file *file) -{ - return single_open(file, display_crc_ctl_show, inode->i_private); -} - static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, uint32_t *val) { @@ -616,177 +439,6 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa); } -static int pipe_crc_set_source(struct drm_i915_private *dev_priv, - enum pipe pipe, - enum intel_pipe_crc_source source) -{ - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - enum intel_display_power_domain power_domain; - u32 val = 0; /* shut up gcc */ - int ret; - - if (pipe_crc->source == source) - return 0; - - /* forbid changing the source without going back to 'none' */ - if (pipe_crc->source && source) - return -EINVAL; - - power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { - DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); - return -EIO; - } - - ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, 
&val, true); - if (ret != 0) - goto out; - - /* none -> real source transition */ - if (source) { - struct intel_pipe_crc_entry *entries; - - DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", - pipe_name(pipe), pipe_crc_source_name(source)); - - entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, - sizeof(pipe_crc->entries[0]), - GFP_KERNEL); - if (!entries) { - ret = -ENOMEM; - goto out; - } - - spin_lock_irq(&pipe_crc->lock); - kfree(pipe_crc->entries); - pipe_crc->entries = entries; - pipe_crc->head = 0; - pipe_crc->tail = 0; - spin_unlock_irq(&pipe_crc->lock); - } - - pipe_crc->source = source; - - I915_WRITE(PIPE_CRC_CTL(pipe), val); - POSTING_READ(PIPE_CRC_CTL(pipe)); - - /* real source -> none transition */ - if (!source) { - struct intel_pipe_crc_entry *entries; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); - - DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", - pipe_name(pipe)); - - drm_modeset_lock(&crtc->base.mutex, NULL); - if (crtc->base.state->active) - intel_wait_for_vblank(dev_priv, pipe); - drm_modeset_unlock(&crtc->base.mutex); - - spin_lock_irq(&pipe_crc->lock); - entries = pipe_crc->entries; - pipe_crc->entries = NULL; - pipe_crc->head = 0; - pipe_crc->tail = 0; - spin_unlock_irq(&pipe_crc->lock); - - kfree(entries); - - if (IS_G4X(dev_priv)) - g4x_undo_pipe_scramble_reset(dev_priv, pipe); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_undo_pipe_scramble_reset(dev_priv, pipe); - else if ((IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv)) && pipe == PIPE_A) - hsw_pipe_A_crc_wa(dev_priv, false); - } - - ret = 0; - -out: - intel_display_power_put(dev_priv, power_domain); - - return ret; -} - -/* - * Parse pipe CRC command strings: - * command: wsp* object wsp+ name wsp+ source wsp* - * object: 'pipe' - * name: (A | B | C) - * source: (none | plane1 | plane2 | pf) - * wsp: (#0x20 | #0x9 | #0xA)+ - * - * eg.: - * "pipe A plane1" -> Start CRC computations on plane1 of pipe A - * "pipe A none" -> Stop CRC - */ -static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) -{ - int n_words = 0; - - while (*buf) { - char *end; - - /* skip leading white space */ - buf = skip_spaces(buf); - if (!*buf) - break; /* end of buffer */ - - /* find end of word */ - for (end = buf; *end && !isspace(*end); end++) - ; - - if (n_words == max_words) { - DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", - max_words); - return -EINVAL; /* ran out of words[] before bytes */ - } - - if (*end) - *end++ = '\0'; - words[n_words++] = buf; - buf = end; - } - - return n_words; -} - -enum intel_pipe_crc_object { - PIPE_CRC_OBJECT_PIPE, -}; - -static const char * const pipe_crc_objects[] = { - "pipe", -}; - -static int -display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) -{ - int i; - - i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf); - if (i < 0) - return i; - - *o = i; - return 0; -} - -static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv, - const char *buf, enum pipe *pipe) -{ - const char name = buf[0]; - - if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes)) - return -EINVAL; - - *pipe = name - 'A'; - - return 0; -} - static int display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) { @@ -805,81 +457,6 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) return 0; } -static int display_crc_ctl_parse(struct drm_i915_private *dev_priv, - char *buf, size_t len) -{ -#define N_WORDS 3 - int n_words; - char 
*words[N_WORDS]; - enum pipe pipe; - enum intel_pipe_crc_object object; - enum intel_pipe_crc_source source; - - n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); - if (n_words != N_WORDS) { - DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", - N_WORDS); - return -EINVAL; - } - - if (display_crc_ctl_parse_object(words[0], &object) < 0) { - DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); - return -EINVAL; - } - - if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) { - DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); - return -EINVAL; - } - - if (display_crc_ctl_parse_source(words[2], &source) < 0) { - DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); - return -EINVAL; - } - - return pipe_crc_set_source(dev_priv, pipe, source); -} - -static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - char *tmpbuf; - int ret; - - if (len == 0) - return 0; - - if (len > PAGE_SIZE - 1) { - DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", - PAGE_SIZE); - return -E2BIG; - } - - tmpbuf = memdup_user_nul(ubuf, len); - if (IS_ERR(tmpbuf)) - return PTR_ERR(tmpbuf); - - ret = display_crc_ctl_parse(dev_priv, tmpbuf, len); - - kfree(tmpbuf); - if (ret < 0) - return ret; - - *offp += len; - return len; -} - -const struct file_operations i915_display_crc_ctl_fops = { - .owner = THIS_MODULE, - .open = display_crc_ctl_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = display_crc_ctl_write -}; - void intel_display_crc_init(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -887,30 +464,8 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv) for_each_pipe(dev_priv, pipe) { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - pipe_crc->opened = false; spin_lock_init(&pipe_crc->lock); - init_waitqueue_head(&pipe_crc->wq); - } -} - -int intel_pipe_crc_create(struct drm_minor *minor) -{ - struct drm_i915_private *dev_priv = to_i915(minor->dev); - struct dentry *ent; - int i; - - for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { - struct pipe_crc_info *info = &i915_pipe_crc_data[i]; - - info->dev_priv = dev_priv; - ent = debugfs_create_file(info->name, S_IRUGO, - minor->debugfs_root, info, - &i915_pipe_crc_fops); - if (!ent) - return -ENOMEM; } - - return 0; } int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 53aaaa3e6886..7312ecb73415 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7347,11 +7347,11 @@ out: static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) { - if (WARN_ON(!dev_priv->vlv_pctx)) - return; + struct drm_i915_gem_object *pctx; - i915_gem_object_put(dev_priv->vlv_pctx); - dev_priv->vlv_pctx = NULL; + pctx = fetch_and_zero(&dev_priv->vlv_pctx); + if (pctx) + i915_gem_object_put(pctx); } static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index d4cd19fea148..4bd5768731ee 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -56,43 +56,6 @@ #include "intel_drv.h" #include "i915_drv.h" -static inline enum intel_display_power_domain -psr_aux_domain(struct intel_dp *intel_dp) -{ - /* CNL HW requires corresponding AUX IOs to be powered up for PSR. 
- * However, for non-A AUX ports the corresponding non-EDP transcoders - * would have already enabled power well 2 and DC_OFF. This means we can - * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a - * specific AUX_IO reference without powering up any extra wells. - * Note that PSR is enabled only on Port A even though this function - * returns the correct domain for other ports too. - */ - return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : - intel_dp->aux_power_domain; -} - -static void psr_aux_io_power_get(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - - if (INTEL_GEN(dev_priv) < 10) - return; - - intel_display_power_get(dev_priv, psr_aux_domain(intel_dp)); -} - -static void psr_aux_io_power_put(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - - if (INTEL_GEN(dev_priv) < 10) - return; - - intel_display_power_put(dev_priv, psr_aux_domain(intel_dp)); -} - void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) { u32 debug_mask, mask; @@ -278,8 +241,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) } } -static void hsw_psr_setup_vsc(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) +static void intel_psr_setup_vsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); @@ -336,7 +299,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); /* Start with bits set for DDI_AUX_CTL register */ - aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg), + aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), aux_clock_divider); /* Select only valid bits for SRD_AUX_CTL */ @@ -344,7 +307,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); } -static void hsw_psr_enable_sink(struct intel_dp *intel_dp) +static void intel_psr_enable_sink(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; @@ -360,6 +323,8 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) if (dev_priv->psr.link_standby) dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; + if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8) + dpcd_val |= DP_PSR_CRC_VERIFICATION; drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); @@ -415,6 +380,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) else val |= EDP_PSR_TP1_TP2_SEL; + if (INTEL_GEN(dev_priv) >= 8) + val |= EDP_PSR_CRC_ENABLE; + val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK; I915_WRITE(EDP_PSR_CTL, val); } @@ -456,24 +424,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) I915_WRITE(EDP_PSR2_CTL, val); } -static void hsw_psr_activate(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - - /* On HSW+ after we enable PSR on source it will activate it - * as soon as it match configure idle_frame count. 
So - * we just actually enable it here on activation time. - */ - - /* psr1 and psr2 are mutually exclusive.*/ - if (dev_priv->psr.psr2_enabled) - hsw_activate_psr2(intel_dp); - else - hsw_activate_psr1(intel_dp); -} - static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -576,27 +526,29 @@ static void intel_psr_activate(struct intel_dp *intel_dp) struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - if (dev_priv->psr.psr2_enabled) + if (INTEL_GEN(dev_priv) >= 9) WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); - else - WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); + WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); WARN_ON(dev_priv->psr.active); lockdep_assert_held(&dev_priv->psr.lock); - dev_priv->psr.activate(intel_dp); + /* psr1 and psr2 are mutually exclusive.*/ + if (dev_priv->psr.psr2_enabled) + hsw_activate_psr2(intel_dp); + else + hsw_activate_psr1(intel_dp); + dev_priv->psr.active = true; } -static void hsw_psr_enable_source(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) +static void intel_psr_enable_source(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - psr_aux_io_power_get(intel_dp); - /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ * use hardcoded values PSR AUX transactions */ @@ -632,7 +584,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp, EDP_PSR_DEBUG_MASK_MEMUP | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP | - EDP_PSR_DEBUG_MASK_DISP_REG_WRITE); + EDP_PSR_DEBUG_MASK_DISP_REG_WRITE | + EDP_PSR_DEBUG_MASK_MAX_SLEEP); } } @@ -666,9 +619,9 @@ void intel_psr_enable(struct intel_dp *intel_dp, dev_priv->psr.psr2_enabled = crtc_state->has_psr2; dev_priv->psr.busy_frontbuffer_bits = 0; - dev_priv->psr.setup_vsc(intel_dp, crtc_state); - dev_priv->psr.enable_sink(intel_dp); - dev_priv->psr.enable_source(intel_dp, crtc_state); + intel_psr_setup_vsc(intel_dp, crtc_state); + intel_psr_enable_sink(intel_dp); + intel_psr_enable_source(intel_dp, crtc_state); dev_priv->psr.enabled = intel_dp; intel_psr_activate(intel_dp); @@ -677,8 +630,8 @@ unlock: mutex_unlock(&dev_priv->psr.lock); } -static void hsw_psr_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) +static void +intel_psr_disable_source(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; @@ -717,8 +670,25 @@ static void hsw_psr_disable(struct intel_dp *intel_dp, else WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); } +} + +static void intel_psr_disable_locked(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + + lockdep_assert_held(&dev_priv->psr.lock); - psr_aux_io_power_put(intel_dp); + if (!dev_priv->psr.enabled) + return; + + intel_psr_disable_source(intel_dp); + + /* Disable PSR on Sink */ + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); + + dev_priv->psr.enabled = NULL; } /** @@ -742,22 +712,49 @@ void intel_psr_disable(struct intel_dp *intel_dp, return; mutex_lock(&dev_priv->psr.lock); - if 
(!dev_priv->psr.enabled) { - mutex_unlock(&dev_priv->psr.lock); - return; - } + intel_psr_disable_locked(intel_dp); + mutex_unlock(&dev_priv->psr.lock); + cancel_work_sync(&dev_priv->psr.work); +} - dev_priv->psr.disable_source(intel_dp, old_crtc_state); +int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + i915_reg_t reg; + u32 mask; - /* Disable PSR on Sink */ - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); + if (!new_crtc_state->has_psr) + return 0; - dev_priv->psr.enabled = NULL; - mutex_unlock(&dev_priv->psr.lock); - cancel_work_sync(&dev_priv->psr.work); + /* + * The sole user right now is intel_pipe_update_start(), + * which won't race with psr_enable/disable, which is + * where psr2_enabled is written to. So, we don't need + * to acquire the psr.lock. More importantly, we want the + * latency inside intel_pipe_update_start() to be as low + * as possible, so no need to acquire psr.lock when it is + * not needed and will induce latencies in the atomic + * update path. + */ + if (dev_priv->psr.psr2_enabled) { + reg = EDP_PSR2_STATUS; + mask = EDP_PSR2_STATUS_STATE_MASK; + } else { + reg = EDP_PSR_STATUS; + mask = EDP_PSR_STATUS_STATE_MASK; + } + + /* + * Max time for PSR to idle = Inverse of the refresh rate + + * 6 ms of exit training time + 1.5 ms of aux channel + * handshake. 50 msec is defensive enough to cover everything. + */ + return intel_wait_for_register(dev_priv, reg, mask, + EDP_PSR_STATUS_STATE_IDLE, 50); } -static bool psr_wait_for_idle(struct drm_i915_private *dev_priv) +static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) { struct intel_dp *intel_dp; i915_reg_t reg; @@ -803,7 +800,7 @@ static void intel_psr_work(struct work_struct *work) * PSR might take some time to get fully disabled * and be ready for re-enable. */ - if (!psr_wait_for_idle(dev_priv)) + if (!__psr_wait_for_idle_locked(dev_priv)) goto unlock; /* @@ -811,7 +808,7 @@ * recheck. Since psr_flush first clears this and then reschedules we * won't ever miss a flush when bailing out here.
*/ - if (dev_priv->psr.busy_frontbuffer_bits) + if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) goto unlock; intel_psr_activate(dev_priv->psr.enabled); @@ -974,23 +971,60 @@ void intel_psr_init(struct drm_i915_private *dev_priv) /* For new platforms let's respect VBT back again */ dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; - /* Override link_standby x link_off defaults */ - if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) { - DRM_DEBUG_KMS("PSR: Forcing link standby\n"); - dev_priv->psr.link_standby = true; - } - if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) { - DRM_DEBUG_KMS("PSR: Forcing main link off\n"); - dev_priv->psr.link_standby = false; - } - INIT_WORK(&dev_priv->psr.work, intel_psr_work); mutex_init(&dev_priv->psr.lock); +} - dev_priv->psr.enable_source = hsw_psr_enable_source; - dev_priv->psr.disable_source = hsw_psr_disable; - dev_priv->psr.enable_sink = hsw_psr_enable_sink; - dev_priv->psr.activate = hsw_psr_activate; - dev_priv->psr.setup_vsc = hsw_psr_setup_vsc; +void intel_psr_short_pulse(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_psr *psr = &dev_priv->psr; + u8 val; + const u8 errors = DP_PSR_RFB_STORAGE_ERROR | + DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | + DP_PSR_LINK_CRC_ERROR; + + if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) + return; + + mutex_lock(&psr->lock); + + if (psr->enabled != intel_dp) + goto exit; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) { + DRM_ERROR("PSR_STATUS dpcd read failed\n"); + goto exit; + } + + if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) { + DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); + intel_psr_disable_locked(intel_dp); + } + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) { + DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n"); + goto exit; + } + if (val & DP_PSR_RFB_STORAGE_ERROR) + DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n"); + if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) + DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n"); + if (val & DP_PSR_LINK_CRC_ERROR) + DRM_ERROR("PSR Link CRC error, disabling PSR\n"); + + if (val & ~errors) + DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", + val & ~errors); + if (val & errors) + intel_psr_disable_locked(intel_dp); + /* clear status register */ + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); + + /* TODO: handle PSR2 errors */ +exit: + mutex_unlock(&psr->lock); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e0448eff12bd..33faad3197fe 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -524,8 +524,6 @@ static int init_ring_common(struct intel_engine_cs *engine) goto out; } - intel_engine_init_hangcheck(engine); - if (INTEL_GEN(dev_priv) > 2) I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); @@ -1089,6 +1087,7 @@ void intel_ring_unpin(struct intel_ring *ring) static struct i915_vma * intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) { + struct i915_address_space *vm = &dev_priv->ggtt.vm; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -1098,10 +1097,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) if (IS_ERR(obj)) return ERR_CAST(obj); - /* mark ring buffers 
as read-only from GPU side by default */ - obj->gt_ro = 1; + /* + * Mark ring buffers as read-only from GPU side (so no stray overwrites) + * if supported by the platform's GGTT. + */ + if (vm->has_read_only) + i915_gem_object_set_readonly(obj); - vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) goto err; @@ -1169,8 +1172,11 @@ static void intel_ring_context_destroy(struct intel_context *ce) { GEM_BUG_ON(ce->pin_count); - if (ce->state) - __i915_gem_object_release_unless_active(ce->state->obj); + if (!ce->state) + return; + + GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); + i915_gem_object_put(ce->state->obj); } static int __context_pin_ppgtt(struct i915_gem_context *ctx) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a0bc7a8222b4..f5ffa6d31e82 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -193,6 +193,11 @@ struct i915_priolist { int priority; }; +struct st_preempt_hang { + struct completion completion; + bool inject_hang; +}; + /** * struct intel_engine_execlists - execlist submission queue and port state * @@ -292,32 +297,49 @@ struct intel_engine_execlists { /** * @queue: queue of requests, in priority lists */ - struct rb_root queue; + struct rb_root_cached queue; /** - * @first: leftmost level in priority @queue + * @csb_read: control register for Context Switch buffer + * + * Note this register is always in mmio. */ - struct rb_node *first; + u32 __iomem *csb_read; /** - * @fw_domains: forcewake domains for irq tasklet + * @csb_write: control register for Context Switch buffer + * + * Note this register may be either mmio or HWSP shadow. */ - unsigned int fw_domains; + u32 *csb_write; /** - * @csb_head: context status buffer head + * @csb_status: status array for Context Switch buffer + * + * Note these registers may be either mmio or HWSP shadow. + */ + u32 *csb_status; + + /** + * @preempt_complete_status: expected CSB upon completing preemption */ - unsigned int csb_head; + u32 preempt_complete_status; /** - * @csb_use_mmio: access csb through mmio, instead of hwsp + * @csb_write_reset: reset value for CSB write pointer + * + * As the CSB write pointer may be either in HWSP or as a field + * inside an mmio register, we want to reprogram it slightly + * differently to avoid later confusion.
*/ - bool csb_use_mmio; + u32 csb_write_reset; /** - * @preempt_complete_status: expected CSB upon completing preemption + * @csb_head: context status buffer head */ - u32 preempt_complete_status; + u8 csb_head; + + I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;) }; #define INTEL_ENGINE_CS_MAX_NAME 8 @@ -345,10 +367,8 @@ struct intel_engine_cs { struct drm_i915_gem_object *default_state; void *pinned_default_state; - atomic_t irq_count; unsigned long irq_posted; #define ENGINE_IRQ_BREADCRUMB 0 -#define ENGINE_IRQ_EXECLIST 1 /* Rather than have every client wait upon all user interrupts, * with the herd waking after every interrupt and each doing the @@ -380,6 +400,7 @@ struct intel_engine_cs { unsigned int hangcheck_interrupts; unsigned int irq_enabled; + unsigned int irq_count; bool irq_armed : 1; I915_SELFTEST_DECLARE(bool mock : 1); @@ -669,6 +690,12 @@ execlists_clear_active(struct intel_engine_execlists *execlists, __clear_bit(bit, (unsigned long *)&execlists->active); } +static inline void +execlists_clear_all_active(struct intel_engine_execlists *execlists) +{ + execlists->active = 0; +} + static inline bool execlists_is_active(const struct intel_engine_execlists *execlists, unsigned int bit) @@ -928,11 +955,10 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); -static inline void intel_wait_init(struct intel_wait *wait, - struct i915_request *rq) +static inline void intel_wait_init(struct intel_wait *wait) { wait->tsk = current; - wait->request = rq; + wait->request = NULL; } static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno) @@ -1136,4 +1162,24 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine); ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) + +static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) +{ + if (!execlists->preempt_hang.inject_hang) + return false; + + complete(&execlists->preempt_hang.completion); + return true; +} + +#else + +static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) +{ + return false; +} + +#endif + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index de3a81034f77..6b5aa3b074ec 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -134,6 +134,14 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_F"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; + case POWER_DOMAIN_AUX_TBT1: + return "AUX_TBT1"; + case POWER_DOMAIN_AUX_TBT2: + return "AUX_TBT2"; + case POWER_DOMAIN_AUX_TBT3: + return "AUX_TBT3"; + case POWER_DOMAIN_AUX_TBT4: + return "AUX_TBT4"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: @@ -384,7 +392,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, u32 val; if (wait_fuses) { - pg = SKL_PW_TO_PG(id); + pg = INTEL_GEN(dev_priv) >= 11 ? 
ICL_PW_TO_PG(id) : + SKL_PW_TO_PG(id); /* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse @@ -430,6 +439,43 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_disable(dev_priv, power_well); } +#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A) + +static void +icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum i915_power_well_id id = power_well->id; + enum port port = ICL_AUX_PW_TO_PORT(id); + u32 val; + + val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); + I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id)); + + val = I915_READ(ICL_PORT_CL_DW12(port)); + I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); + + hsw_wait_for_power_well_enable(dev_priv, power_well); +} + +static void +icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum i915_power_well_id id = power_well->id; + enum port port = ICL_AUX_PW_TO_PORT(id); + u32 val; + + val = I915_READ(ICL_PORT_CL_DW12(port)); + I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); + + val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); + I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), + val & ~HSW_PWR_WELL_CTL_REQ(id)); + + hsw_wait_for_power_well_disable(dev_priv, power_well); +} + /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -1824,6 +1870,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_INIT)) #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ @@ -1896,6 +1943,105 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_INIT)) +/* + * ICL PW_0/PG_0 domains (HW/DMC control): + * - PCI + * - clocks except port PLL + * - central power except FBC + * - shared functions except pipe interrupts, pipe MBUS, DBUF registers + * ICL PW_1/PG_1 domains (HW/DMC control): + * - DBUF function + * - PIPE_A and its planes, except VGA + * - transcoder EDP + PSR + * - transcoder DSI + * - DDI_A + * - FBC + */ +#define ICL_PW_4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* VDSC/joining */ +#define ICL_PW_3_POWER_DOMAINS ( \ + ICL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + 
BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* + * - transcoder WD + * - KVMR (HW control) + */ +#define ICL_PW_2_POWER_DOMAINS ( \ + ICL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* + * - eDP/DSI VDSC + * - KVMR (HW control) + */ +#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + ICL_PW_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define ICL_DDI_IO_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) +#define ICL_DDI_IO_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) +#define ICL_DDI_IO_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) +#define ICL_DDI_IO_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) +#define ICL_DDI_IO_E_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) +#define ICL_DDI_IO_F_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) + +#define ICL_AUX_A_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_A)) +#define ICL_AUX_B_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B)) +#define ICL_AUX_C_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C)) +#define ICL_AUX_D_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D)) +#define ICL_AUX_E_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E)) +#define ICL_AUX_F_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F)) +#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1)) +#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2)) +#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3)) +#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4)) + static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = i9xx_always_on_power_well_noop, @@ -2453,6 +2599,157 @@ static struct i915_power_well cnl_power_wells[] = { }, }; +static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { + .sync_hw = hsw_power_well_sync_hw, + .enable = icl_combo_phy_aux_power_well_enable, + .disable = icl_combo_phy_aux_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +static struct i915_power_well icl_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = I915_DISP_PW_ALWAYS_ON, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_1, + .hsw.has_fuses = true, + }, + { + .name = "power well 2", + .domains = ICL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_2, + .hsw.has_fuses = true, + }, + { + .name = "DC off", + .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_PW_DC_OFF, + }, + { + .name = "power well 3", + .domains = ICL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_A, + }, + { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_B, + }, + { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_C, + }, + { + .name = "DDI D IO", + 
.domains = ICL_DDI_IO_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_D, + }, + { + .name = "DDI E IO", + .domains = ICL_DDI_IO_E_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_E, + }, + { + .name = "DDI F IO", + .domains = ICL_DDI_IO_F_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_F, + }, + { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = ICL_DISP_PW_AUX_A, + }, + { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = ICL_DISP_PW_AUX_B, + }, + { + .name = "AUX C", + .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_C, + }, + { + .name = "AUX D", + .domains = ICL_AUX_D_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_D, + }, + { + .name = "AUX E", + .domains = ICL_AUX_E_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_E, + }, + { + .name = "AUX F", + .domains = ICL_AUX_F_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_F, + }, + { + .name = "AUX TBT1", + .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_TBT1, + }, + { + .name = "AUX TBT2", + .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_TBT2, + }, + { + .name = "AUX TBT3", + .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_TBT3, + }, + { + .name = "AUX TBT4", + .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_TBT4, + }, + { + .name = "power well 4", + .domains = ICL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + }, +}; + static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, int disable_power_well) @@ -2470,7 +2767,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, int requested_dc; int max_dc; - if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { + if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) { max_dc = 2; mask = 0; } else if (IS_GEN9_LP(dev_priv)) { @@ -2558,7 +2855,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. */ - if (IS_HASWELL(dev_priv)) { + if (IS_ICELAKE(dev_priv)) { + set_power_wells(power_domains, icl_power_wells); + } else if (IS_HASWELL(dev_priv)) { set_power_wells(power_domains, hsw_power_wells); } else if (IS_BROADWELL(dev_priv)) { set_power_wells(power_domains, bdw_power_wells); @@ -2913,6 +3212,7 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { default: MISSING_CASE(val); + /* fall through */ case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; break; @@ -3025,6 +3325,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) static void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; enum port port; u32 val; @@ -3053,8 +3355,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, I915_WRITE(ICL_PORT_CL_DW5(port), val); } - /* 4. 
Enable power well 1 (PG1) and aux IO power. */ - /* FIXME: ICL power wells code not here yet. */ + /* + * 4. Enable Power Well 1 (PG1). + * The AUX IO power wells will be enabled on demand. + */ + mutex_lock(&power_domains->lock); + well = lookup_power_well(dev_priv, ICL_DISP_PW_1); + intel_power_well_enable(dev_priv, well); + mutex_unlock(&power_domains->lock); /* 5. Enable CDCLK. */ icl_init_cdclk(dev_priv); @@ -3072,6 +3380,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, static void icl_display_core_uninit(struct drm_i915_private *dev_priv) { + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; enum port port; u32 val; @@ -3085,8 +3395,15 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) /* 3. Disable CD clock */ icl_uninit_cdclk(dev_priv); - /* 4. Disable Power Well 1 (PG1) and Aux IO Power */ - /* FIXME: ICL power wells code not here yet. */ + /* + * 4. Disable Power Well 1 (PG1). + * The AUX IO power wells are toggled on demand, so they are already + * disabled at this point. + */ + mutex_lock(&power_domains->lock); + well = lookup_power_well(dev_priv, ICL_DISP_PW_1); + intel_power_well_disable(dev_priv, well); + mutex_unlock(&power_domains->lock); /* 5. Disable Comp */ for (port = PORT_A; port <= PORT_B; port++) { diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e6a64b3ecd91..812fe7b06f87 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1340,6 +1340,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, switch (crtc_state->pixel_multiplier) { default: WARN(1, "unknown pixel multiplier specified\n"); + /* fall through */ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; @@ -1400,10 +1401,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); - if (active_outputs & intel_sdvo_connector->output_flag) - return true; - else - return false; + return active_outputs & intel_sdvo_connector->output_flag; } bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, @@ -1913,7 +1911,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) if (edid != NULL) { if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), edid)) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); } @@ -2316,14 +2314,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) switch (sdvo->controlled_output) { case SDVO_OUTPUT_LVDS1: mask |= SDVO_OUTPUT_LVDS1; + /* fall through */ case SDVO_OUTPUT_LVDS0: mask |= SDVO_OUTPUT_LVDS0; + /* fall through */ case SDVO_OUTPUT_TMDS1: mask |= SDVO_OUTPUT_TMDS1; + /* fall through */ case SDVO_OUTPUT_TMDS0: mask |= SDVO_OUTPUT_TMDS0; + /* fall through */ case SDVO_OUTPUT_RGB1: mask |= SDVO_OUTPUT_RGB1; + /* fall through */ case SDVO_OUTPUT_RGB0: mask |= SDVO_OUTPUT_RGB0; break; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 344c0e709b19..f7026e887fa9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -41,20 +41,6 @@ #include <drm/i915_drm.h> #include "i915_drv.h" -bool intel_format_is_yuv(u32 format) -{ - switch (format) { - case DRM_FORMAT_YUYV: - case DRM_FORMAT_UYVY: - case 
DRM_FORMAT_VYUY: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_NV12: - return true; - default: - return false; - } -} - int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -107,13 +93,21 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) VBLANK_EVASION_TIME_US); max = vblank_start - 1; - local_irq_disable(); - if (min <= 0 || max <= 0) - return; + goto irq_disable; if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) - return; + goto irq_disable; + + /* + * Wait for psr to idle out after enabling the VBL interrupts + * VBL interrupts will start the PSR exit and prevent a PSR + * re-entry as well. + */ + if (intel_psr_wait_for_idle(new_crtc_state)) + DRM_ERROR("PSR idle timed out, atomic update may fail\n"); + + local_irq_disable(); crtc->debug.min_vbl = min; crtc->debug.max_vbl = max; @@ -171,6 +165,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); trace_i915_pipe_update_vblank_evaded(crtc); + return; + +irq_disable: + local_irq_disable(); } /** @@ -404,7 +402,7 @@ chv_update_csc(const struct intel_plane_state *plane_state) const s16 *csc = csc_matrix[plane_state->base.color_encoding]; /* Seems RGB data bypasses the CSC always */ - if (!intel_format_is_yuv(fb->format->format)) + if (!fb->format->is_yuv) return; I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); @@ -439,7 +437,7 @@ vlv_update_clrc(const struct intel_plane_state *plane_state) enum plane_id plane_id = plane->id; int contrast, brightness, sh_scale, sh_sin, sh_cos; - if (intel_format_is_yuv(fb->format->format) && + if (fb->format->is_yuv && plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) { /* * Expand limited range to full range: @@ -1040,7 +1038,7 @@ intel_check_sprite_plane(struct intel_plane *plane, src->y1 = src_y << 16; src->y2 = (src_y + src_h) << 16; - if (intel_format_is_yuv(fb->format->format) && + if (fb->format->is_yuv && fb->format->format != DRM_FORMAT_NV12 && (src_x % 2 || src_w % 2)) { DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 24dc368fdaa1..b5b04cb892e9 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1347,8 +1347,7 @@ intel_tv_get_modes(struct drm_connector *connector) mode_ptr = drm_mode_create(connector->dev); if (!mode_ptr) continue; - strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); - mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0'; + strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); mode_ptr->hdisplay = hactive_s; mode_ptr->hsync_start = hactive_s + 1; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 94e8863bd97c..7c95697e1a35 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -171,24 +171,11 @@ void intel_uc_init_early(struct drm_i915_private *i915) intel_huc_init_early(huc); sanitize_options_early(i915); - - if (USES_GUC(i915)) - intel_uc_fw_fetch(i915, &guc->fw); - - if (USES_HUC(i915)) - intel_uc_fw_fetch(i915, &huc->fw); } void intel_uc_cleanup_early(struct drm_i915_private *i915) { struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - - if (USES_HUC(i915)) - intel_uc_fw_fini(&huc->fw); - - if (USES_GUC(i915)) - intel_uc_fw_fini(&guc->fw); guc_free_load_err_log(guc); } @@ -252,28 +239,41 @@ static void guc_disable_communication(struct intel_guc *guc) 
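The intel_tv.c hunk above replaces a strncpy() call plus a hand-written NUL terminator with a single strlcpy(). strncpy() leaves the destination unterminated whenever the source fills the buffer, which is exactly what the deleted second line papered over; strlcpy() always terminates and truncates as needed. A minimal user-space sketch of the semantics, where my_strlcpy() is a local stand-in for the kernel helper rather than its actual implementation:

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for the kernel's strlcpy(): copy at most
     * size - 1 bytes and always NUL-terminate the destination. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (size) {
                    size_t n = len < size - 1 ? len : size - 1;

                    memcpy(dst, src, n);
                    dst[n] = '\0'; /* unconditional terminator */
            }
            return len; /* length the caller tried to create */
    }

    int main(void)
    {
            char a[8], b[8];

            strncpy(a, "0123456789", sizeof(a));    /* a is NOT terminated */
            my_strlcpy(b, "0123456789", sizeof(b)); /* b is "0123456" + NUL */

            printf("%.8s / %s\n", a, b);
            return 0;
    }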
int intel_uc_init_misc(struct drm_i915_private *i915) { struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; int ret; if (!USES_GUC(i915)) return 0; - intel_guc_init_ggtt_pin_bias(guc); - - ret = intel_guc_init_wq(guc); + ret = intel_guc_init_misc(guc); if (ret) return ret; + if (USES_HUC(i915)) { + ret = intel_huc_init_misc(huc); + if (ret) + goto err_guc; + } + return 0; + +err_guc: + intel_guc_fini_misc(guc); + return ret; } void intel_uc_fini_misc(struct drm_i915_private *i915) { struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; if (!USES_GUC(i915)) return; - intel_guc_fini_wq(guc); + if (USES_HUC(i915)) + intel_huc_fini_misc(huc); + + intel_guc_fini_misc(guc); } int intel_uc_init(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index fbe4324116d7..7efb326badcd 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -570,6 +570,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) i915_vma_close(vma); i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } @@ -597,6 +598,7 @@ static void close_object_list(struct list_head *objects, list_del(&obj->st_link); i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } } @@ -866,6 +868,7 @@ static int igt_mock_ppgtt_64K(void *arg) i915_vma_close(vma); i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } } @@ -919,12 +922,12 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) *cmd++ = val; } else if (gen >= 4) { *cmd++ = MI_STORE_DWORD_IMM_GEN4 | - (gen < 6 ? 1 << 22 : 0); + (gen < 6 ? 
MI_USE_GGTT : 0); *cmd++ = 0; *cmd++ = offset; *cmd++ = val; } else { - *cmd++ = MI_STORE_DWORD_IMM | 1 << 22; + *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *cmd++ = offset; *cmd++ = val; } @@ -985,7 +988,10 @@ static int gpu_write(struct i915_vma *vma, goto err_request; } - i915_vma_move_to_active(batch, rq, 0); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto err_request; + i915_gem_object_set_active_reference(batch->obj); i915_vma_unpin(batch); i915_vma_close(batch); @@ -996,11 +1002,9 @@ static int gpu_write(struct i915_vma *vma, if (err) goto err_request; - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - - reservation_object_lock(vma->resv, NULL); - reservation_object_add_excl_fence(vma->resv, &rq->fence); - reservation_object_unlock(vma->resv); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + i915_request_skip(rq, err); err_request: i915_request_add(rq); @@ -1264,6 +1268,7 @@ static int igt_ppgtt_exhaust_huge(void *arg) } i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } } @@ -1325,6 +1330,7 @@ static int igt_ppgtt_internal_huge(void *arg) } i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } @@ -1393,6 +1399,7 @@ static int igt_ppgtt_gemfs_huge(void *arg) } i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } @@ -1694,7 +1701,7 @@ int i915_gem_huge_page_mock_selftests(void) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock"); + ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV)); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; @@ -1724,7 +1731,7 @@ out_unlock: i915_modparams.enable_ppgtt = saved_ppgtt; - drm_dev_unref(&dev_priv->drm); + drm_dev_put(&dev_priv->drm); return err; } @@ -1748,6 +1755,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return 0; } + if (i915_terminally_wedged(&dev_priv->gpu_error)) + return 0; + file = mock_file(dev_priv); if (IS_ERR(file)) return PTR_ERR(file); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index a4900091ae3d..3a095c37c120 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -42,11 +42,21 @@ static int cpu_set(struct drm_i915_gem_object *obj, page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); - if (needs_clflush & CLFLUSH_BEFORE) + + if (needs_clflush & CLFLUSH_BEFORE) { + mb(); clflush(map+offset_in_page(offset) / sizeof(*map)); + mb(); + } + map[offset_in_page(offset) / sizeof(*map)] = v; - if (needs_clflush & CLFLUSH_AFTER) + + if (needs_clflush & CLFLUSH_AFTER) { + mb(); clflush(map+offset_in_page(offset) / sizeof(*map)); + mb(); + } + kunmap_atomic(map); i915_gem_obj_finish_shmem_access(obj); @@ -68,8 +78,13 @@ static int cpu_get(struct drm_i915_gem_object *obj, page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); - if (needs_clflush & CLFLUSH_BEFORE) + + if (needs_clflush & CLFLUSH_BEFORE) { + mb(); clflush(map+offset_in_page(offset) / sizeof(*map)); + mb(); + } + *v = map[offset_in_page(offset) / sizeof(*map)]; kunmap_atomic(map); @@ -210,28 +225,24 @@ static int gpu_set(struct drm_i915_gem_object *obj, *cs++ = 
upper_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = v; } else if (INTEL_GEN(i915) >= 4) { - *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; *cs++ = i915_ggtt_offset(vma) + offset; *cs++ = v; } else { - *cs++ = MI_STORE_DWORD_IMM | 1 << 22; + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *cs++ = i915_ggtt_offset(vma) + offset; *cs++ = v; *cs++ = MI_NOOP; } intel_ring_advance(rq, cs); - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); i915_vma_unpin(vma); - reservation_object_lock(obj->resv, NULL); - reservation_object_add_excl_fence(obj->resv, &rq->fence); - reservation_object_unlock(obj->resv); - i915_request_add(rq); - return 0; + return err; } static bool always_valid(struct drm_i915_private *i915) @@ -239,8 +250,16 @@ static bool always_valid(struct drm_i915_private *i915) return true; } +static bool needs_fence_registers(struct drm_i915_private *i915) +{ + return !i915_terminally_wedged(&i915->gpu_error); +} + static bool needs_mi_store_dword(struct drm_i915_private *i915) { + if (i915_terminally_wedged(&i915->gpu_error)) + return false; + return intel_engine_can_store_dword(i915->engine[RCS]); } @@ -251,7 +270,7 @@ static const struct igt_coherency_mode { bool (*valid)(struct drm_i915_private *i915); } igt_coherency_mode[] = { { "cpu", cpu_set, cpu_get, always_valid }, - { "gtt", gtt_set, gtt_get, always_valid }, + { "gtt", gtt_set, gtt_get, needs_fence_registers }, { "wc", wc_set, wc_get, always_valid }, { "gpu", gpu_set, NULL, needs_mi_store_dword }, { }, diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 90c3c36173ba..1c92560d35da 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -23,6 +23,7 @@ */ #include "../i915_selftest.h" +#include "i915_random.h" #include "igt_flush_test.h" #include "mock_drm.h" @@ -63,12 +64,12 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) *cmd++ = value; } else if (gen >= 4) { *cmd++ = MI_STORE_DWORD_IMM_GEN4 | - (gen < 6 ? 1 << 22 : 0); + (gen < 6 ? 
MI_USE_GGTT : 0); *cmd++ = 0; *cmd++ = offset; *cmd++ = value; } else { - *cmd++ = MI_STORE_DWORD_IMM | 1 << 22; + *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *cmd++ = offset; *cmd++ = value; } @@ -170,22 +171,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj, if (err) goto err_request; - i915_vma_move_to_active(batch, rq, 0); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + i915_gem_object_set_active_reference(batch->obj); i915_vma_unpin(batch); i915_vma_close(batch); - i915_vma_move_to_active(vma, rq, 0); i915_vma_unpin(vma); - reservation_object_lock(obj->resv, NULL); - reservation_object_add_excl_fence(obj->resv, &rq->fence); - reservation_object_unlock(obj->resv); - i915_request_add(rq); return 0; +skip_request: + i915_request_skip(rq, err); err_request: i915_request_add(rq); err_batch: @@ -248,9 +253,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max) } for (; m < DW_PER_PAGE; m++) { - if (map[m] != 0xdeadbeef) { + if (map[m] != STACK_MAGIC) { pr_err("Invalid value at page %d, offset %d: found %x expected %x\n", - n, m, map[m], 0xdeadbeef); + n, m, map[m], STACK_MAGIC); err = -EINVAL; goto out_unmap; } @@ -306,7 +311,7 @@ create_test_object(struct i915_gem_context *ctx, if (err) return ERR_PTR(err); - err = cpu_fill(obj, 0xdeadbeef); + err = cpu_fill(obj, STACK_MAGIC); if (err) { pr_err("Failed to fill object with cpu, err=%d\n", err); @@ -336,11 +341,15 @@ static int igt_ctx_exec(void *arg) bool first_shared_gtt = true; int err = -ENODEV; - /* Create a few different contexts (with different mm) and write + /* + * Create a few different contexts (with different mm) and write * through each ctx/mm using the GPU making sure those writes end * up in the expected pages of our obj. */ + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); @@ -367,6 +376,9 @@ static int igt_ctx_exec(void *arg) } for_each_engine(engine, i915, id) { + if (!engine->context_size) + continue; /* No logical context support in HW */ + if (!intel_engine_can_store_dword(engine)) continue; @@ -421,6 +433,111 @@ out_unlock: return err; } +static int igt_ctx_readonly(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj = NULL; + struct drm_file *file; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); + struct i915_gem_context *ctx; + struct i915_hw_ppgtt *ppgtt; + unsigned long ndwords, dw; + int err = -ENODEV; + + /* + * Create a few read-only objects (with the occasional writable object) + * and try to write into these object checking that the GPU discards + * any write to a read-only object. 
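The gpu_fill() rework above introduces the skip_request unwind: once a request has been allocated, ring space is reserved and the request must still be submitted, so failures after allocation poison it with i915_request_skip() instead of leaking it. A kernel-flavoured sketch of that shape (the function name is local to this example; it is not buildable outside the driver):

    static int submit_with_unwind(struct i915_request *rq,
                                  struct i915_vma *batch,
                                  struct i915_vma *vma)
    {
            int err;

            /* Everything that can fail sits between request
             * allocation and i915_request_add(). */
            err = i915_vma_move_to_active(batch, rq, 0);
            if (err)
                    goto skip_request;

            err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
            if (err)
                    goto skip_request;

            i915_request_add(rq); /* normal submission */
            return 0;

    skip_request:
            /* The request cannot simply be abandoned: mark its
             * payload as skipped, then still add it to the ring. */
            i915_request_skip(rq, err);
            i915_request_add(rq);
            return err;
    }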
+ */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + ctx = i915_gem_create_context(i915, file->driver_priv); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt; + if (!ppgtt || !ppgtt->vm.has_read_only) { + err = 0; + goto out_unlock; + } + + ndwords = 0; + dw = 0; + while (!time_after(jiffies, end_time)) { + struct intel_engine_cs *engine; + unsigned int id; + + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + if (!obj) { + obj = create_test_object(ctx, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_unlock; + } + + if (prandom_u32_state(&prng) & 1) + i915_gem_object_set_readonly(obj); + } + + intel_runtime_pm_get(i915); + err = gpu_fill(obj, ctx, engine, dw); + intel_runtime_pm_put(i915); + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->ppgtt), err); + goto out_unlock; + } + + if (++dw == max_dwords(obj)) { + obj = NULL; + dw = 0; + } + ndwords++; + } + } + pr_info("Submitted %lu dwords (across %u engines)\n", + ndwords, INTEL_INFO(i915)->num_rings); + + dw = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); + unsigned int num_writes; + + num_writes = rem; + if (i915_gem_object_is_readonly(obj)) + num_writes = 0; + + err = cpu_check(obj, num_writes); + if (err) + break; + + dw += rem; + } + +out_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + return err; +} + static __maybe_unused const char * __engine_name(struct drm_i915_private *i915, unsigned int engines) { @@ -467,7 +584,9 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915, } } - err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) return err; @@ -586,7 +705,7 @@ int i915_gem_context_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } @@ -595,10 +714,14 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) static const struct i915_subtest tests[] = { SUBTEST(igt_switch_to_kernel_context), SUBTEST(igt_ctx_exec), + SUBTEST(igt_ctx_readonly), }; bool fake_alias = false; int err; + if (i915_terminally_wedged(&dev_priv->gpu_error)) + return 0; + /* Install a fake aliasing gtt for exercise */ if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) { mutex_lock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index 89dc25a5a53b..a7055b12e53c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c @@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 2dc72a984d45..128ad1cf0647 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void) err = 
i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } @@ -500,5 +500,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_evict_contexts), }; + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index a4060238bef0..8e2e269db97e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -32,6 +32,20 @@ #include "mock_drm.h" #include "mock_gem_device.h" +static void cleanup_freed_objects(struct drm_i915_private *i915) +{ + /* + * As we may hold onto the struct_mutex for inordinate lengths of + * time, the NMI khungtaskd detector may fire for the free objects + * worker. + */ + mutex_unlock(&i915->drm.struct_mutex); + + i915_gem_drain_freed_objects(i915); + + mutex_lock(&i915->drm.struct_mutex); +} + static void fake_free_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { @@ -134,7 +148,7 @@ static int igt_ppgtt_alloc(void *arg) { struct drm_i915_private *dev_priv = arg; struct i915_hw_ppgtt *ppgtt; - u64 size, last; + u64 size, last, limit; int err = 0; /* Allocate a ppgtt and try to fill the entire range */ if (!USES_PPGTT(dev_priv)) return 0; - mutex_lock(&dev_priv->drm.struct_mutex); ppgtt = __hw_ppgtt_create(dev_priv); - if (IS_ERR(ppgtt)) { - err = PTR_ERR(ppgtt); - goto err_unlock; - } + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); if (!ppgtt->vm.allocate_va_range) goto err_ppgtt_cleanup; + /* + * While we only allocate the page tables here and so we could + * address a much larger GTT than we could actually fit into + * RAM, a practical limit is the number of physical pages in the system. + * This should ensure that we do not run into the oomkiller during + * the test and take down the machine wilfully.
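The comment above motivates the clamp added just below: only page tables are allocated, but on a full 48-bit ppGTT even those can outgrow physical memory, so the sweep is bounded by totalram_pages and a cond_resched() between steps keeps the hung-task detector quiet. A compilable sketch of the clamped geometric sweep, with names local to this example:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Walk 4K, 16K, 64K, ... up to min(vm_total, RAM), the same
     * size <<= 2 progression the selftest uses. */
    static void sweep(uint64_t vm_total, uint64_t totalram_pages)
    {
            uint64_t limit = totalram_pages << PAGE_SHIFT;

            if (vm_total < limit)
                    limit = vm_total;

            for (uint64_t size = 4096; size <= limit; size <<= 2)
                    printf("allocate_va_range(0, %llu)\n",
                           (unsigned long long)size);
    }

    int main(void)
    {
            sweep(1ULL << 48, 4ULL << 20); /* 48b GTT, 16GiB of RAM */
            return 0;
    }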
+ */ + limit = totalram_pages << PAGE_SHIFT; + limit = min(ppgtt->vm.total, limit); + /* Check we can allocate the entire range */ - for (size = 4096; - size <= ppgtt->vm.total; - size <<= 2) { + for (size = 4096; size <= limit; size <<= 2) { err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size); if (err) { if (err == -ENOMEM) { @@ -166,13 +185,13 @@ static int igt_ppgtt_alloc(void *arg) goto err_ppgtt_cleanup; } + cond_resched(); + ppgtt->vm.clear_range(&ppgtt->vm, 0, size); } /* Check we can incrementally allocate the entire range */ - for (last = 0, size = 4096; - size <= ppgtt->vm.total; - last = size, size <<= 2) { + for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) { err = ppgtt->vm.allocate_va_range(&ppgtt->vm, last, size - last); if (err) { @@ -183,12 +202,13 @@ static int igt_ppgtt_alloc(void *arg) } goto err_ppgtt_cleanup; } + + cond_resched(); } err_ppgtt_cleanup: - ppgtt->vm.cleanup(&ppgtt->vm); - kfree(ppgtt); -err_unlock: + mutex_lock(&dev_priv->drm.struct_mutex); + i915_ppgtt_put(ppgtt); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } @@ -291,6 +311,8 @@ static int lowlevel_hole(struct drm_i915_private *i915, i915_gem_object_put(obj); kfree(order); + + cleanup_freed_objects(i915); } return 0; @@ -519,6 +541,7 @@ static int fill_hole(struct drm_i915_private *i915, } close_object_list(&objects, vm); + cleanup_freed_objects(i915); } return 0; @@ -605,6 +628,8 @@ err_put: i915_gem_object_put(obj); if (err) return err; + + cleanup_freed_objects(i915); } return 0; @@ -789,6 +814,8 @@ err_obj: kfree(order); if (err) return err; + + cleanup_freed_objects(i915); } return 0; @@ -857,6 +884,7 @@ static int __shrink_hole(struct drm_i915_private *i915, } close_object_list(&objects, vm); + cleanup_freed_objects(i915); return err; } @@ -949,6 +977,7 @@ static int shrink_boom(struct drm_i915_private *i915, i915_gem_object_put(explode); memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); + cleanup_freed_objects(i915); } return 0; @@ -980,7 +1009,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock"); + ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; @@ -1215,6 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915, u64 hole_start, u64 hole_end, unsigned long end_time)) { + const u64 limit = totalram_pages << PAGE_SHIFT; struct i915_gem_context *ctx; struct i915_hw_ppgtt *ppgtt; IGT_TIMEOUT(end_time); @@ -1227,7 +1257,7 @@ static int exercise_mock(struct drm_i915_private *i915, ppgtt = ctx->ppgtt; GEM_BUG_ON(!ppgtt); - err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time); + err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time); mock_context_close(ctx); return err; @@ -1644,7 +1674,7 @@ int i915_gem_gtt_mock_selftests(void) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index 2b2dde94526f..c69cbd5aed52 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -169,9 +169,16 @@ static u64 tiled_offset(const struct tile *tile, u64 v) v += y * tile->width; v += div64_u64_rem(x, tile->width, &x) << tile->size; v += x; - } else { + } else if (tile->width == 128) { 
const unsigned int ytile_span = 16; - const unsigned int ytile_height = 32 * ytile_span; + const unsigned int ytile_height = 512; + + v += y * ytile_span; + v += div64_u64_rem(x, ytile_span, &x) * ytile_height; + v += x; + } else { + const unsigned int ytile_span = 32; + const unsigned int ytile_height = 256; v += y * ytile_span; v += div64_u64_rem(x, ytile_span, &x) * ytile_height; @@ -288,6 +295,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, kunmap(p); if (err) return err; + + i915_vma_destroy(vma); } return 0; @@ -347,6 +356,14 @@ static int igt_partial_tiling(void *arg) unsigned int pitch; struct tile tile; + if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + /* + * The swizzling pattern is actually unknown as it + * varies based on physical address of each page. + * See i915_gem_detect_bit_6_swizzle(). + */ + break; + tile.tiling = tiling; switch (tiling) { case I915_TILING_X: @@ -357,7 +374,8 @@ static int igt_partial_tiling(void *arg) break; } - if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN || + GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN); + if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) continue; @@ -454,12 +472,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) return PTR_ERR(rq); } - i915_vma_move_to_active(vma, rq, 0); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_request_add(rq); - i915_gem_object_set_active_reference(obj); + __i915_gem_object_release_unless_active(obj); i915_vma_unpin(vma); - return 0; + + return err; } static bool assert_mmap_offset(struct drm_i915_private *i915, @@ -488,6 +508,15 @@ static int igt_mmap_offset_exhaustion(void *arg) u64 hole_start, hole_end; int loop, err; + /* Disable background reaper */ + mutex_lock(&i915->drm.struct_mutex); + if (!i915->gt.active_requests++) + i915_gem_unpark(i915); + mutex_unlock(&i915->drm.struct_mutex); + cancel_delayed_work_sync(&i915->gt.retire_work); + cancel_delayed_work_sync(&i915->gt.idle_work); + GEM_BUG_ON(!i915->gt.awake); + /* Trim the device mmap space to only a page */ memset(&resv, 0, sizeof(resv)); drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { @@ -496,7 +525,7 @@ static int igt_mmap_offset_exhaustion(void *arg) err = drm_mm_reserve_node(mm, &resv); if (err) { pr_err("Failed to trim VMA manager, err=%d\n", err); - return err; + goto out_park; } break; } @@ -538,6 +567,9 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { + if (i915_terminally_wedged(&i915->gpu_error)) + break; + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); @@ -554,6 +586,7 @@ static int igt_mmap_offset_exhaustion(void *arg) goto err_obj; } + /* NB we rely on the _active_ reference to access obj now */ GEM_BUG_ON(!i915_gem_object_is_active(obj)); err = i915_gem_object_create_mmap_offset(obj); if (err) { @@ -565,6 +598,13 @@ static int igt_mmap_offset_exhaustion(void *arg) out: drm_mm_remove_node(&resv); +out_park: + mutex_lock(&i915->drm.struct_mutex); + if (--i915->gt.active_requests) + queue_delayed_work(i915->wq, &i915->gt.retire_work, 0); + else + queue_delayed_work(i915->wq, &i915->gt.idle_work, 0); + mutex_unlock(&i915->drm.struct_mutex); return err; err_obj: i915_gem_object_put(obj); @@ -586,7 +626,7 @@ int i915_gem_object_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git 
a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 63cd9486cc13..c4aac6141e04 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -262,7 +262,7 @@ int i915_request_mock_selftests(void) return -ENOMEM; err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } @@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t, t->func = func; t->name = name; - err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) { pr_err("%s(%s): failed to idle before, with err=%d!", func, name, err); @@ -594,11 +596,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) } else if (gen >= 6) { *cmd++ = MI_BATCH_BUFFER_START | 1 << 8; *cmd++ = lower_32_bits(vma->node.start); - } else if (gen >= 4) { - *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; - *cmd++ = lower_32_bits(vma->node.start); } else { - *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1; + *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; *cmd++ = lower_32_bits(vma->node.start); } *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */ @@ -678,7 +677,9 @@ static int live_all_engines(void *arg) i915_gem_object_set_active_reference(batch->obj); } - i915_vma_move_to_active(batch, request[id], 0); + err = i915_vma_move_to_active(batch, request[id], 0); + GEM_BUG_ON(err); + i915_request_get(request[id]); i915_request_add(request[id]); } @@ -788,7 +789,9 @@ static int live_sequential_engines(void *arg) GEM_BUG_ON(err); request[id]->batch = batch; - i915_vma_move_to_active(batch, request[id], 0); + err = i915_vma_move_to_active(batch, request[id], 0); + GEM_BUG_ON(err); + i915_gem_object_set_active_reference(batch->obj); i915_vma_get(batch); @@ -862,5 +865,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_sequential_engines), SUBTEST(live_empty_request), }; + + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index addc5a599c4a..86c54ea37f48 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -210,6 +210,8 @@ int __i915_subtests(const char *caller, return -EINTR; pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name); + GEM_TRACE("Running %s/%s\n", caller, st->name); + err = st->func(data); if (err && err != -EINTR) { pr_err(DRIVER_NAME "/%s: %s failed with error %d\n", diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 8400a8cc5cf2..ffa74290e054 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -733,7 +733,7 @@ int i915_vma_mock_selftests(void) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index 0d06f559243f..af66e3d4e23a 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -9,52 +9,8 @@ #include "../i915_selftest.h" #include "igt_flush_test.h" -struct wedge_me { - struct delayed_work work; - struct drm_i915_private *i915; - const void *symbol; -}; 
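recursive_batch() above folds the gen4 branch into the generic one, since after dropping the stray low bit both emit MI_BATCH_BUFFER_START | MI_BATCH_GTT. A standalone sketch of the per-generation emission shape; the XMI_* values are made-up placeholders rather than the real opcode encodings, and the gen8 branch is inferred from the surrounding code, not shown in this hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder encodings for illustration only. */
    #define XMI_BATCH_BUFFER_START 0x31000000u
    #define XMI_BATCH_GTT          (1u << 8)

    /* gen8+ takes a 64-bit address in two dwords, gen6/7 a 32-bit
     * address, and older gens need the GTT address-space flag. */
    static uint32_t *emit_bb_start(uint32_t *cs, int gen, uint64_t addr)
    {
            if (gen >= 8) {
                    *cs++ = XMI_BATCH_BUFFER_START | (1u << 8) | 1;
                    *cs++ = (uint32_t)addr;
                    *cs++ = (uint32_t)(addr >> 32);
            } else if (gen >= 6) {
                    *cs++ = XMI_BATCH_BUFFER_START | (1u << 8);
                    *cs++ = (uint32_t)addr;
            } else {
                    *cs++ = XMI_BATCH_BUFFER_START | XMI_BATCH_GTT;
                    *cs++ = (uint32_t)addr;
            }
            return cs;
    }

    int main(void)
    {
            uint32_t cs[4], *end = emit_bb_start(cs, 8, 0x100000);

            printf("%td dwords emitted\n", end - cs);
            return 0;
    }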
- -static void wedge_me(struct work_struct *work) -{ - struct wedge_me *w = container_of(work, typeof(*w), work.work); - - pr_err("%pS timed out, cancelling all further testing.\n", w->symbol); - - GEM_TRACE("%pS timed out.\n", w->symbol); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(w->i915); -} - -static void __init_wedge(struct wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const void *symbol) -{ - w->i915 = i915; - w->symbol = symbol; - - INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); - schedule_delayed_work(&w->work, timeout); -} - -static void __fini_wedge(struct wedge_me *w) -{ - cancel_delayed_work_sync(&w->work); - destroy_delayed_work_on_stack(&w->work); - w->i915 = NULL; -} - -#define wedge_on_timeout(W, DEV, TIMEOUT) \ - for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \ - (W)->i915; \ - __fini_wedge((W))) - int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) { - struct wedge_me w; - cond_resched(); if (flags & I915_WAIT_LOCKED && @@ -63,8 +19,15 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) i915_gem_set_wedged(i915); } - wedge_on_timeout(&w, i915, HZ) - i915_gem_wait_for_idle(i915, flags); + if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) { + pr_err("%pS timed out, cancelling all further testing.\n", + __builtin_return_address(0)); + + GEM_TRACE("%pS timed out.\n", __builtin_return_address(0)); + GEM_TRACE_DUMP(); + + i915_gem_set_wedged(i915); + } return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0; } diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h new file mode 100644 index 000000000000..08e5ff11bbd9 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef IGT_WEDGE_ME_H +#define IGT_WEDGE_ME_H + +#include <linux/workqueue.h> + +#include "../i915_gem.h" + +struct drm_i915_private; + +struct igt_wedge_me { + struct delayed_work work; + struct drm_i915_private *i915; + const char *name; +}; + +static void __igt_wedge_me(struct work_struct *work) +{ + struct igt_wedge_me *w = container_of(work, typeof(*w), work.work); + + pr_err("%s timed out, cancelling test.\n", w->name); + + GEM_TRACE("%s timed out.\n", w->name); + GEM_TRACE_DUMP(); + + i915_gem_set_wedged(w->i915); +} + +static void __igt_init_wedge(struct igt_wedge_me *w, + struct drm_i915_private *i915, + long timeout, + const char *name) +{ + w->i915 = i915; + w->name = name; + + INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me); + schedule_delayed_work(&w->work, timeout); +} + +static void __igt_fini_wedge(struct igt_wedge_me *w) +{ + cancel_delayed_work_sync(&w->work); + destroy_delayed_work_on_stack(&w->work); + w->i915 = NULL; +} + +#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \ + for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \ + (W)->i915; \ + __igt_fini_wedge((W))) + +#endif /* IGT_WEDGE_ME_H */ diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c index d6926e7820e5..f03b407fdbe2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c @@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void) return -ENOMEM; err = i915_subtests(tests, i915->engine[RCS]); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c 
b/drivers/gpu/drm/i915/selftests/intel_guc.c index fb74e2cf8a0a..407c98fb9170 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -196,19 +196,23 @@ static int igt_guc_clients(void *args) } unreserve_doorbell(guc->execbuf_client); - err = guc_clients_doorbell_init(guc); + + __create_doorbell(guc->execbuf_client); + err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id); if (err != -EIO) { pr_err("unexpected (err = %d)", err); - goto out; + goto out_db; } if (!available_dbs(guc, guc->execbuf_client->priority)) { pr_err("doorbell not available when it should be\n"); err = -EIO; - goto out; + goto out_db; } +out_db: /* clean after test */ + __destroy_doorbell(guc->execbuf_client); err = reserve_doorbell(guc->execbuf_client); if (err) { pr_err("failed to reserve the doorbell back\n"); diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index fe7d3190ebfe..65d66cdedd26 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -27,6 +27,7 @@ #include "../i915_selftest.h" #include "i915_random.h" #include "igt_flush_test.h" +#include "igt_wedge_me.h" #include "mock_context.h" #include "mock_drm.h" @@ -130,13 +131,19 @@ static int emit_recurse_batch(struct hang *h, if (err) goto unpin_vma; - i915_vma_move_to_active(vma, rq, 0); + err = i915_vma_move_to_active(vma, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(vma->obj)) { i915_gem_object_get(vma->obj); i915_gem_object_set_active_reference(vma->obj); } - i915_vma_move_to_active(hws, rq, 0); + err = i915_vma_move_to_active(hws, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(hws->obj)) { i915_gem_object_get(hws->obj); i915_gem_object_set_active_reference(hws->obj); @@ -171,7 +178,7 @@ static int emit_recurse_batch(struct hang *h, *batch++ = MI_BATCH_BUFFER_START | 1 << 8; *batch++ = lower_32_bits(vma->node.start); } else if (INTEL_GEN(i915) >= 4) { - *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; + *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = rq->fence.seqno; @@ -184,7 +191,7 @@ static int emit_recurse_batch(struct hang *h, *batch++ = MI_BATCH_BUFFER_START | 2 << 6; *batch++ = lower_32_bits(vma->node.start); } else { - *batch++ = MI_STORE_DWORD_IMM; + *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = rq->fence.seqno; *batch++ = MI_ARB_CHECK; @@ -193,7 +200,7 @@ static int emit_recurse_batch(struct hang *h, batch += 1024 / sizeof(*batch); *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1; + *batch++ = MI_BATCH_BUFFER_START | 2 << 6; *batch++ = lower_32_bits(vma->node.start); } *batch++ = MI_BATCH_BUFFER_END; /* not reached */ @@ -205,6 +212,7 @@ static int emit_recurse_batch(struct hang *h, err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); +unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); @@ -914,7 +922,7 @@ static u32 fake_hangcheck(struct i915_request *rq, u32 mask) return reset_count; } -static int igt_wait_reset(void *arg) +static int igt_reset_wait(void *arg) { struct drm_i915_private *i915 = arg; struct i915_request *rq; @@ -988,6 +996,170 @@ unlock: return err; } +struct evict_vma { + struct completion completion; + struct i915_vma *vma; +}; + +static int evict_vma(void *data) +{ + struct evict_vma
*arg = data; + struct i915_address_space *vm = arg->vma->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_mm_node evict = arg->vma->node; + int err; + + complete(&arg->completion); + + mutex_lock(&i915->drm.struct_mutex); + err = i915_gem_evict_for_node(vm, &evict, 0); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + +static int __igt_reset_evict_vma(struct drm_i915_private *i915, + struct i915_address_space *vm) +{ + struct drm_i915_gem_object *obj; + struct task_struct *tsk = NULL; + struct i915_request *rq; + struct evict_vma arg; + struct hang h; + int err; + + if (!intel_engine_can_store_dword(i915->engine[RCS])) + return 0; + + /* Check that we can recover an unbind stuck on a hanging request */ + + global_reset_lock(i915); + + mutex_lock(&i915->drm.struct_mutex); + err = hang_init(&h, i915); + if (err) + goto unlock; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto fini; + } + + arg.vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(arg.vma)) { + err = PTR_ERR(arg.vma); + goto out_obj; + } + + rq = hang_create_request(&h, i915->engine[RCS]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_obj; + } + + err = i915_vma_pin(arg.vma, 0, 0, + i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER); + if (err) + goto out_obj; + + err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unpin(arg.vma); + + i915_request_get(rq); + i915_request_add(rq); + if (err) + goto out_rq; + + mutex_unlock(&i915->drm.struct_mutex); + + if (!wait_until_running(&h, rq)) { + struct drm_printer p = drm_info_printer(i915->drm.dev); + + pr_err("%s: Failed to start request %x, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); + + i915_gem_set_wedged(i915); + goto out_reset; + } + + init_completion(&arg.completion); + + tsk = kthread_run(evict_vma, &arg, "igt/evict_vma"); + if (IS_ERR(tsk)) { + err = PTR_ERR(tsk); + tsk = NULL; + goto out_reset; + } + + wait_for_completion(&arg.completion); + + if (wait_for(waitqueue_active(&rq->execute), 10)) { + struct drm_printer p = drm_info_printer(i915->drm.dev); + + pr_err("igt/evict_vma kthread did not wait\n"); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); + + i915_gem_set_wedged(i915); + goto out_reset; + } + +out_reset: + fake_hangcheck(rq, intel_engine_flag(rq->engine)); + + if (tsk) { + struct igt_wedge_me w; + + /* The reset, even indirectly, should take less than 10ms. 
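The test above spawns a kthread that signals a completion and then blocks trying to evict a node pinned by the hanging request; the parent waits for that completion, provokes the reset, and collects the worker with kthread_stop() under the wedge watchdog that follows. A kernel-flavoured sketch of the rendezvous shape (worker_fn/run_worker are names local to this example; it is not buildable outside a kernel tree):

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    struct worker_arg {
            struct completion started;
            /* ... state the worker operates on ... */
    };

    static int worker_fn(void *data)
    {
            struct worker_arg *arg = data;

            complete(&arg->started); /* unblock the spawner */
            /* ... do the work that is expected to block ... */
            return 0;
    }

    static int run_worker(struct worker_arg *arg)
    {
            struct task_struct *tsk;

            init_completion(&arg->started);
            tsk = kthread_run(worker_fn, arg, "igt/worker");
            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);

            /* Only proceed once the child is known to be running. */
            wait_for_completion(&arg->started);
            /* ... provoke the hang recovery here ... */
            return kthread_stop(tsk); /* worker's return value */
    }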
*/ + igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) + err = kthread_stop(tsk); + } + + mutex_lock(&i915->drm.struct_mutex); +out_rq: + i915_request_put(rq); +out_obj: + i915_gem_object_put(obj); +fini: + hang_fini(&h); +unlock: + mutex_unlock(&i915->drm.struct_mutex); + global_reset_unlock(i915); + + if (i915_terminally_wedged(&i915->gpu_error)) + return -EIO; + + return err; +} + +static int igt_reset_evict_ggtt(void *arg) +{ + struct drm_i915_private *i915 = arg; + + return __igt_reset_evict_vma(i915, &i915->ggtt.vm); +} + +static int igt_reset_evict_ppgtt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + int err; + + mutex_lock(&i915->drm.struct_mutex); + ctx = kernel_context(i915); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + err = 0; + if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */ + err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm); + + kernel_context_close(ctx); + return err; +} + static int wait_for_others(struct drm_i915_private *i915, struct intel_engine_cs *exclude) { @@ -1233,8 +1405,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_idle_engine), SUBTEST(igt_reset_active_engine), SUBTEST(igt_reset_engines), - SUBTEST(igt_wait_reset), SUBTEST(igt_reset_queue), + SUBTEST(igt_reset_wait), + SUBTEST(igt_reset_evict_ggtt), + SUBTEST(igt_reset_evict_ppgtt), SUBTEST(igt_handle_error), }; bool saved_hangcheck; @@ -1243,6 +1417,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) if (!intel_has_gpu_reset(i915)) return 0; + if (i915_terminally_wedged(&i915->gpu_error)) + return -EIO; /* we're long past hope of a successful reset */ + intel_runtime_pm_get(i915); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index ea27c7cfbf96..582566faef09 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin, if (err) goto unpin_vma; - i915_vma_move_to_active(vma, rq, 0); + err = i915_vma_move_to_active(vma, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(vma->obj)) { i915_gem_object_get(vma->obj); i915_gem_object_set_active_reference(vma->obj); } - i915_vma_move_to_active(hws, rq, 0); + err = i915_vma_move_to_active(hws, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(hws->obj)) { i915_gem_object_get(hws->obj); i915_gem_object_set_active_reference(hws->obj); @@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin, err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); +unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); @@ -444,16 +451,134 @@ err_wedged: goto err_ctx_lo; } +static int live_preempt_hang(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + if (!intel_has_reset_engine(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + + if (spinner_init(&spin_hi, i915)) + goto err_unlock; + + if (spinner_init(&spin_lo, i915)) + goto err_spin_hi; + + ctx_hi = kernel_context(i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = 
I915_CONTEXT_MAX_USER_PRIORITY; + + ctx_lo = kernel_context(i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!wait_for_spinner(&spin_lo, rq)) { + GEM_TRACE("lo spinner failed to start\n"); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + init_completion(&engine->execlists.preempt_hang.completion); + engine->execlists.preempt_hang.inject_hang = true; + + i915_request_add(rq); + + if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion, + HZ / 10)) { + pr_err("Preemption did not occur within timeout!"); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; + } + + set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + i915_reset_engine(engine, NULL); + clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + + engine->execlists.preempt_hang.inject_hang = false; + + if (!wait_for_spinner(&spin_hi, rq)) { + GEM_TRACE("hi spinner failed to start\n"); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; + } + + spinner_end(&spin_hi); + spinner_end(&spin_lo); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + spinner_fini(&spin_lo); +err_spin_hi: + spinner_fini(&spin_hi); +err_unlock: + igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + int intel_execlists_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_sanitycheck), SUBTEST(live_preempt), SUBTEST(live_late_preempt), + SUBTEST(live_preempt_hang), }; if (!HAS_EXECLISTS(i915)) return 0; + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index e1ea2d2bedd2..0d39b3bf0c0d 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -6,6 +6,7 @@ #include "../i915_selftest.h" +#include "igt_wedge_me.h" #include "mock_context.h" static struct drm_i915_gem_object * @@ -49,6 +50,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) goto err_pin; } + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto err_req; + srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; if (INTEL_GEN(ctx->i915) >= 8) srm++; @@ -67,11 +72,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) } intel_ring_advance(rq, cs); - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - reservation_object_lock(vma->resv, NULL); - reservation_object_add_excl_fence(vma->resv, &rq->fence); - reservation_object_unlock(vma->resv); - i915_gem_object_get(result); i915_gem_object_set_active_reference(result); @@ -112,6 +112,7 @@ static int check_whitelist(const struct whitelist *w, struct intel_engine_cs *engine) 
{ struct drm_i915_gem_object *results; + struct igt_wedge_me wedge; u32 *vaddr; int err; int i; @@ -120,7 +121,11 @@ static int check_whitelist(const struct whitelist *w, if (IS_ERR(results)) return PTR_ERR(results); - err = i915_gem_object_set_to_cpu_domain(results, false); + err = 0; + igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */ + err = i915_gem_object_set_to_cpu_domain(results, false); + if (i915_terminally_wedged(&ctx->i915->gpu_error)) + err = -EIO; if (err) goto out_put; @@ -283,6 +288,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915) }; int err; + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + mutex_lock(&i915->drm.struct_mutex); err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index c2a0451336cf..22a73da45ad5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -200,6 +200,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.submit_request = mock_submit_request; i915_timeline_init(i915, &engine->base.timeline, engine->base.name); + lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE); + intel_engine_init_breadcrumbs(&engine->base); engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index c97075c5ccaf..43ed8b28aeaa 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -157,7 +157,8 @@ struct drm_i915_private *mock_gem_device(void) dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); - WARN_ON(pm_runtime_get_sync(&pdev->dev)); + if (pm_runtime_enabled(&pdev->dev)) + WARN_ON(pm_runtime_get_sync(&pdev->dev)); i915 = (struct drm_i915_private *)(pdev + 1); pci_set_drvdata(pdev, i915); diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 6a7f4da7b523..a140ea5c3a7c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -70,12 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915, ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); - INIT_LIST_HEAD(&ppgtt->vm.active_list); - INIT_LIST_HEAD(&ppgtt->vm.inactive_list); - INIT_LIST_HEAD(&ppgtt->vm.unbound_list); - - INIT_LIST_HEAD(&ppgtt->vm.global_link); - drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total); + i915_address_space_init(&ppgtt->vm, i915); ppgtt->vm.clear_range = nop_clear_range; ppgtt->vm.insert_page = mock_insert_page; @@ -106,8 +101,6 @@ void mock_init_ggtt(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = &i915->ggtt; - INIT_LIST_HEAD(&i915->vm_list); - ggtt->vm.i915 = i915; ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); @@ -124,7 +117,7 @@ void mock_init_ggtt(struct drm_i915_private *i915) ggtt->vm.vma_ops.set_pages = ggtt_set_pages; ggtt->vm.vma_ops.clear_pages = clear_pages; - i915_address_space_init(&ggtt->vm, i915, "global"); + i915_address_space_init(&ggtt->vm, i915); } void mock_fini_ggtt(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 3b7acb5a70b3..435a2c35ee8c 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ 
-69,7 +69,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) } } -void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port) +void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) { struct drm_encoder *encoder = &intel_dsi->base.base; struct drm_device *dev = encoder->dev; @@ -342,11 +342,15 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, pipe_config->cpu_transcoder = TRANSCODER_DSI_C; else pipe_config->cpu_transcoder = TRANSCODER_DSI_A; - } - ret = intel_compute_dsi_pll(encoder, pipe_config); - if (ret) - return false; + ret = bxt_dsi_pll_compute(encoder, pipe_config); + if (ret) + return false; + } else { + ret = vlv_dsi_pll_compute(encoder, pipe_config); + if (ret) + return false; + } pipe_config->clock_set = true; @@ -546,12 +550,12 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_dsi_device_ready(encoder); - else if (IS_BROXTON(dev_priv)) - bxt_dsi_device_ready(encoder); - else if (IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_dsi_device_ready(encoder); + else if (IS_GEN9_LP(dev_priv)) + bxt_dsi_device_ready(encoder); + else + vlv_dsi_device_ready(encoder); } static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) @@ -810,8 +814,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, * The BIOS may leave the PLL in a wonky state where it doesn't * lock. It needs to be fully powered down to fix it. */ - intel_disable_dsi_pll(encoder); - intel_enable_dsi_pll(encoder, pipe_config); + if (IS_GEN9_LP(dev_priv)) { + bxt_dsi_pll_disable(encoder); + bxt_dsi_pll_enable(encoder, pipe_config); + } else { + vlv_dsi_pll_disable(encoder); + vlv_dsi_pll_enable(encoder, pipe_config); + } if (IS_BROXTON(dev_priv)) { /* Add MIPI IO reset programming for modeset */ @@ -929,11 +938,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || - IS_BROXTON(dev_priv)) - vlv_dsi_clear_device_ready(encoder); - else if (IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_dsi_clear_device_ready(encoder); + else + vlv_dsi_clear_device_ready(encoder); } static void intel_dsi_post_disable(struct intel_encoder *encoder, @@ -949,7 +957,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) - wait_for_dsi_fifo_empty(intel_dsi, port); + vlv_dsi_wait_for_fifo_empty(intel_dsi, port); intel_dsi_port_disable(encoder); usleep_range(2000, 5000); @@ -979,11 +987,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, val & ~MIPIO_RST_CTRL); } - intel_disable_dsi_pll(encoder); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { + bxt_dsi_pll_disable(encoder); + } else { u32 val; + vlv_dsi_pll_disable(encoder); + val = I915_READ(DSPCLK_GATE_D); val &= ~DPOUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, val); @@ -1024,7 +1034,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, * configuration, otherwise accessing DSI registers will hang the * machine. See BSpec North Display Engine registers/MIPI[BXT]. 
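Alongside the intel_dsi.c → vlv_dsi.c rename, the hunks above delete the IS_VALLEYVIEW/IS_GEN9_LP dispatch wrappers and let each call site invoke the vlv_ or bxt_ variant directly. A compilable toy version of the before/after, using simplified stand-in types for the driver structures:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct encoder { bool is_vlv_chv, is_gen9_lp; };

    static int vlv_pll_compute(struct encoder *e) { (void)e; return 0; }
    static int bxt_pll_compute(struct encoder *e) { (void)e; return 0; }

    /* Before: one exported wrapper per operation re-derived the
     * platform on every call, with a dead -ENODEV fallback. */
    static int compute_pll_wrapper(struct encoder *e)
    {
            if (e->is_vlv_chv)
                    return vlv_pll_compute(e);
            if (e->is_gen9_lp)
                    return bxt_pll_compute(e);
            return -ENODEV;
    }

    /* After: the caller branches once where it already knows the
     * platform; every DSI platform that is not BXT/GLK is VLV/CHV,
     * so the fallback leg disappears entirely. */
    static int compute_config(struct encoder *e)
    {
            return e->is_gen9_lp ? bxt_pll_compute(e)
                                 : vlv_pll_compute(e);
    }

    int main(void)
    {
            struct encoder bxt = { .is_gen9_lp = true };

            printf("%d %d\n", compute_pll_wrapper(&bxt),
                   compute_config(&bxt));
            return 0;
    }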
*/ - if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv)) + if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv)) goto out_put_power; /* XXX: this only works for one DSI output */ @@ -1247,16 +1257,19 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); - if (IS_GEN9_LP(dev_priv)) + if (IS_GEN9_LP(dev_priv)) { bxt_dsi_get_pipe_config(encoder, pipe_config); + pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp, + pipe_config); + } else { + pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp, + pipe_config); + } - pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp, - pipe_config); - if (!pclk) - return; - - pipe_config->base.adjusted_mode.crtc_clock = pclk; - pipe_config->port_clock = pclk; + if (pclk) { + pipe_config->base.adjusted_mode.crtc_clock = pclk; + pipe_config->port_clock = pclk; + } } static enum drm_mode_status @@ -1585,20 +1598,24 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) enum port port; u32 val; - if (!IS_GEMINILAKE(dev_priv)) { - for_each_dsi_port(port, intel_dsi->ports) { - /* Panel commands can be sent when clock is in LP11 */ - I915_WRITE(MIPI_DEVICE_READY(port), 0x0); + if (IS_GEMINILAKE(dev_priv)) + return; - intel_dsi_reset_clocks(encoder, port); - I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); + for_each_dsi_port(port, intel_dsi->ports) { + /* Panel commands can be sent when clock is in LP11 */ + I915_WRITE(MIPI_DEVICE_READY(port), 0x0); - val = I915_READ(MIPI_DSI_FUNC_PRG(port)); - val &= ~VID_MODE_FORMAT_MASK; - I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); + if (IS_GEN9_LP(dev_priv)) + bxt_dsi_reset_clocks(encoder, port); + else + vlv_dsi_reset_clocks(encoder, port); + I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); - I915_WRITE(MIPI_DEVICE_READY(port), 0x1); - } + val = I915_READ(MIPI_DSI_FUNC_PRG(port)); + val &= ~VID_MODE_FORMAT_MASK; + I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); + + I915_WRITE(MIPI_DEVICE_READY(port), 0x1); } } @@ -1713,7 +1730,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector) } } -void intel_dsi_init(struct drm_i915_private *dev_priv) +void vlv_dsi_init(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; struct intel_dsi *intel_dsi; @@ -1730,14 +1747,10 @@ void intel_dsi_init(struct drm_i915_private *dev_priv) if (!intel_bios_is_dsi_present(dev_priv, &port)) return; - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->mipi_mmio_base = VLV_MIPI_BASE; - } else if (IS_GEN9_LP(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) dev_priv->mipi_mmio_base = BXT_MIPI_BASE; - } else { - DRM_ERROR("Unsupported Mipi device to reg base"); - return; - } + else + dev_priv->mipi_mmio_base = VLV_MIPI_BASE; intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c index 2ff2ee7f3b78..a132a8037ecc 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c @@ -111,8 +111,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, * XXX: The muxing and gating is hard coded for now. Need to add support for * sharing PLLs with two DSI outputs. 
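The pclk plumbing above relies on the usual DSI link-rate relation: each pixel carries pipe_bpp bits spread across the configured lanes, so the per-lane bit clock scales as pclk * bpp / lanes, and the get_pclk paths invert it. A small sketch under that assumption; the helper name mirrors the driver's dsi_clk_from_pclk() for orientation only:

    #include <stdio.h>

    /* Round-to-nearest division, like DIV_ROUND_CLOSEST() for
     * non-negative operands. */
    static unsigned int div_round_closest(unsigned int n, unsigned int d)
    {
            return (n + d / 2) / d;
    }

    /* All clocks in kHz: bpp bits per pixel across lane_count lanes
     * gives the per-lane bit clock for a given pixel clock. */
    static unsigned int dsi_clk_from_pclk(unsigned int pclk_khz,
                                          unsigned int bpp,
                                          unsigned int lane_count)
    {
            return div_round_closest(pclk_khz * bpp, lane_count);
    }

    int main(void)
    {
            /* ~148.5 MHz pclk, RGB888, 4 lanes -> 891 MHz per lane */
            printf("%u kHz\n", dsi_clk_from_pclk(148500, 24, 4));
            return 0;
    }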
*/ -static int vlv_compute_dsi_pll(struct intel_encoder *encoder, - struct intel_crtc_state *config) +int vlv_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -142,8 +142,8 @@ static int vlv_compute_dsi_pll(struct intel_encoder *encoder, return 0; } -static void vlv_enable_dsi_pll(struct intel_encoder *encoder, - const struct intel_crtc_state *config) +void vlv_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -175,7 +175,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder, DRM_DEBUG_KMS("DSI PLL locked\n"); } -static void vlv_disable_dsi_pll(struct intel_encoder *encoder) +void vlv_dsi_pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp; @@ -192,7 +192,7 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder) mutex_unlock(&dev_priv->sb_lock); } -static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) +bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) { bool enabled; u32 val; @@ -229,7 +229,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) return enabled; } -static void bxt_disable_dsi_pll(struct intel_encoder *encoder) +void bxt_dsi_pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val; @@ -261,8 +261,8 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp) bpp, pipe_bpp); } -static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, - struct intel_crtc_state *config) +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, + struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -327,8 +327,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, return pclk; } -static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, - struct intel_crtc_state *config) +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, + struct intel_crtc_state *config) { u32 pclk; u32 dsi_clk; @@ -357,16 +357,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, return pclk; } -u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, - struct intel_crtc_state *config) -{ - if (IS_GEN9_LP(to_i915(encoder->base.dev))) - return bxt_dsi_get_pclk(encoder, pipe_bpp, config); - else - return vlv_dsi_get_pclk(encoder, pipe_bpp, config); -} - -static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) +void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 temp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -480,8 +471,8 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); } -static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder, - struct intel_crtc_state *config) +int bxt_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -528,8 +519,8 @@ static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder, return 0; } 
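The hunks above and below this point retire the intel_dsi_* wrapper layer: callers in vlv_dsi.c now test the platform once (IS_GEN9_LP() versus the Valley View/Cherry View default) and invoke the vlv_ or bxt_ PLL helper directly, while the helpers in vlv_dsi_pll.c lose their static linkage. A minimal standalone model of that dispatch pattern follows; all names are illustrative placeholders, not the i915 API:

	/* Standalone model of the platform-dispatch refactor; builds with
	 * any C compiler. Names are illustrative, not i915 code. */
	#include <stdio.h>

	enum dsi_platform { PLATFORM_VLV, PLATFORM_GEN9_LP };

	static int vlv_pll_compute(void) { puts("vlv compute"); return 0; }
	static int bxt_pll_compute(void) { puts("bxt compute"); return 0; }

	/* Before: a neutral intel_* wrapper re-tested the platform on every
	 * call. After: each caller decides once and calls the variant
	 * directly, so the wrapper layer can be deleted. */
	static int compute_pll(enum dsi_platform p)
	{
		return (p == PLATFORM_GEN9_LP) ? bxt_pll_compute()
					       : vlv_pll_compute();
	}

	int main(void)
	{
		return compute_pll(PLATFORM_GEN9_LP);
	}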
-static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder, - const struct intel_crtc_state *config) +void bxt_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -568,52 +559,7 @@ static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder, DRM_DEBUG_KMS("DSI PLL locked\n"); } -bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) -{ - if (IS_GEN9_LP(dev_priv)) - return bxt_dsi_pll_is_enabled(dev_priv); - - MISSING_CASE(INTEL_DEVID(dev_priv)); - - return false; -} - -int intel_compute_dsi_pll(struct intel_encoder *encoder, - struct intel_crtc_state *config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return vlv_compute_dsi_pll(encoder, config); - else if (IS_GEN9_LP(dev_priv)) - return gen9lp_compute_dsi_pll(encoder, config); - - return -ENODEV; -} - -void intel_enable_dsi_pll(struct intel_encoder *encoder, - const struct intel_crtc_state *config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_enable_dsi_pll(encoder, config); - else if (IS_GEN9_LP(dev_priv)) - gen9lp_enable_dsi_pll(encoder, config); -} - -void intel_disable_dsi_pll(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_disable_dsi_pll(encoder); - else if (IS_GEN9_LP(dev_priv)) - bxt_disable_dsi_pll(encoder); -} - -static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder, - enum port port) +void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 tmp; struct drm_device *dev = encoder->base.dev; @@ -638,13 +584,3 @@ static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder, } I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); } - -void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_GEN9_LP(dev_priv)) - gen9lp_dsi_reset_clocks(encoder, port); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_dsi_reset_clocks(encoder, port); -} diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 56dd7a9a8e25..7312beb6f1fc 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -143,7 +143,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector) imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc); if (imx_ldb_ch->edid) { - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, imx_ldb_ch->edid); num_modes = drm_add_edid_modes(connector, imx_ldb_ch->edid); } @@ -471,8 +471,7 @@ static int imx_ldb_register(struct drm_device *drm, drm_connector_init(drm, &imx_ldb_ch->connector, &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, - encoder); + drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder); } if (imx_ldb_ch->panel) { diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index bc27c2699464..cffd3310240e 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -235,7 +235,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector) edid = drm_get_edid(connector, 
tve->ddc); if (edid) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); } @@ -493,7 +493,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs, DRM_MODE_CONNECTOR_VGA); - drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder); + drm_connector_attach_encoder(&tve->connector, &tve->encoder); return 0; } diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index aedecda9728a..aefd04e18f93 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -63,7 +63,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) } if (imxpd->edid) { - drm_mode_connector_update_edid_property(connector, imxpd->edid); + drm_connector_update_edid_property(connector, imxpd->edid); num_modes = drm_add_edid_modes(connector, imxpd->edid); } @@ -197,7 +197,7 @@ static int imx_pd_register(struct drm_device *drm, return ret; } } else { - drm_mode_connector_attach_encoder(&imxpd->connector, encoder); + drm_connector_attach_encoder(&imxpd->connector, encoder); } return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 658b8dd45b83..2d6aa150a9ff 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -539,6 +539,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, int ret; int i; + if (!path) + return 0; + for (i = 0; i < path_len; i++) { enum mtk_ddp_comp_id comp_id = path[i]; struct device_node *node; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 8130f3dab661..87e4191c250e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -28,8 +28,12 @@ #define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050 #define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084 #define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088 +#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4 +#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8 #define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac -#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8 +#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8 +#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4 +#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8 #define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100 #define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030 @@ -41,45 +45,89 @@ #define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) #define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n)) #define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n)) +#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n)) #define INT_MUTEX BIT(1) -#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11) -#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12) -#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13) -#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14) -#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15) -#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16) -#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17) -#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18) -#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19) -#define MT8173_MUTEX_MOD_DISP_AAL BIT(20) -#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21) -#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22) -#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23) -#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24) -#define MT8173_MUTEX_MOD_DISP_OD BIT(25) - -#define MT2701_MUTEX_MOD_DISP_OVL BIT(3) -#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6) -#define 
MT2701_MUTEX_MOD_DISP_COLOR BIT(7) -#define MT2701_MUTEX_MOD_DISP_BLS BIT(9) -#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10) -#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12) +#define MT8173_MUTEX_MOD_DISP_OVL0 11 +#define MT8173_MUTEX_MOD_DISP_OVL1 12 +#define MT8173_MUTEX_MOD_DISP_RDMA0 13 +#define MT8173_MUTEX_MOD_DISP_RDMA1 14 +#define MT8173_MUTEX_MOD_DISP_RDMA2 15 +#define MT8173_MUTEX_MOD_DISP_WDMA0 16 +#define MT8173_MUTEX_MOD_DISP_WDMA1 17 +#define MT8173_MUTEX_MOD_DISP_COLOR0 18 +#define MT8173_MUTEX_MOD_DISP_COLOR1 19 +#define MT8173_MUTEX_MOD_DISP_AAL 20 +#define MT8173_MUTEX_MOD_DISP_GAMMA 21 +#define MT8173_MUTEX_MOD_DISP_UFOE 22 +#define MT8173_MUTEX_MOD_DISP_PWM0 23 +#define MT8173_MUTEX_MOD_DISP_PWM1 24 +#define MT8173_MUTEX_MOD_DISP_OD 25 + +#define MT2712_MUTEX_MOD_DISP_PWM2 10 +#define MT2712_MUTEX_MOD_DISP_OVL0 11 +#define MT2712_MUTEX_MOD_DISP_OVL1 12 +#define MT2712_MUTEX_MOD_DISP_RDMA0 13 +#define MT2712_MUTEX_MOD_DISP_RDMA1 14 +#define MT2712_MUTEX_MOD_DISP_RDMA2 15 +#define MT2712_MUTEX_MOD_DISP_WDMA0 16 +#define MT2712_MUTEX_MOD_DISP_WDMA1 17 +#define MT2712_MUTEX_MOD_DISP_COLOR0 18 +#define MT2712_MUTEX_MOD_DISP_COLOR1 19 +#define MT2712_MUTEX_MOD_DISP_AAL0 20 +#define MT2712_MUTEX_MOD_DISP_UFOE 22 +#define MT2712_MUTEX_MOD_DISP_PWM0 23 +#define MT2712_MUTEX_MOD_DISP_PWM1 24 +#define MT2712_MUTEX_MOD_DISP_OD0 25 +#define MT2712_MUTEX_MOD2_DISP_AAL1 33 +#define MT2712_MUTEX_MOD2_DISP_OD1 34 + +#define MT2701_MUTEX_MOD_DISP_OVL 3 +#define MT2701_MUTEX_MOD_DISP_WDMA 6 +#define MT2701_MUTEX_MOD_DISP_COLOR 7 +#define MT2701_MUTEX_MOD_DISP_BLS 9 +#define MT2701_MUTEX_MOD_DISP_RDMA0 10 +#define MT2701_MUTEX_MOD_DISP_RDMA1 12 #define MUTEX_SOF_SINGLE_MODE 0 #define MUTEX_SOF_DSI0 1 #define MUTEX_SOF_DSI1 2 #define MUTEX_SOF_DPI0 3 +#define MUTEX_SOF_DPI1 4 +#define MUTEX_SOF_DSI2 5 +#define MUTEX_SOF_DSI3 6 #define OVL0_MOUT_EN_COLOR0 0x1 #define OD_MOUT_EN_RDMA0 0x1 +#define OD1_MOUT_EN_RDMA1 BIT(16) #define UFOE_MOUT_EN_DSI0 0x1 #define COLOR0_SEL_IN_OVL0 0x1 #define OVL1_MOUT_EN_COLOR1 0x1 #define GAMMA_MOUT_EN_RDMA1 0x1 -#define RDMA1_MOUT_DPI0 0x2 +#define RDMA0_SOUT_DPI0 0x2 +#define RDMA0_SOUT_DSI2 0x4 +#define RDMA0_SOUT_DSI3 0x5 +#define RDMA1_SOUT_DPI0 0x2 +#define RDMA1_SOUT_DPI1 0x3 +#define RDMA1_SOUT_DSI1 0x1 +#define RDMA1_SOUT_DSI2 0x4 +#define RDMA1_SOUT_DSI3 0x5 +#define RDMA2_SOUT_DPI0 0x2 +#define RDMA2_SOUT_DPI1 0x3 +#define RDMA2_SOUT_DSI1 0x1 +#define RDMA2_SOUT_DSI2 0x4 +#define RDMA2_SOUT_DSI3 0x5 #define DPI0_SEL_IN_RDMA1 0x1 +#define DPI0_SEL_IN_RDMA2 0x3 +#define DPI1_SEL_IN_RDMA1 (0x1 << 8) +#define DPI1_SEL_IN_RDMA2 (0x3 << 8) +#define DSI1_SEL_IN_RDMA1 0x1 +#define DSI1_SEL_IN_RDMA2 0x4 +#define DSI2_SEL_IN_RDMA1 (0x1 << 16) +#define DSI2_SEL_IN_RDMA2 (0x4 << 16) +#define DSI3_SEL_IN_RDMA1 (0x1 << 16) +#define DSI3_SEL_IN_RDMA2 (0x4 << 16) #define COLOR1_SEL_IN_OVL1 0x1 #define OVL_MOUT_EN_RDMA 0x1 @@ -108,12 +156,32 @@ static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA, }; +static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0, + [DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1, + [DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0, + [DDP_COMPONENT_COLOR1] = MT2712_MUTEX_MOD_DISP_COLOR1, + [DDP_COMPONENT_OD0] = MT2712_MUTEX_MOD_DISP_OD0, + [DDP_COMPONENT_OD1] = MT2712_MUTEX_MOD2_DISP_OD1, + [DDP_COMPONENT_OVL0] = MT2712_MUTEX_MOD_DISP_OVL0, + [DDP_COMPONENT_OVL1] = MT2712_MUTEX_MOD_DISP_OVL1, + 
[DDP_COMPONENT_PWM0] = MT2712_MUTEX_MOD_DISP_PWM0, + [DDP_COMPONENT_PWM1] = MT2712_MUTEX_MOD_DISP_PWM1, + [DDP_COMPONENT_PWM2] = MT2712_MUTEX_MOD_DISP_PWM2, + [DDP_COMPONENT_RDMA0] = MT2712_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_RDMA1] = MT2712_MUTEX_MOD_DISP_RDMA1, + [DDP_COMPONENT_RDMA2] = MT2712_MUTEX_MOD_DISP_RDMA2, + [DDP_COMPONENT_UFOE] = MT2712_MUTEX_MOD_DISP_UFOE, + [DDP_COMPONENT_WDMA0] = MT2712_MUTEX_MOD_DISP_WDMA0, + [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1, +}; + static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { - [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL, + [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL, [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0, [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1, [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA, - [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD, + [DDP_COMPONENT_OD0] = MT8173_MUTEX_MOD_DISP_OD, [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0, [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1, [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0, @@ -138,7 +206,7 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) { *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN; value = OVL_MOUT_EN_RDMA; - } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) { + } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) { *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN; value = OD_MOUT_EN_RDMA0; } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) { @@ -150,9 +218,48 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) { *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN; value = GAMMA_MOUT_EN_RDMA1; + } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) { + *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN; + value = OD1_MOUT_EN_RDMA1; + } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { + *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; + value = RDMA0_SOUT_DPI0; + } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { + *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; + value = RDMA0_SOUT_DSI2; + } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) { + *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; + value = RDMA0_SOUT_DSI3; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { + *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; + value = RDMA1_SOUT_DSI1; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) { + *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; + value = RDMA1_SOUT_DSI2; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) { + *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; + value = RDMA1_SOUT_DSI3; } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) { - *addr = DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN; - value = RDMA1_MOUT_DPI0; + *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; + value = RDMA1_SOUT_DPI0; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { + *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; + value = RDMA1_SOUT_DPI1; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) { + *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; + value = RDMA2_SOUT_DPI0; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { + *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; + value = RDMA2_SOUT_DPI1; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { + 
*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; + value = RDMA2_SOUT_DSI1; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { + *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; + value = RDMA2_SOUT_DSI2; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) { + *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; + value = RDMA2_SOUT_DSI3; } else { value = 0; } @@ -172,6 +279,33 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) { *addr = DISP_REG_CONFIG_DPI_SEL_IN; value = DPI0_SEL_IN_RDMA1; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { + *addr = DISP_REG_CONFIG_DPI_SEL_IN; + value = DPI1_SEL_IN_RDMA1; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { + *addr = DISP_REG_CONFIG_DSIO_SEL_IN; + value = DSI1_SEL_IN_RDMA1; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) { + *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + value = DSI2_SEL_IN_RDMA1; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) { + *addr = DISP_REG_CONFIG_DSIO_SEL_IN; + value = DSI3_SEL_IN_RDMA1; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) { + *addr = DISP_REG_CONFIG_DPI_SEL_IN; + value = DPI0_SEL_IN_RDMA2; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { + *addr = DISP_REG_CONFIG_DPI_SEL_IN; + value = DPI1_SEL_IN_RDMA2; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { + *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + value = DSI1_SEL_IN_RDMA2; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { + *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + value = DSI2_SEL_IN_RDMA2; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) { + *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + value = DSI3_SEL_IN_RDMA2; } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) { *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN; value = COLOR1_SEL_IN_OVL1; @@ -278,6 +412,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex, struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); unsigned int reg; + unsigned int offset; WARN_ON(&ddp->mutex[mutex->id] != mutex); @@ -288,13 +423,30 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex, case DDP_COMPONENT_DSI1: reg = MUTEX_SOF_DSI0; break; + case DDP_COMPONENT_DSI2: + reg = MUTEX_SOF_DSI2; + break; + case DDP_COMPONENT_DSI3: + reg = MUTEX_SOF_DSI3; + break; case DDP_COMPONENT_DPI0: reg = MUTEX_SOF_DPI0; break; + case DDP_COMPONENT_DPI1: + reg = MUTEX_SOF_DPI1; + break; default: - reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); - reg |= ddp->mutex_mod[id]; - writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); + if (ddp->mutex_mod[id] < 32) { + offset = DISP_REG_MUTEX_MOD(mutex->id); + reg = readl_relaxed(ddp->regs + offset); + reg |= 1 << ddp->mutex_mod[id]; + writel_relaxed(reg, ddp->regs + offset); + } else { + offset = DISP_REG_MUTEX_MOD2(mutex->id); + reg = readl_relaxed(ddp->regs + offset); + reg |= 1 << (ddp->mutex_mod[id] - 32); + writel_relaxed(reg, ddp->regs + offset); + } return; } @@ -307,20 +459,32 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex, struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); unsigned int reg; + unsigned int offset; WARN_ON(&ddp->mutex[mutex->id] != mutex); switch (id) { case DDP_COMPONENT_DSI0: case DDP_COMPONENT_DSI1: + case DDP_COMPONENT_DSI2: + case DDP_COMPONENT_DSI3: case 
DDP_COMPONENT_DPI0: + case DDP_COMPONENT_DPI1: writel_relaxed(MUTEX_SOF_SINGLE_MODE, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id)); break; default: - reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); - reg &= ~(ddp->mutex_mod[id]); - writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); + if (ddp->mutex_mod[id] < 32) { + offset = DISP_REG_MUTEX_MOD(mutex->id); + reg = readl_relaxed(ddp->regs + offset); + reg &= ~(1 << ddp->mutex_mod[id]); + writel_relaxed(reg, ddp->regs + offset); + } else { + offset = DISP_REG_MUTEX_MOD2(mutex->id); + reg = readl_relaxed(ddp->regs + offset); + reg &= ~(1 << (ddp->mutex_mod[id] - 32)); + writel_relaxed(reg, ddp->regs + offset); + } break; } } @@ -407,6 +571,7 @@ static int mtk_ddp_remove(struct platform_device *pdev) static const struct of_device_id ddp_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod}, + { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod}, { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod}, {}, }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 4672317e3ad1..ff974d82a4a6 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -218,18 +218,25 @@ struct mtk_ddp_comp_match { }; static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { - [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal }, + [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, + [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL }, [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL }, [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL }, + [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL }, [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL }, [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL }, + [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, NULL }, + [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, NULL }, [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, - [DDP_COMPONENT_OD] = { MTK_DISP_OD, 0, &ddp_od }, + [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, + [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL }, [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL }, [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, + [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, + [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, NULL }, [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, NULL }, [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, NULL }, @@ -271,7 +278,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, if (comp_id == DDP_COMPONENT_BLS || comp_id == DDP_COMPONENT_DPI0 || + comp_id == DDP_COMPONENT_DPI1 || comp_id == DDP_COMPONENT_DSI0 || + comp_id == DDP_COMPONENT_DSI1 || + comp_id == DDP_COMPONENT_DSI2 || + comp_id == DDP_COMPONENT_DSI3 || comp_id == DDP_COMPONENT_PWM0) { comp->regs = NULL; comp->clk = NULL; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 0828cf8bf85c..7413ffeb3c9d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -41,19 +41,25 @@ enum mtk_ddp_comp_type { }; enum mtk_ddp_comp_id { - DDP_COMPONENT_AAL, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_AAL1, DDP_COMPONENT_BLS, DDP_COMPONENT_COLOR0, DDP_COMPONENT_COLOR1, DDP_COMPONENT_DPI0, + 
DDP_COMPONENT_DPI1, DDP_COMPONENT_DSI0, DDP_COMPONENT_DSI1, + DDP_COMPONENT_DSI2, + DDP_COMPONENT_DSI3, DDP_COMPONENT_GAMMA, - DDP_COMPONENT_OD, + DDP_COMPONENT_OD0, + DDP_COMPONENT_OD1, DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1, DDP_COMPONENT_PWM0, DDP_COMPONENT_PWM1, + DDP_COMPONENT_PWM2, DDP_COMPONENT_RDMA0, DDP_COMPONENT_RDMA1, DDP_COMPONENT_RDMA2, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index a2ca90fc403c..39721119713b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -146,11 +146,37 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = { DDP_COMPONENT_DPI0, }; +static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_OD0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_DPI0, + DDP_COMPONENT_PWM0, +}; + +static const enum mtk_ddp_comp_id mt2712_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL1, + DDP_COMPONENT_COLOR1, + DDP_COMPONENT_AAL1, + DDP_COMPONENT_OD1, + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DPI1, + DDP_COMPONENT_PWM1, +}; + +static const enum mtk_ddp_comp_id mt2712_mtk_ddp_third[] = { + DDP_COMPONENT_RDMA2, + DDP_COMPONENT_DSI3, + DDP_COMPONENT_PWM2, +}; + static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = { DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0, - DDP_COMPONENT_AAL, - DDP_COMPONENT_OD, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0, @@ -173,6 +199,15 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { .shadow_register = true, }; +static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = { + .main_path = mt2712_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main), + .ext_path = mt2712_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt2712_mtk_ddp_ext), + .third_path = mt2712_mtk_ddp_third, + .third_len = ARRAY_SIZE(mt2712_mtk_ddp_third), +}; + static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = { .main_path = mt8173_mtk_ddp_main, .main_len = ARRAY_SIZE(mt8173_mtk_ddp_main), @@ -232,6 +267,11 @@ static int mtk_drm_kms_init(struct drm_device *drm) if (ret < 0) goto err_component_unbind; + ret = mtk_drm_crtc_create(drm, private->data->third_path, + private->data->third_len); + if (ret < 0) + goto err_component_unbind; + /* Use OVL device for all DMA memory allocations */ np = private->comp_node[private->data->main_path[0]] ?: private->comp_node[private->data->ext_path[0]]; @@ -360,24 +400,44 @@ static const struct component_master_ops mtk_drm_ops = { }; static const struct of_device_id mtk_ddp_comp_dt_ids[] = { - { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA }, - { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR }, - { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR }, - { .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL}, - { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, - { .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE }, - { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI }, - { .compatible = 
"mediatek,mt8173-dsi", .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI }, - { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, - { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, - { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS }, - { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM }, - { .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD }, + { .compatible = "mediatek,mt2701-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8173-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt2701-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-wdma", + .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt2701-disp-color", + .data = (void *)MTK_DISP_COLOR }, + { .compatible = "mediatek,mt8173-disp-color", + .data = (void *)MTK_DISP_COLOR }, + { .compatible = "mediatek,mt8173-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8173-disp-gamma", + .data = (void *)MTK_DISP_GAMMA, }, + { .compatible = "mediatek,mt8173-disp-ufoe", + .data = (void *)MTK_DISP_UFOE }, + { .compatible = "mediatek,mt2701-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt2701-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt2712-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8173-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt2701-disp-pwm", + .data = (void *)MTK_DISP_BLS }, + { .compatible = "mediatek,mt8173-disp-pwm", + .data = (void *)MTK_DISP_PWM }, + { .compatible = "mediatek,mt8173-disp-od", + .data = (void *)MTK_DISP_OD }, { } }; @@ -552,6 +612,8 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend, static const struct of_device_id mtk_drm_of_ids[] = { { .compatible = "mediatek,mt2701-mmsys", .data = &mt2701_mmsys_driver_data}, + { .compatible = "mediatek,mt2712-mmsys", + .data = &mt2712_mmsys_driver_data}, { .compatible = "mediatek,mt8173-mmsys", .data = &mt8173_mmsys_driver_data}, { } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index c3378c452c0a..ecc00ca3221d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -17,7 +17,7 @@ #include <linux/io.h> #include "mtk_drm_ddp_comp.h" -#define MAX_CRTC 2 +#define MAX_CRTC 3 #define MAX_CONNECTOR 2 struct device; @@ -33,6 +33,9 @@ struct mtk_mmsys_driver_data { unsigned int main_len; const enum mtk_ddp_comp_id *ext_path; unsigned int ext_len; + const enum mtk_ddp_comp_id *third_path; + unsigned int third_len; + bool shadow_register; }; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index aa0943ec32b0..66df1b177959 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -782,7 +782,7 @@ static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi) drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs); dsi->conn.dpms = DRM_MODE_DPMS_OFF; - drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder); + drm_connector_attach_encoder(&dsi->conn, &dsi->encoder); 
if (dsi->panel) { ret = drm_panel_attach(dsi->panel, &dsi->conn); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 59a11026dceb..2d45d1dd9554 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1220,7 +1220,7 @@ static int mtk_hdmi_conn_get_modes(struct drm_connector *conn) hdmi->dvi_mode = !drm_detect_monitor_audio(edid); - drm_mode_connector_update_edid_property(conn, edid); + drm_connector_update_edid_property(conn, edid); ret = drm_add_edid_modes(conn, edid); kfree(edid); @@ -1306,7 +1306,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge) hdmi->conn.interlace_allowed = true; hdmi->conn.doublescan_allowed = false; - ret = drm_mode_connector_attach_encoder(&hdmi->conn, + ret = drm_connector_attach_encoder(&hdmi->conn, bridge->encoder); if (ret) { dev_err(hdmi->dev, diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index c9ad45686e7a..df7247cd93f9 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -329,6 +329,12 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi, vclk_freq = mode->clock; + if (!vic) { + meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq, + vclk_freq, vclk_freq, false); + return; + } + if (mode->flags & DRM_MODE_FLAG_DBLCLK) vclk_freq *= 2; @@ -542,10 +548,12 @@ static enum drm_mode_status dw_hdmi_mode_valid(struct drm_connector *connector, const struct drm_display_mode *mode) { + struct meson_drm *priv = connector->dev->dev_private; unsigned int vclk_freq; unsigned int venc_freq; unsigned int hdmi_freq; int vic = drm_match_cea_mode(mode); + enum drm_mode_status status; DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", mode->base.id, mode->name, mode->vrefresh, mode->clock, @@ -556,8 +564,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector, /* Check against non-VIC supported modes */ if (!vic) { - if (!meson_venc_hdmi_supported_mode(mode)) - return MODE_BAD; + status = meson_venc_hdmi_supported_mode(mode); + if (status != MODE_OK) + return status; + + return meson_vclk_dmt_supported_freq(priv, mode->clock); /* Check against supported VIC modes */ } else if (!meson_venc_hdmi_supported_vic(vic)) return MODE_BAD; @@ -583,16 +594,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector, dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__, vclk_freq, venc_freq, hdmi_freq); - /* Finally filter by configurable vclk frequencies */ + /* Finally filter by configurable vclk frequencies for VIC modes */ switch (vclk_freq) { - case 25175: - case 40000: case 54000: - case 65000: case 74250: - case 108000: case 148500: - case 162000: case 297000: case 594000: return MODE_OK; diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c index f0511220317f..ae5473257f72 100644 --- a/drivers/gpu/drm/meson/meson_vclk.c +++ b/drivers/gpu/drm/meson/meson_vclk.c @@ -320,32 +320,23 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv) CTS_VDAC_EN, CTS_VDAC_EN); } - +enum { /* PLL O1 O2 O3 VP DV EN TX */ /* 4320 /4 /4 /1 /5 /1 => /2 /2 */ -#define MESON_VCLK_HDMI_ENCI_54000 1 + MESON_VCLK_HDMI_ENCI_54000 = 1, /* 4320 /4 /4 /1 /5 /1 => /1 /2 */ -#define MESON_VCLK_HDMI_DDR_54000 2 + MESON_VCLK_HDMI_DDR_54000, /* 2970 /4 /1 /1 /5 /1 => /1 /2 */ -#define MESON_VCLK_HDMI_DDR_148500 3 -/* 4028 /4 /4 /1 /5 /2 => /1 /1 */ -#define MESON_VCLK_HDMI_25175 4 -/* 3200 /4 /2 /1 /5 /2 => /1 /1 */ -#define 
MESON_VCLK_HDMI_40000 5 -/* 5200 /4 /2 /1 /5 /2 => /1 /1 */ -#define MESON_VCLK_HDMI_65000 6 + MESON_VCLK_HDMI_DDR_148500, /* 2970 /2 /2 /2 /5 /1 => /1 /1 */ -#define MESON_VCLK_HDMI_74250 7 -/* 4320 /4 /1 /1 /5 /2 => /1 /1 */ -#define MESON_VCLK_HDMI_108000 8 + MESON_VCLK_HDMI_74250, /* 2970 /1 /2 /2 /5 /1 => /1 /1 */ -#define MESON_VCLK_HDMI_148500 9 -/* 3240 /2 /1 /1 /5 /2 => /1 /1 */ -#define MESON_VCLK_HDMI_162000 10 + MESON_VCLK_HDMI_148500, /* 2970 /1 /1 /1 /5 /2 => /1 /1 */ -#define MESON_VCLK_HDMI_297000 11 + MESON_VCLK_HDMI_297000, /* 5940 /1 /1 /2 /5 /1 => /1 /1 */ -#define MESON_VCLK_HDMI_594000 12 + MESON_VCLK_HDMI_594000 +}; struct meson_vclk_params { unsigned int pll_base_freq; @@ -411,46 +402,6 @@ struct meson_vclk_params { .vid_pll_div = VID_PLL_DIV_5, .vclk_div = 1, }, - [MESON_VCLK_HDMI_25175] = { - .pll_base_freq = 4028000, - .pll_od1 = 4, - .pll_od2 = 4, - .pll_od3 = 1, - .vid_pll_div = VID_PLL_DIV_5, - .vclk_div = 2, - }, - [MESON_VCLK_HDMI_40000] = { - .pll_base_freq = 3200000, - .pll_od1 = 4, - .pll_od2 = 2, - .pll_od3 = 1, - .vid_pll_div = VID_PLL_DIV_5, - .vclk_div = 2, - }, - [MESON_VCLK_HDMI_65000] = { - .pll_base_freq = 5200000, - .pll_od1 = 4, - .pll_od2 = 2, - .pll_od3 = 1, - .vid_pll_div = VID_PLL_DIV_5, - .vclk_div = 2, - }, - [MESON_VCLK_HDMI_108000] = { - .pll_base_freq = 4320000, - .pll_od1 = 4, - .pll_od2 = 1, - .pll_od3 = 1, - .vid_pll_div = VID_PLL_DIV_5, - .vclk_div = 2, - }, - [MESON_VCLK_HDMI_162000] = { - .pll_base_freq = 3240000, - .pll_od1 = 2, - .pll_od2 = 1, - .pll_od3 = 1, - .vid_pll_div = VID_PLL_DIV_5, - .vclk_div = 2, - }, }; static inline unsigned int pll_od_to_reg(unsigned int od) @@ -470,358 +421,217 @@ static inline unsigned int pll_od_to_reg(unsigned int od) return 0; } -void meson_hdmi_pll_set(struct meson_drm *priv, - unsigned int base, - unsigned int od1, - unsigned int od2, - unsigned int od3) +void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m, + unsigned int frac, unsigned int od1, + unsigned int od2, unsigned int od3) { unsigned int val; if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) { - switch (base) { - case 2970000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - - /* Enable and unreset */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - 0x7 << 28, 0x4 << 28); - - /* Poll for lock bit */ - regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, - val, (val & HDMI_PLL_LOCK), 10, 0); - - /* div_frac */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 0xFFFF, 0x4e00); - break; - - case 3200000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000242); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - - /* unreset */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - BIT(28), 0); - - /* Poll for lock bit */ - regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, - val, (val & HDMI_PLL_LOCK), 10, 0); - - /* div_frac */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 0xFFFF, 0x4aab); - break; - - case 3240000: - regmap_write(priv->hhi, 
HHI_HDMI_PLL_CNTL, 0x58000243); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - - /* unreset */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - BIT(28), 0); - - /* Poll for lock bit */ - regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, - val, (val & HDMI_PLL_LOCK), 10, 0); - - /* div_frac */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 0xFFFF, 0x4800); - break; - - case 3865000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000250); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - - /* unreset */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - BIT(28), 0); - - /* Poll for lock bit */ - regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, - val, (val & HDMI_PLL_LOCK), 10, 0); - - /* div_frac */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 0xFFFF, 0x4855); - break; - - case 4028000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000253); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - - /* unreset */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - BIT(28), 0); - - /* Poll for lock bit */ - regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, - val, (val & HDMI_PLL_LOCK), 10, 0); - - /* div_frac */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 0xFFFF, 0x4eab); - break; - - case 4320000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - - /* unreset */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - BIT(28), 0); - - /* Poll for lock bit */ - regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, - val, (val & HDMI_PLL_LOCK), 10, 0); - break; + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000200 | m); + if (frac) + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, + 0x00004000 | frac); + else + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, + 0x00000000); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - case 5940000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800027b); - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 0xFFFF, 0x4c00); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - - /* unreset */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - BIT(28), 0); - - /* Poll for lock bit */ - regmap_read_poll_timeout(priv->hhi, 
HHI_HDMI_PLL_CNTL, - val, (val & HDMI_PLL_LOCK), 10, 0); - break; + /* Enable and unreset */ + regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, + 0x7 << 28, 0x4 << 28); - case 5200000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800026c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55); - - /* unreset */ - regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - BIT(28), 0); - - /* Poll for lock bit */ - regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, - val, (val & HDMI_PLL_LOCK), 10, 0); - break; - }; + /* Poll for lock bit */ + regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, + val, (val & HDMI_PLL_LOCK), 10, 0); } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) { - switch (base) { - case 2970000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); - break; - - case 3200000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000285); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb155); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); - break; - - case 3240000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000287); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); - break; - - case 3865000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a1); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb02b); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); - break; - - case 4028000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a7); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb355); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); - break; - - case 4320000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); - break; - - case 5940000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002f7); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb200); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 
0x0c8e0000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); - break; - - case 5200000: - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002d8); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb2ab); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); - regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); - break; - - }; + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000200 | m); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000 | frac); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729); + regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500); /* Reset PLL */ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - HDMI_PLL_RESET, HDMI_PLL_RESET); + HDMI_PLL_RESET, HDMI_PLL_RESET); regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL, - HDMI_PLL_RESET, 0); + HDMI_PLL_RESET, 0); /* Poll for lock bit */ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val, (val & HDMI_PLL_LOCK), 10, 0); - }; + } if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 3 << 16, pll_od_to_reg(od1) << 16); + 3 << 16, pll_od_to_reg(od1) << 16); else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || - meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) + meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3, - 3 << 21, pll_od_to_reg(od1) << 21); + 3 << 21, pll_od_to_reg(od1) << 21); if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 3 << 22, pll_od_to_reg(od2) << 22); + 3 << 22, pll_od_to_reg(od2) << 22); else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || - meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) + meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3, - 3 << 23, pll_od_to_reg(od2) << 23); + 3 << 23, pll_od_to_reg(od2) << 23); if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2, - 3 << 18, pll_od_to_reg(od3) << 18); + 3 << 18, pll_od_to_reg(od3) << 18); else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || - meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) + meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3, - 3 << 19, pll_od_to_reg(od3) << 19); + 3 << 19, pll_od_to_reg(od3) << 19); + } -void meson_vclk_setup(struct meson_drm *priv, unsigned int target, - unsigned int vclk_freq, unsigned int venc_freq, - unsigned int dac_freq, bool hdmi_use_enci) +#define XTAL_FREQ 24000 + +static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv, + unsigned int pll_freq) { - unsigned int freq; - unsigned int hdmi_tx_div; - unsigned int venc_div; + /* The GXBB PLL has a /2 pre-multiplier */ + if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) + pll_freq /= 2; - if (target == MESON_VCLK_TARGET_CVBS) { - meson_venci_cvbs_clock_config(priv); - return; + return pll_freq / XTAL_FREQ; +} + +#define HDMI_FRAC_MAX_GXBB 4096 +#define HDMI_FRAC_MAX_GXL 1024 + +static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv, + unsigned int m, + unsigned int pll_freq) +{ + unsigned int parent_freq = 
XTAL_FREQ; + unsigned int frac_max = HDMI_FRAC_MAX_GXL; + unsigned int frac_m; + unsigned int frac; + + /* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */ + if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) { + frac_max = HDMI_FRAC_MAX_GXBB; + parent_freq *= 2; } - hdmi_tx_div = vclk_freq / dac_freq; + /* We can have a perfect match !*/ + if (pll_freq / m == parent_freq && + pll_freq % m == 0) + return 0; - if (hdmi_tx_div == 0) { - pr_err("Fatal Error, invalid HDMI-TX freq %d\n", - dac_freq); - return; + frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq); + frac_m = m * frac_max; + if (frac_m > frac) + return frac_max; + frac -= frac_m; + + return min((u16)frac, (u16)(frac_max - 1)); +} + +static bool meson_hdmi_pll_validate_params(struct meson_drm *priv, + unsigned int m, + unsigned int frac) +{ + if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) { + /* Empiric supported min/max dividers */ + if (m < 53 || m > 123) + return false; + if (frac >= HDMI_FRAC_MAX_GXBB) + return false; + } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || + meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) { + /* Empiric supported min/max dividers */ + if (m < 106 || m > 247) + return false; + if (frac >= HDMI_FRAC_MAX_GXL) + return false; } - venc_div = vclk_freq / venc_freq; + return true; +} - if (venc_div == 0) { - pr_err("Fatal Error, invalid HDMI venc freq %d\n", - venc_freq); - return; +static bool meson_hdmi_pll_find_params(struct meson_drm *priv, + unsigned int freq, + unsigned int *m, + unsigned int *frac, + unsigned int *od) +{ + /* Cycle from /16 to /2 */ + for (*od = 16 ; *od > 1 ; *od >>= 1) { + *m = meson_hdmi_pll_get_m(priv, freq * *od); + if (!*m) + continue; + *frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od); + + DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n", + freq, *m, *frac, *od); + + if (meson_hdmi_pll_validate_params(priv, *m, *frac)) + return true; } - switch (vclk_freq) { - case 54000: - if (hdmi_use_enci) - freq = MESON_VCLK_HDMI_ENCI_54000; - else - freq = MESON_VCLK_HDMI_DDR_54000; - break; - case 25175: - freq = MESON_VCLK_HDMI_25175; - break; - case 40000: - freq = MESON_VCLK_HDMI_40000; - break; - case 65000: - freq = MESON_VCLK_HDMI_65000; - break; - case 74250: - freq = MESON_VCLK_HDMI_74250; - break; - case 108000: - freq = MESON_VCLK_HDMI_108000; - break; - case 148500: - if (dac_freq != 148500) - freq = MESON_VCLK_HDMI_DDR_148500; - else - freq = MESON_VCLK_HDMI_148500; - break; - case 162000: - freq = MESON_VCLK_HDMI_162000; - break; - case 297000: - freq = MESON_VCLK_HDMI_297000; - break; - case 594000: - freq = MESON_VCLK_HDMI_594000; - break; - default: - pr_err("Fatal Error, invalid HDMI vclk freq %d\n", - vclk_freq); + return false; +} + +/* pll_freq is the frequency after the OD dividers */ +enum drm_mode_status +meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq) +{ + unsigned int od, m, frac; + + /* In DMT mode, path after PLL is always /10 */ + freq *= 10; + + if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od)) + return MODE_OK; + + return MODE_CLOCK_RANGE; +} +EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq); + +/* pll_freq is the frequency after the OD dividers */ +static void meson_hdmi_pll_generic_set(struct meson_drm *priv, + unsigned int pll_freq) +{ + unsigned int od, m, frac, od1, od2, od3; + + if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) { + od3 = 1; + if (od < 4) { + od1 = 2; + od2 = 1; + } else { + od2 = od / 4; + od1 
= od / od2; + } + + DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n", + pll_freq, m, frac, od1, od2, od3); + + meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3); + return; } + DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n", + pll_freq); +} + +static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, + unsigned int od1, unsigned int od2, unsigned int od3, + unsigned int vid_pll_div, unsigned int vclk_div, + unsigned int hdmi_tx_div, unsigned int venc_div, + bool hdmi_use_enci) +{ /* Set HDMI-TX sys clock */ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, CTS_HDMI_SYS_SEL_MASK, 0); @@ -831,19 +641,49 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, CTS_HDMI_SYS_EN, CTS_HDMI_SYS_EN); /* Set HDMI PLL rate */ - meson_hdmi_pll_set(priv, params[freq].pll_base_freq, - params[freq].pll_od1, - params[freq].pll_od2, - params[freq].pll_od3); + if (!od1 && !od2 && !od3) { + meson_hdmi_pll_generic_set(priv, pll_base_freq); + } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) { + switch (pll_base_freq) { + case 2970000: + meson_hdmi_pll_set_params(priv, 0x3d, 0xe00, + od1, od2, od3); + break; + case 4320000: + meson_hdmi_pll_set_params(priv, 0x5a, 0, + od1, od2, od3); + break; + case 5940000: + meson_hdmi_pll_set_params(priv, 0x7b, 0xc00, + od1, od2, od3); + break; + } + } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || + meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) { + switch (pll_base_freq) { + case 2970000: + meson_hdmi_pll_set_params(priv, 0x7b, 0x300, + od1, od2, od3); + break; + case 4320000: + meson_hdmi_pll_set_params(priv, 0xb4, 0, + od1, od2, od3); + break; + case 5940000: + meson_hdmi_pll_set_params(priv, 0xf7, 0x200, + od1, od2, od3); + break; + } + } /* Setup vid_pll divider */ - meson_vid_pll_set(priv, params[freq].vid_pll_div); + meson_vid_pll_set(priv, vid_pll_div); /* Set VCLK div */ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_SEL_MASK, 0); regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV, - VCLK_DIV_MASK, params[freq].vclk_div - 1); + VCLK_DIV_MASK, vclk_div - 1); /* Set HDMI-TX source */ switch (hdmi_tx_div) { @@ -981,4 +821,80 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN); } + +void meson_vclk_setup(struct meson_drm *priv, unsigned int target, + unsigned int vclk_freq, unsigned int venc_freq, + unsigned int dac_freq, bool hdmi_use_enci) +{ + unsigned int freq; + unsigned int hdmi_tx_div; + unsigned int venc_div; + + if (target == MESON_VCLK_TARGET_CVBS) { + meson_venci_cvbs_clock_config(priv); + return; + } else if (target == MESON_VCLK_TARGET_DMT) { + /* The DMT clock path is fixed after the PLL: + * - automatic PLL freq + OD management + * - vid_pll_div = VID_PLL_DIV_5 + * - vclk_div = 2 + * - hdmi_tx_div = 1 + * - venc_div = 1 + * - encp encoder + */ + meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0, + VID_PLL_DIV_5, 2, 1, 1, false); + return; + } + + hdmi_tx_div = vclk_freq / dac_freq; + + if (hdmi_tx_div == 0) { + pr_err("Fatal Error, invalid HDMI-TX freq %d\n", + dac_freq); + return; + } + + venc_div = vclk_freq / venc_freq; + + if (venc_div == 0) { + pr_err("Fatal Error, invalid HDMI venc freq %d\n", + venc_freq); + return; + } + + switch (vclk_freq) { + case 54000: + if (hdmi_use_enci) + freq = MESON_VCLK_HDMI_ENCI_54000; + else + freq = MESON_VCLK_HDMI_DDR_54000; + break; + case 74250: + freq = MESON_VCLK_HDMI_74250; + break; + case 148500: + if (dac_freq 
!= 148500) + freq = MESON_VCLK_HDMI_DDR_148500; + else + freq = MESON_VCLK_HDMI_148500; + break; + case 297000: + freq = MESON_VCLK_HDMI_297000; + break; + case 594000: + freq = MESON_VCLK_HDMI_594000; + break; + default: + pr_err("Fatal Error, invalid HDMI vclk freq %d\n", + vclk_freq); + return; + } + + meson_vclk_set(priv, params[freq].pll_base_freq, + params[freq].pll_od1, params[freq].pll_od2, + params[freq].pll_od3, params[freq].vid_pll_div, + params[freq].vclk_div, hdmi_tx_div, venc_div, + hdmi_use_enci); +} EXPORT_SYMBOL_GPL(meson_vclk_setup); diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h index 0401b5213471..869fa3a3073e 100644 --- a/drivers/gpu/drm/meson/meson_vclk.h +++ b/drivers/gpu/drm/meson/meson_vclk.h @@ -24,11 +24,15 @@ enum { MESON_VCLK_TARGET_CVBS = 0, MESON_VCLK_TARGET_HDMI = 1, + MESON_VCLK_TARGET_DMT = 2, }; /* 27MHz is the CVBS Pixel Clock */ #define MESON_VCLK_CVBS 27000 +enum drm_mode_status +meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq); + void meson_vclk_setup(struct meson_drm *priv, unsigned int target, unsigned int vclk_freq, unsigned int venc_freq, unsigned int dac_freq, bool hdmi_use_enci); diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c index 6e2701389801..514245e69b38 100644 --- a/drivers/gpu/drm/meson/meson_venc.c +++ b/drivers/gpu/drm/meson/meson_venc.c @@ -697,314 +697,6 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = { }, }; -union meson_hdmi_venc_mode meson_hdmi_encp_mode_640x480_60 = { - .encp = { - .dvi_settings = 0x21, - .video_mode = 0x4040, - .video_mode_adv = 0x18, - /* video_prog_mode */ - /* video_sync_mode */ - /* video_yc_dly */ - /* video_rgb_ctrl */ - /* video_filt_ctrl */ - /* video_ofld_voav_ofst */ - /* yfp1_htime */ - /* yfp2_htime */ - .max_pxcnt = 0x31f, - /* hspuls_begin */ - /* hspuls_end */ - /* hspuls_switch */ - /* vspuls_begin */ - /* vspuls_end */ - /* vspuls_bline */ - /* vspuls_eline */ - .havon_begin = 0x90, - .havon_end = 0x30f, - .vavon_bline = 0x23, - .vavon_eline = 0x202, - /* eqpuls_begin */ - /* eqpuls_end */ - /* eqpuls_bline */ - /* eqpuls_eline */ - .hso_begin = 0, - .hso_end = 0x60, - .vso_begin = 0x1e, - .vso_end = 0x32, - .vso_bline = 0, - .vso_eline = 2, - .vso_eline_present = true, - /* sy_val */ - /* sy2_val */ - .max_lncnt = 0x20c, - }, -}; - -union meson_hdmi_venc_mode meson_hdmi_encp_mode_800x600_60 = { - .encp = { - .dvi_settings = 0x21, - .video_mode = 0x4040, - .video_mode_adv = 0x18, - /* video_prog_mode */ - /* video_sync_mode */ - /* video_yc_dly */ - /* video_rgb_ctrl */ - /* video_filt_ctrl */ - /* video_ofld_voav_ofst */ - /* yfp1_htime */ - /* yfp2_htime */ - .max_pxcnt = 0x41f, - /* hspuls_begin */ - /* hspuls_end */ - /* hspuls_switch */ - /* vspuls_begin */ - /* vspuls_end */ - /* vspuls_bline */ - /* vspuls_eline */ - .havon_begin = 0xD8, - .havon_end = 0x3f7, - .vavon_bline = 0x1b, - .vavon_eline = 0x272, - /* eqpuls_begin */ - /* eqpuls_end */ - /* eqpuls_bline */ - /* eqpuls_eline */ - .hso_begin = 0, - .hso_end = 0x80, - .vso_begin = 0x1e, - .vso_end = 0x32, - .vso_bline = 0, - .vso_eline = 4, - .vso_eline_present = true, - /* sy_val */ - /* sy2_val */ - .max_lncnt = 0x273, - }, -}; - -union meson_hdmi_venc_mode meson_hdmi_encp_mode_1024x768_60 = { - .encp = { - .dvi_settings = 0x21, - .video_mode = 0x4040, - .video_mode_adv = 0x18, - /* video_prog_mode */ - /* video_sync_mode */ - /* video_yc_dly */ - /* video_rgb_ctrl */ - /* video_filt_ctrl */ - /* video_ofld_voav_ofst */ - 
/* yfp1_htime */ - /* yfp2_htime */ - .max_pxcnt = 1343, - /* hspuls_begin */ - /* hspuls_end */ - /* hspuls_switch */ - /* vspuls_begin */ - /* vspuls_end */ - /* vspuls_bline */ - /* vspuls_eline */ - .havon_begin = 296, - .havon_end = 1319, - .vavon_bline = 35, - .vavon_eline = 802, - /* eqpuls_begin */ - /* eqpuls_end */ - /* eqpuls_bline */ - /* eqpuls_eline */ - .hso_begin = 0, - .hso_end = 136, - .vso_begin = 30, - .vso_end = 50, - .vso_bline = 0, - .vso_eline = 6, - .vso_eline_present = true, - /* sy_val */ - /* sy2_val */ - .max_lncnt = 805, - }, -}; - -union meson_hdmi_venc_mode meson_hdmi_encp_mode_1152x864_75 = { - .encp = { - .dvi_settings = 0x21, - .video_mode = 0x4040, - .video_mode_adv = 0x18, - /* video_prog_mode */ - /* video_sync_mode */ - /* video_yc_dly */ - /* video_rgb_ctrl */ - /* video_filt_ctrl */ - /* video_ofld_voav_ofst */ - /* yfp1_htime */ - /* yfp2_htime */ - .max_pxcnt = 0x63f, - /* hspuls_begin */ - /* hspuls_end */ - /* hspuls_switch */ - /* vspuls_begin */ - /* vspuls_end */ - /* vspuls_bline */ - /* vspuls_eline */ - .havon_begin = 0x180, - .havon_end = 0x5ff, - .vavon_bline = 0x23, - .vavon_eline = 0x382, - /* eqpuls_begin */ - /* eqpuls_end */ - /* eqpuls_bline */ - /* eqpuls_eline */ - .hso_begin = 0, - .hso_end = 0x80, - .vso_begin = 0x1e, - .vso_end = 0x32, - .vso_bline = 0, - .vso_eline = 3, - .vso_eline_present = true, - /* sy_val */ - /* sy2_val */ - .max_lncnt = 0x383, - }, -}; - -union meson_hdmi_venc_mode meson_hdmi_encp_mode_1280x1024_60 = { - .encp = { - .dvi_settings = 0x21, - .video_mode = 0x4040, - .video_mode_adv = 0x18, - /* video_prog_mode */ - /* video_sync_mode */ - /* video_yc_dly */ - /* video_rgb_ctrl */ - /* video_filt_ctrl */ - /* video_ofld_voav_ofst */ - /* yfp1_htime */ - /* yfp2_htime */ - .max_pxcnt = 0x697, - /* hspuls_begin */ - /* hspuls_end */ - /* hspuls_switch */ - /* vspuls_begin */ - /* vspuls_end */ - /* vspuls_bline */ - /* vspuls_eline */ - .havon_begin = 0x168, - .havon_end = 0x667, - .vavon_bline = 0x29, - .vavon_eline = 0x428, - /* eqpuls_begin */ - /* eqpuls_end */ - /* eqpuls_bline */ - /* eqpuls_eline */ - .hso_begin = 0, - .hso_end = 0x70, - .vso_begin = 0x1e, - .vso_end = 0x32, - .vso_bline = 0, - .vso_eline = 3, - .vso_eline_present = true, - /* sy_val */ - /* sy2_val */ - .max_lncnt = 0x429, - }, -}; - -union meson_hdmi_venc_mode meson_hdmi_encp_mode_1600x1200_60 = { - .encp = { - .dvi_settings = 0x21, - .video_mode = 0x4040, - .video_mode_adv = 0x18, - /* video_prog_mode */ - /* video_sync_mode */ - /* video_yc_dly */ - /* video_rgb_ctrl */ - /* video_filt_ctrl */ - /* video_ofld_voav_ofst */ - /* yfp1_htime */ - /* yfp2_htime */ - .max_pxcnt = 0x86f, - /* hspuls_begin */ - /* hspuls_end */ - /* hspuls_switch */ - /* vspuls_begin */ - /* vspuls_end */ - /* vspuls_bline */ - /* vspuls_eline */ - .havon_begin = 0x1f0, - .havon_end = 0x82f, - .vavon_bline = 0x31, - .vavon_eline = 0x4e0, - /* eqpuls_begin */ - /* eqpuls_end */ - /* eqpuls_bline */ - /* eqpuls_eline */ - .hso_begin = 0, - .hso_end = 0xc0, - .vso_begin = 0x1e, - .vso_end = 0x32, - .vso_bline = 0, - .vso_eline = 3, - .vso_eline_present = true, - /* sy_val */ - /* sy2_val */ - .max_lncnt = 0x4e1, - }, -}; - -struct meson_hdmi_venc_dmt_mode { - struct drm_display_mode drm_mode; - union meson_hdmi_venc_mode *mode; -} meson_hdmi_venc_dmt_modes[] = { - /* 640x480@60Hz */ - { - { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, - 752, 800, 0, 480, 490, 492, 525, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - 
&meson_hdmi_encp_mode_640x480_60, - }, - /* 800x600@60Hz */ - { - { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, - 968, 1056, 0, 600, 601, 605, 628, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - &meson_hdmi_encp_mode_800x600_60, - }, - /* 1024x768@60Hz */ - { - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, - 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - &meson_hdmi_encp_mode_1024x768_60, - }, - /* 1152x864@75Hz */ - { - { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, - 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - &meson_hdmi_encp_mode_1152x864_75, - }, - /* 1280x1024@60Hz */ - { - { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, - 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - &meson_hdmi_encp_mode_1280x1024_60, - }, - /* 1600x1200@60Hz */ - { - { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, - 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - &meson_hdmi_encp_mode_1600x1200_60, - }, - /* 1920x1080@60Hz */ - { - { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, - 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - &meson_hdmi_encp_mode_1080p60 - }, - { }, /* sentinel */ -}; - struct meson_hdmi_venc_vic_mode { unsigned int vic; union meson_hdmi_venc_mode *mode; @@ -1044,17 +736,20 @@ static unsigned long modulo(unsigned long a, unsigned long b) return a; } -bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode) +enum drm_mode_status +meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode) { - struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes; + if (mode->flags & ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC | + DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)) + return MODE_BAD; - while (vmode->mode) { - if (drm_mode_equal(&vmode->drm_mode, mode)) - return true; - vmode++; - } + if (mode->hdisplay < 640 || mode->hdisplay > 1920) + return MODE_BAD_HVALUE; - return false; + if (mode->vdisplay < 480 || mode->vdisplay > 1200) + return MODE_BAD_VVALUE; + + return MODE_OK; } EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_mode); @@ -1072,18 +767,29 @@ bool meson_venc_hdmi_supported_vic(int vic) } EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic); -static union meson_hdmi_venc_mode -*meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode) +void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode, + union meson_hdmi_venc_mode *dmt_mode) { - struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes; - - while (vmode->mode) { - if (drm_mode_equal(&vmode->drm_mode, mode)) - return vmode->mode; - vmode++; - } - - return NULL; + memset(dmt_mode, 0, sizeof(*dmt_mode)); + + dmt_mode->encp.dvi_settings = 0x21; + dmt_mode->encp.video_mode = 0x4040; + dmt_mode->encp.video_mode_adv = 0x18; + dmt_mode->encp.max_pxcnt = mode->htotal - 1; + dmt_mode->encp.havon_begin = mode->htotal - mode->hsync_start; + dmt_mode->encp.havon_end = dmt_mode->encp.havon_begin + + mode->hdisplay - 1; + dmt_mode->encp.vavon_bline = mode->vtotal - mode->vsync_start; + dmt_mode->encp.vavon_eline = dmt_mode->encp.vavon_bline + + mode->vdisplay - 1; + dmt_mode->encp.hso_begin = 0; + dmt_mode->encp.hso_end = mode->hsync_end - mode->hsync_start; + dmt_mode->encp.vso_begin = 30; + dmt_mode->encp.vso_end = 50; + dmt_mode->encp.vso_bline = 0; + 
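
The rewritten meson_venc_hdmi_supported_mode() above stops comparing against a fixed DMT table and instead rejects unsupported flags and out-of-range geometry. A self-contained sketch of that validation, with local stand-ins for the DRM flag bits and enum drm_mode_status:

#include <stdio.h>

/* Minimal stand-ins for the DRM types used above (illustrative only). */
enum mode_status { MODE_OK, MODE_BAD, MODE_BAD_HVALUE, MODE_BAD_VVALUE };

#define FLAG_PHSYNC    (1 << 0)
#define FLAG_NHSYNC    (1 << 1)
#define FLAG_PVSYNC    (1 << 2)
#define FLAG_NVSYNC    (1 << 3)
#define FLAG_INTERLACE (1 << 4)  /* example of a rejected flag */

struct display_mode { unsigned int flags, hdisplay, vdisplay; };

/* Range-based validation: any flag outside the four sync polarities is
 * rejected outright, then active width/height are bounds-checked. */
static enum mode_status supported_mode(const struct display_mode *m)
{
    if (m->flags & ~(FLAG_PHSYNC | FLAG_NHSYNC | FLAG_PVSYNC | FLAG_NVSYNC))
        return MODE_BAD;
    if (m->hdisplay < 640 || m->hdisplay > 1920)
        return MODE_BAD_HVALUE;
    if (m->vdisplay < 480 || m->vdisplay > 1200)
        return MODE_BAD_VVALUE;
    return MODE_OK;
}

int main(void)
{
    struct display_mode ok  = { FLAG_NHSYNC | FLAG_NVSYNC, 1024, 768 };
    struct display_mode bad = { FLAG_INTERLACE, 1024, 768 };
    printf("%d %d\n", supported_mode(&ok), supported_mode(&bad)); /* 0 1 */
    return 0;
}
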
dmt_mode->encp.vso_eline = mode->vsync_end - mode->vsync_start; + dmt_mode->encp.vso_eline_present = true; + dmt_mode->encp.max_lncnt = mode->vtotal - 1; } static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic) @@ -1120,6 +826,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic, struct drm_display_mode *mode) { union meson_hdmi_venc_mode *vmode = NULL; + union meson_hdmi_venc_mode vmode_dmt; bool use_enci = false; bool venc_repeat = false; bool hdmi_repeat = false; @@ -1147,14 +854,17 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic, unsigned int sof_lines; unsigned int vsync_lines; - if (meson_venc_hdmi_supported_vic(vic)) + if (meson_venc_hdmi_supported_vic(vic)) { vmode = meson_venc_hdmi_get_vic_vmode(vic); - else - vmode = meson_venc_hdmi_get_dmt_vmode(mode); - if (!vmode) { - dev_err(priv->dev, "%s: Fatal Error, unsupported mode " - DRM_MODE_FMT "\n", __func__, DRM_MODE_ARG(mode)); - return; + if (!vmode) { + dev_err(priv->dev, "%s: Fatal Error, unsupported mode " + DRM_MODE_FMT "\n", __func__, + DRM_MODE_ARG(mode)); + return; + } + } else { + meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt); + vmode = &vmode_dmt; } /* Use VENCI for 480i and 576i and double HDMI pixels */ diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h index 7c18a36a0dd0..97eaebbfa0c4 100644 --- a/drivers/gpu/drm/meson/meson_venc.h +++ b/drivers/gpu/drm/meson/meson_venc.h @@ -58,7 +58,8 @@ struct meson_cvbs_enci_mode { }; /* HDMI Clock parameters */ -bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode); +enum drm_mode_status +meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode); bool meson_venc_hdmi_supported_vic(int vic); bool meson_venc_hdmi_venc_repeat(int vic); diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c index 79d95ca8a0c0..f7945bae3b4a 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c @@ -282,7 +282,7 @@ int meson_venc_cvbs_create(struct meson_drm *priv) encoder->possible_crtcs = BIT(0); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 8918539a19aa..acf7bfe68454 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1553,7 +1553,7 @@ static int mga_vga_get_modes(struct drm_connector *connector) edid = drm_get_edid(connector, &mga_connector->i2c->adapter); if (edid) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); } @@ -1747,7 +1747,7 @@ int mgag200_modeset_init(struct mga_device *mdev) return -1; } - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); ret = mgag200_fbdev_init(mdev); if (ret) { diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c index 4a645926edb7..2bfb39082f54 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c @@ -341,7 +341,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); - if (panel) { + if (!IS_ERR(panel)) { drm_panel_disable(panel); 
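
A quick cross-check of the timing formulas in meson_venc_hdmi_get_dmt_vmode(): feeding the 1024x768@60 mode from the removed DMT table through them reproduces the deleted hard-coded constants (max_pxcnt 1343, havon_begin 296, and so on). Runnable arithmetic only; struct mode is a local stand-in for the handful of drm_display_mode fields used:

#include <stdio.h>

/* Subset of drm_display_mode fields used by the derivation above. */
struct mode {
    unsigned int hdisplay, hsync_start, hsync_end, htotal;
    unsigned int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
    /* 1024x768@60 (65 MHz), as in the removed DMT table entry. */
    struct mode m = { 1024, 1048, 1184, 1344, 768, 771, 777, 806 };

    unsigned int max_pxcnt   = m.htotal - 1;                  /* 1343 */
    unsigned int havon_begin = m.htotal - m.hsync_start;      /* 296  */
    unsigned int havon_end   = havon_begin + m.hdisplay - 1;  /* 1319 */
    unsigned int vavon_bline = m.vtotal - m.vsync_start;      /* 35   */
    unsigned int vavon_eline = vavon_bline + m.vdisplay - 1;  /* 802  */
    unsigned int hso_end     = m.hsync_end - m.hsync_start;   /* 136  */
    unsigned int vso_eline   = m.vsync_end - m.vsync_start;   /* 6    */
    unsigned int max_lncnt   = m.vtotal - 1;                  /* 805  */

    printf("%u %u %u %u %u %u %u %u\n", max_pxcnt, havon_begin, havon_end,
           vavon_bline, vavon_eline, hso_end, vso_eline, max_lncnt);
    return 0;
}

Every value matches the removed meson_hdmi_encp_mode_1024x768_60 initializer, which is what lets the driver drop the six static tables.
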
drm_panel_unprepare(panel); } @@ -410,7 +410,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); - if (panel) { + if (!IS_ERR(panel)) { drm_panel_prepare(panel); drm_panel_enable(panel); } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c index e3b1c86b7aae..5368e621999c 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c @@ -34,9 +34,12 @@ static enum drm_connector_status mdp4_lvds_connector_detect( struct mdp4_lvds_connector *mdp4_lvds_connector = to_mdp4_lvds_connector(connector); - if (!mdp4_lvds_connector->panel) + if (!mdp4_lvds_connector->panel) { mdp4_lvds_connector->panel = of_drm_find_panel(mdp4_lvds_connector->panel_node); + if (IS_ERR(mdp4_lvds_connector->panel)) + mdp4_lvds_connector->panel = NULL; + } return mdp4_lvds_connector->panel ? connector_status_connected : @@ -129,7 +132,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return connector; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 2f1a2780658a..29841f440111 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1898,7 +1898,7 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) * output */ if (check_defer && msm_host->device_node) { - if (!of_drm_find_panel(msm_host->device_node)) + if (IS_ERR(of_drm_find_panel(msm_host->device_node))) if (!of_drm_find_bridge(msm_host->device_node)) return -EPROBE_DEFER; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 4cb1cb68878b..d5006d6923e0 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -393,7 +393,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) ret = dsi_dual_connector_tile_init(connector, id); if (ret) return ret; - ret = drm_mode_connector_set_tile_property(connector); + ret = drm_connector_set_tile_property(connector); if (ret) { pr_err("%s: set tile property failed, %d\n", __func__, ret); @@ -684,7 +684,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - drm_mode_connector_attach_encoder(connector, msm_dsi->encoder); + drm_connector_attach_encoder(connector, msm_dsi->encoder); return connector; } @@ -751,12 +751,8 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) connector_list = &dev->mode_config.connector_list; list_for_each_entry(connector, connector_list, head) { - int i; - - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == encoder->base.id) - return connector; - } + if (drm_connector_has_possible_encoder(connector, encoder)) + return connector; } return ERR_PTR(-ENODEV); diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c index 6f3fc6b0f0a3..058ff92a0207 100644 --- a/drivers/gpu/drm/msm/edp/edp_connector.c +++ b/drivers/gpu/drm/msm/edp/edp_connector.c @@ -56,7 +56,7 @@ static int edp_connector_get_modes(struct drm_connector *connector) if (ret) return ret; - 
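
Context for the if (panel) to if (!IS_ERR(panel)) conversions above and below: of_drm_find_panel() now returns an ERR_PTR-encoded errno instead of NULL, so a plain NULL test would mistake an error cookie for a valid panel. A userspace re-implementation of the encoding convention (errno values packed into the top page of the pointer range, assuming the usual kernel MAX_ERRNO of 4095):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical lookup with the new contract: never NULL, always either
 * a valid pointer or an encoded error. */
static void *find_panel(int present)
{
    static int panel;
    return present ? (void *)&panel : ERR_PTR(-ENODEV);
}

int main(void)
{
    void *panel = find_panel(0);
    if (IS_ERR(panel))                  /* not: if (!panel) */
        printf("no panel: %ld\n", PTR_ERR(panel));
    return 0;
}
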
drm_mode_connector_update_edid_property(connector, drm_edid); + drm_connector_update_edid_property(connector, drm_edid); if (drm_edid) ret = drm_add_edid_modes(connector, drm_edid); @@ -134,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp) connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_mode_connector_attach_encoder(connector, edp->encoder); + drm_connector_attach_encoder(connector, edp->encoder); return connector; } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index c0848dfedd50..e9c9a0af508e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -392,7 +392,7 @@ static int msm_hdmi_connector_get_modes(struct drm_connector *connector) hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); if (edid) { ret = drm_add_edid_modes(connector, edid); @@ -477,7 +477,7 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) return ERR_PTR(ret); } - drm_mode_connector_attach_encoder(connector, hdmi->encoder); + drm_connector_attach_encoder(connector, hdmi->encoder); return connector; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 6aa6ee16dcbd..2c569e264df3 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -1017,7 +1017,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); nv_crtc->cursor.show(nv_crtc, true); out: - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index 4feab0a5419d..e7af95d37ddb 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -556,6 +556,6 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry) encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 9805d2cdc1a1..73d41abbb510 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -716,6 +716,6 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry) entry->location != DCB_LOC_ON_CHIP) nv04_tmds_slave_init(encoder); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index 01664357d3e1..de4490b4ed30 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c @@ -244,7 +244,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) /* Attach it to the specified connector. 
*/ get_slave_funcs(encoder)->create_resources(encoder, connector); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 6d99f11fee4e..6a4ca139cf5d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -821,6 +821,6 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry) encoder->possible_clones = 0; nv17_tv_create_resources(encoder, connector); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b83465ae7c1b..0190377b02a6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -136,12 +136,24 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, { struct nouveau_cli *cli = (void *)device->object.client; struct nv50_disp_core_channel_dma_v0 *args = data; + u8 type = NVIF_MEM_COHERENT; int ret; mutex_init(&dmac->lock); - ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000, - &dmac->push); + /* Pascal added support for 47-bit physical addresses, but some + * parts of EVO still only accept 40-bit PAs. + * + * To avoid issues on systems with large amounts of RAM, and on + * systems where an IOMMU maps pages at a high address, we need + * to allocate push buffers in VRAM instead. + * + * This appears to match NVIDIA's behaviour on Pascal. + */ + if (device->info.family == NV_DEVICE_INFO_V0_PASCAL) + type |= NVIF_MEM_VRAM; + + ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push); if (ret) return ret; @@ -216,6 +228,19 @@ void evo_kick(u32 *push, struct nv50_dmac *evoc) { struct nv50_dmac *dmac = evoc; + + /* Push buffer fetches are not coherent with BAR1, we need to ensure + * writes have been flushed right through to VRAM before writing PUT. 
+ */ + if (dmac->push.type & NVIF_MEM_VRAM) { + struct nvif_device *device = dmac->base.device; + nvif_wr32(&device->object, 0x070000, 0x00000001); + nvif_msec(device, 2000, + if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002)) + break; + ); + } + nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); mutex_unlock(&dmac->lock); } @@ -424,7 +449,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) "dac-%04x-%04x", dcbe->hasht, dcbe->hashm); drm_encoder_helper_add(encoder, &nv50_dac_help); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -850,7 +875,7 @@ nv50_mstc_get_modes(struct drm_connector *connector) int ret = 0; mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port); - drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid); + drm_connector_update_edid_property(&mstc->connector, mstc->edid); if (mstc->edid) ret = drm_add_edid_modes(&mstc->connector, mstc->edid); @@ -927,11 +952,11 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port, nouveau_conn_attach_properties(&mstc->connector); for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++) - drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder); + drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder); drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0); drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0); - drm_mode_connector_set_path_property(&mstc->connector, path); + drm_connector_set_path_property(&mstc->connector, path); return 0; } @@ -1007,7 +1032,7 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr, mstc->port = NULL; drm_modeset_unlock(&drm->dev->mode_config.connection_mutex); - drm_connector_unreference(&mstc->connector); + drm_connector_put(&mstc->connector); } static void @@ -1418,7 +1443,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) "sor-%04x-%04x", dcbe->hasht, dcbe->hashm); drm_encoder_helper_add(encoder, &nv50_sor_help); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); if (dcbe->type == DCB_OUTPUT_DP) { struct nv50_disp *disp = nv50_disp(encoder->dev); @@ -1576,7 +1601,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) "pior-%04x-%04x", dcbe->hasht, dcbe->hashm); drm_encoder_helper_add(encoder, &nv50_pior_help); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -2231,6 +2256,9 @@ nv50_display_create(struct drm_device *dev) connector->funcs->destroy(connector); } + /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */ + dev->vblank_disable_immediate = true; + out: if (ret) nv50_display_destroy(dev); diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index c5a9bc1af5af..2187922e8dc2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -586,7 +586,6 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, wndw->id = index; wndw->interlock.type = interlock_type; wndw->interlock.data = interlock_data; - wndw->ctxdma.parent = &wndw->wndw.base.user; wndw->ctxdma.parent = &wndw->wndw.base.user; INIT_LIST_HEAD(&wndw->ctxdma.list); diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h 
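
On the evo_kick() change above: with the push buffer moved to VRAM on Pascal, CPU writes are no longer coherent with the BAR mapping, so the code triggers an explicit flush and polls a status bit (bounded at 2 ms via nvif_msec) before ringing PUT. A generic poll-with-deadline sketch of that shape; the register lives in plain memory here and the bit layout is only illustrative:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Stand-in for an MMIO register; in the driver these are nvif_rd32()/
 * nvif_wr32() accesses at offset 0x070000. */
static volatile uint32_t flush_reg;

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Kick off a flush, then poll until the busy bit (bit 1) clears or the
 * deadline passes: the same shape as the nvif_msec() loop above. */
static int flush_and_wait(uint64_t timeout_ns)
{
    flush_reg = 0x00000001;     /* trigger the flush (bit 0) */
    /* The fake hardware completes instantly, so busy (bit 1) already
     * reads clear; on real hardware this loop spins until it retires. */
    uint64_t deadline = now_ns() + timeout_ns;
    while (flush_reg & 0x00000002) {
        if (now_ns() > deadline)
            return -1;          /* timed out */
    }
    return 0;                   /* now safe to write PUT */
}

int main(void)
{
    printf("flush %s\n", flush_and_wait(2000000) ? "timed out" : "done");
    return 0;
}
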
b/drivers/gpu/drm/nouveau/include/nvif/object.h index 20754d9e6883..8407651f6ac6 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/object.h +++ b/drivers/gpu/drm/nouveau/include/nvif/object.h @@ -78,7 +78,7 @@ struct nvif_mclass { #define nvif_mclass(o,m) ({ \ struct nvif_object *object = (o); \ struct nvif_sclass *sclass; \ - const typeof(m[0]) *mclass = (m); \ + typeof(m[0]) *mclass = (m); \ int ret = -ENODEV; \ int cnt, i, j; \ \ diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index e2211bb2cf79..e67a471331b5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, if (chan->ntfy) { nouveau_vma_del(&chan->ntfy_vma); nouveau_bo_unpin(chan->ntfy); - drm_gem_object_unreference_unlocked(&chan->ntfy->gem); + drm_gem_object_put_unlocked(&chan->ntfy->gem); } if (chan->heap.block_size) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7b557c354307..22a15478d23d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -363,19 +363,11 @@ module_param_named(hdmimhz, nouveau_hdmimhz, int, 0400); struct nouveau_encoder * find_encoder(struct drm_connector *connector, int type) { - struct drm_device *dev = connector->dev; struct nouveau_encoder *nv_encoder; struct drm_encoder *enc; - int i, id; - - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - id = connector->encoder_ids[i]; - if (!id) - break; + int i; - enc = drm_encoder_find(dev, NULL, id); - if (!enc) - continue; + drm_connector_for_each_possible_encoder(connector, enc, i) { nv_encoder = nouveau_encoder(enc); if (type == DCB_OUTPUT_ANY || @@ -420,7 +412,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); - struct nouveau_encoder *nv_encoder; + struct nouveau_encoder *nv_encoder = NULL; struct drm_encoder *encoder; int i, panel = -ENODEV; @@ -436,14 +428,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) } } - for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) { - int id = connector->encoder_ids[i]; - if (id == 0) - break; - - encoder = drm_encoder_find(dev, NULL, id); - if (!encoder) - continue; + drm_connector_for_each_possible_encoder(connector, encoder, i) { nv_encoder = nouveau_encoder(encoder); if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { @@ -565,7 +550,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) /* Cleanup the previous EDID block. */ if (nv_connector->edid) { - drm_mode_connector_update_edid_property(connector, NULL); + drm_connector_update_edid_property(connector, NULL); kfree(nv_connector->edid); nv_connector->edid = NULL; } @@ -590,7 +575,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) else nv_connector->edid = drm_get_edid(connector, i2c); - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, nv_connector->edid); if (!nv_connector->edid) { NV_ERROR(drm, "DDC responded, but no EDID for %s\n", @@ -672,7 +657,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) /* Cleanup the previous EDID block. 
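
The nouveau_connector hunks above replace two hand-rolled walks over connector->encoder_ids[] with drm_connector_for_each_possible_encoder(). A sketch of how such an iterator macro can be shaped over a zero-terminated id array; this is an assumption-level illustration of the pattern, not the DRM core implementation:

#include <stdio.h>

#define MAX_ENCODER 3

struct connector { int encoder_ids[MAX_ENCODER]; };

/* Toy lookup table standing in for drm_encoder_find(). */
static const char *encoders[] = { NULL, "dac-a", "tmds-b", "sor-c" };
static const char *encoder_find(int id)
{
    return (id > 0 && id < 4) ? encoders[id] : NULL;
}

/* For-each macro over the id array: stop at the first zero id, skip ids
 * that do not resolve, so the loop body only ever sees valid encoders. */
#define for_each_possible_encoder(conn, enc, i)                         \
    for ((i) = 0; (i) < MAX_ENCODER && (conn)->encoder_ids[(i)] != 0 && \
                  ((enc) = encoder_find((conn)->encoder_ids[(i)]), 1);  \
         (i)++)                                                         \
        if (!(enc)) continue; else

int main(void)
{
    struct connector c = { { 2, 3, 0 } };
    const char *enc;
    int i;
    for_each_possible_encoder(&c, enc, i)
        printf("encoder %d: %s\n", c.encoder_ids[i], enc);
    return 0;
}

Hiding the termination and lookup inside the macro is what lets the callers above shrink to a plain loop body.
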
*/ if (nv_connector->edid) { - drm_mode_connector_update_edid_property(connector, NULL); + drm_connector_update_edid_property(connector, NULL); kfree(nv_connector->edid); nv_connector->edid = NULL; } @@ -736,7 +721,7 @@ out: status = connector_status_unknown; #endif - drm_mode_connector_update_edid_property(connector, nv_connector->edid); + drm_connector_update_edid_property(connector, nv_connector->edid); nouveau_connector_set_encoder(connector, nv_encoder); return status; } diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 963a4dba8213..9109b69cd052 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -160,7 +160,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, args.ustate = value; } + ret = pm_runtime_get_sync(drm->dev); + if (IS_ERR_VALUE(ret) && ret != -EACCES) + return ret; ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); + pm_runtime_put_autosuspend(drm->dev); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 774b429142bc..dfa236370726 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -205,7 +205,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); if (fb->nvbo) - drm_gem_object_unreference_unlocked(&fb->nvbo->gem); + drm_gem_object_put_unlocked(&fb->nvbo->gem); drm_framebuffer_cleanup(drm_fb); kfree(fb); @@ -287,7 +287,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, if (ret == 0) return &fb->base; - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); return ERR_PTR(ret); } @@ -939,7 +939,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, return ret; ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle); - drm_gem_object_unreference_unlocked(&bo->gem); + drm_gem_object_put_unlocked(&bo->gem); return ret; } @@ -954,7 +954,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv, if (gem) { struct nouveau_bo *bo = nouveau_gem_object(gem); *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 775443c9af94..c779ee3c665b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -912,8 +912,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) get_task_comm(tmpname, current); snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); - if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) - return ret; + if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) { + ret = -ENOMEM; + goto done; + } ret = nouveau_cli_init(drm, name, cli); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 85c1f10bc2b6..844498c4267c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) nouveau_vma_del(&nouveau_fb->vma); nouveau_bo_unmap(nouveau_fb->nvbo); nouveau_bo_unpin(nouveau_fb->nvbo); - drm_framebuffer_unreference(&nouveau_fb->base); + drm_framebuffer_put(&nouveau_fb->base); } return 0; diff --git 
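
On the nouveau_debugfs_pstate_set() hunk above: changing the pstate touches hardware, so the write path now takes a runtime-PM reference before the NVIF method call and drops it afterwards. A toy model of the get/do-work/put shape; note, as a general kernel convention rather than something shown in this hunk, that pm_runtime_get_sync() raises the usage count even when it fails, so callers usually rebalance with a put on the error path:

#include <stdio.h>

static int usage, powered, fail_resume;

static int pm_get_sync(void)
{
    usage++;                    /* the count rises even when resume fails */
    if (fail_resume)
        return -5;              /* -EIO stand-in */
    powered = 1;
    return 0;
}

static void pm_put(void)
{
    if (--usage == 0)
        powered = 0;
}

static int do_method(void)
{
    return powered ? 0 : -1;    /* would touch hardware here */
}

int main(void)
{
    int ret = pm_get_sync();
    if (ret < 0) {
        pm_put();               /* rebalance the count on failure */
        return 1;
    }
    ret = do_method();          /* stands in for the nvif_mthd() call */
    pm_put();
    printf("method returned %d, usage=%d powered=%d\n", ret, usage, powered);
    return 0;
}
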
a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 300daee74209..df73bec354e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -274,7 +274,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, } /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(&nvbo->gem); + drm_gem_object_put_unlocked(&nvbo->gem); return ret; } @@ -354,7 +354,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence, list_del(&nvbo->entry); nvbo->reserved_by = NULL; ttm_bo_unreserve(&nvbo->bo); - drm_gem_object_unreference_unlocked(&nvbo->gem); + drm_gem_object_put_unlocked(&nvbo->gem); } } @@ -400,14 +400,14 @@ retry: nvbo = nouveau_gem_object(gem); if (nvbo == res_bo) { res_bo = NULL; - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); continue; } if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { NV_PRINTK(err, cli, "multiple instances of buffer %d on " "validation list\n", b->handle); - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); ret = -EINVAL; break; } @@ -894,7 +894,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, ret = lret; nouveau_bo_sync_for_cpu(nvbo); - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); return ret; } @@ -913,7 +913,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, nvbo = nouveau_gem_object(gem); nouveau_bo_sync_for_device(nvbo); - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); return 0; } @@ -930,7 +930,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, return -ENOENT; ret = nouveau_gem_info(file_priv, gem, req); - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index 44178b4c3599..08a1ab6b150d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -69,8 +69,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d, struct nvkm_therm *therm = nvxx_therm(&drm->client.device); long value; - if (kstrtol(buf, 10, &value) == -EINVAL) - return count; + if (kstrtol(buf, 10, &value)) + return -EINVAL; therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST, value / 1000); @@ -102,8 +102,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d, struct nvkm_therm *therm = nvxx_therm(&drm->client.device); long value; - if (kstrtol(buf, 10, &value) == -EINVAL) - return count; + if (kstrtol(buf, 10, &value)) + return -EINVAL; therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST, value / 1000); @@ -156,7 +156,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a, long value; int ret; - if (kstrtol(buf, 10, &value) == -EINVAL) + if (kstrtol(buf, 10, &value)) return -EINVAL; ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY, value); @@ -179,7 +179,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a, long value; int ret; - if (kstrtol(buf, 10, &value) == -EINVAL) + if (kstrtol(buf, 10, &value)) return -EINVAL; ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY, value); diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 1ada186fab77..039e23548e08 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -36,7 
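
The nouveau_hwmon fixes further down correct two bugs at once: kstrtol() returns 0 or a negative errno, so testing the result against -EINVAL alone missed -ERANGE, and returning count made a failed parse look like a successful write. A userspace analogue of the strict form (somewhat stricter than kstrtol, which also tolerates one trailing newline):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Accept only a fully consumed, in-range decimal number; return 0 or a
 * negative errno, like the kernel helper. */
static int parse_long(const char *buf, long *out)
{
    char *end;
    errno = 0;
    long v = strtol(buf, &end, 10);
    if (end == buf || *end != '\0')
        return -EINVAL;         /* no digits, or trailing junk */
    if (errno == ERANGE)
        return -ERANGE;         /* out of range: not -EINVAL! */
    *out = v;
    return 0;
}

int main(void)
{
    long v;
    /* The fixed check treats any nonzero return as failure; the old
     * `== -EINVAL` test would have let -ERANGE slip through. */
    printf("%d %d %d\n", parse_long("1500", &v),
           parse_long("12abc", &v),
           parse_long("999999999999999999999999", &v));
    return 0;
}
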
+36,7 @@ static int nouveau_platform_probe(struct platform_device *pdev) ret = drm_dev_register(drm, 0); if (ret < 0) { - drm_dev_unref(drm); + drm_dev_put(drm); return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c index d0322ce85172..1a47c40e171b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c @@ -87,11 +87,12 @@ nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data) { struct nvkm_engine *engine = nvkm_engine(subdev); if (engine->func->info) { - if ((engine = nvkm_engine_ref(engine))) { + if (!IS_ERR((engine = nvkm_engine_ref(engine)))) { int ret = engine->func->info(engine, mthd, data); nvkm_engine_unref(&engine); return ret; } + return PTR_ERR(engine); } return -ENOSYS; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index 78597da6313a..0e372a190d3f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -23,6 +23,10 @@ #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER #include "priv.h" +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) +#include <asm/dma-iommu.h> +#endif + static int nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) { @@ -105,6 +109,15 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) unsigned long pgsize_bitmap; int ret; +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) + if (dev->archdata.mapping) { + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); + + arm_iommu_detach_device(dev); + arm_iommu_release_mapping(mapping); + } +#endif + if (!tdev->func->iommu_bit) return; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c index 29e6dd58ac48..525f95d06429 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c @@ -52,7 +52,7 @@ void gf119_disp_chan_intr(struct nv50_disp_chan *chan, bool en) { struct nvkm_device *device = chan->disp->base.engine.subdev.device; - const u64 mask = 0x00000001 << chan->chid.user; + const u32 mask = 0x00000001 << chan->chid.user; if (!en) { nvkm_mask(device, 0x610090, mask, 0x00000000); nvkm_mask(device, 0x6100a0, mask, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c index 57719f675eec..bcf32d92ee5a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c @@ -166,8 +166,8 @@ void nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en) { struct nvkm_device *device = chan->disp->base.engine.subdev.device; - const u64 mask = 0x00010001 << chan->chid.user; - const u64 data = en ? 0x00010000 : 0x00000000; + const u32 mask = 0x00010001 << chan->chid.user; + const u32 data = en ? 
0x00010000 << chan->chid.user : 0x00000000; nvkm_mask(device, 0x610028, mask, data); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c index 19173ea19096..3b3327789ae7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c @@ -25,24 +25,31 @@ #include <nvif/class.h> static void -gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) +gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm) { struct nvkm_subdev *subdev = &gr->base.engine.subdev; struct nvkm_device *device = subdev->device; - u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730)); - u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734)); + u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730 + (sm * 0x80))); + u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734 + (sm * 0x80))); const struct nvkm_enum *warp; char glob[128]; nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr); warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff); - nvkm_error(subdev, "GPC%i/TPC%i/MP trap: " + nvkm_error(subdev, "GPC%i/TPC%i/SM%d trap: " "global %08x [%s] warp %04x [%s]\n", - gpc, tpc, gerr, glob, werr, warp ? warp->name : ""); + gpc, tpc, sm, gerr, glob, werr, warp ? warp->name : ""); + + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730 + sm * 0x80), 0x00000000); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr); +} - nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730), 0x00000000); - nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734), gerr); +static void +gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) +{ + gv100_gr_trap_sm(gr, gpc, tpc, 0); + gv100_gr_trap_sm(gr, gpc, tpc, 1); } static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c index 20b6fc8243e0..71524548de32 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c @@ -58,8 +58,14 @@ nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h) h->ecount = nvbios_rd08(b, h->offset + 0x5); h->base_id = nvbios_rd08(b, h->offset + 0x0f); - h->boost_id = nvbios_rd08(b, h->offset + 0x10); - h->tdp_id = nvbios_rd08(b, h->offset + 0x11); + if (h->hlen > 0x10) + h->boost_id = nvbios_rd08(b, h->offset + 0x10); + else + h->boost_id = 0xff; + if (h->hlen > 0x11) + h->tdp_id = nvbios_rd08(b, h->offset + 0x11); + else + h->tdp_id = 0xff; return 0; default: return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c index 007bf4af33b9..16ad91c91a7b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c @@ -133,8 +133,14 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev) } } - return nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr, - &fault->event); + ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr, + &fault->event); + if (ret) + return ret; + + if (fault->func->oneinit) + ret = fault->func->oneinit(fault); + return ret; } static void * diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c index 73c7728b5969..3cd610d7deb5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c @@ -176,8 +176,17 @@ gv100_fault_init(struct nvkm_fault *fault) nvkm_notify_get(&fault->nrpfb); } +static int +gv100_fault_oneinit(struct nvkm_fault *fault) +{ + 
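
Two things change in the nv50/gf119 channel-interrupt hunks above: the mask and data temporaries shrink from u64 to u32 to match the 32-bit register they feed, and the enable value is now shifted by the channel id, so enabling channel N sets bit 16+N rather than always bit 16. Worked through in a self-contained read-modify-write sketch:

#include <stdio.h>
#include <stdint.h>

/* Simulated 32-bit MMIO register and a read-modify-write helper shaped
 * like nvkm_mask(): clear the mask bits, then OR in the data bits. */
static uint32_t reg_610028;

static uint32_t reg_mask(uint32_t *reg, uint32_t mask, uint32_t data)
{
    uint32_t old = *reg;
    *reg = (old & ~mask) | (data & mask);
    return old;
}

static void chan_intr(int chid, int en)
{
    /* Per-channel pair of bits: bit chid (pending) and bit 16+chid
     * (enable). The fix shifts the enable value by chid too, so that
     * enabling channel 2 sets bit 18 instead of always bit 16. */
    const uint32_t mask = 0x00010001u << chid;
    const uint32_t data = en ? 0x00010000u << chid : 0x00000000;
    reg_mask(&reg_610028, mask, data);
}

int main(void)
{
    chan_intr(2, 1);
    printf("0x%08x\n", reg_610028);   /* 0x00040000 */
    return 0;
}
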
return nvkm_notify_init(&fault->buffer[0]->object, &fault->event, + gv100_fault_ntfy_nrpfb, false, NULL, 0, 0, + &fault->nrpfb); +} + static const struct nvkm_fault_func gv100_fault = { + .oneinit = gv100_fault_oneinit, .init = gv100_fault_init, .fini = gv100_fault_fini, .intr = gv100_fault_intr, @@ -192,15 +201,5 @@ int gv100_fault_new(struct nvkm_device *device, int index, struct nvkm_fault **pfault) { - struct nvkm_fault *fault; - int ret; - - ret = nvkm_fault_new_(&gv100_fault, device, index, &fault); - *pfault = fault; - if (ret) - return ret; - - return nvkm_notify_init(&fault->buffer[0]->object, &fault->event, - gv100_fault_ntfy_nrpfb, false, NULL, 0, 0, - &fault->nrpfb); + return nvkm_fault_new_(&gv100_fault, device, index, pfault); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h index 44843ecf12b0..e4d2f5234fd1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h @@ -20,6 +20,7 @@ int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *, int index, struct nvkm_fault **); struct nvkm_fault_func { + int (*oneinit)(struct nvkm_fault *); void (*init)(struct nvkm_fault *); void (*fini)(struct nvkm_fault *); void (*intr)(struct nvkm_fault *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. deleted file mode 100644 index e69de29bb2d1..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. +++ /dev/null diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c index a721354249ce..d02e183717dc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c @@ -414,6 +414,20 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, { struct ls_ucode_img *_img; u32 pos = 0; + u32 max_desc_size = 0; + u8 *gdesc; + + /* Figure out how large we need gdesc to be. 
*/ + list_for_each_entry(_img, imgs, node) { + const struct acr_r352_ls_func *ls_func = + acr->func->ls_func[_img->falcon_id]; + + max_desc_size = max(max_desc_size, ls_func->bl_desc_size); + } + + gdesc = kmalloc(max_desc_size, GFP_KERNEL); + if (!gdesc) + return -ENOMEM; nvkm_kmap(wpr_blob); @@ -421,7 +435,6 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); const struct acr_r352_ls_func *ls_func = acr->func->ls_func[_img->falcon_id]; - u8 gdesc[ls_func->bl_desc_size]; nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, sizeof(img->wpr_header)); @@ -447,6 +460,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, nvkm_done(wpr_blob); + kfree(gdesc); + return 0; } @@ -771,7 +786,11 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, struct fw_bl_desc *hsbl_desc; void *bl, *blob_data, *hsbl_code, *hsbl_data; u32 code_size; - u8 bl_desc[bl_desc_size]; + u8 *bl_desc; + + bl_desc = kzalloc(bl_desc_size, GFP_KERNEL); + if (!bl_desc) + return -ENOMEM; /* Find the bootloader descriptor for our blob and copy it */ if (blob == acr->load_blob) { @@ -802,7 +821,6 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, code_size, hsbl_desc->start_tag, 0, false); /* Generate the BL header */ - memset(bl_desc, 0, bl_desc_size); acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset); /* @@ -811,6 +829,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off, bl_desc_size, 0); + kfree(bl_desc); return hsbl_desc->start_tag << 8; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c index 866877b88797..978ad0790367 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c @@ -265,6 +265,19 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, { struct ls_ucode_img *_img; u32 pos = 0; + u32 max_desc_size = 0; + u8 *gdesc; + + list_for_each_entry(_img, imgs, node) { + const struct acr_r352_ls_func *ls_func = + acr->func->ls_func[_img->falcon_id]; + + max_desc_size = max(max_desc_size, ls_func->bl_desc_size); + } + + gdesc = kmalloc(max_desc_size, GFP_KERNEL); + if (!gdesc) + return -ENOMEM; nvkm_kmap(wpr_blob); @@ -272,7 +285,6 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); const struct acr_r352_ls_func *ls_func = acr->func->ls_func[_img->falcon_id]; - u8 gdesc[ls_func->bl_desc_size]; nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, sizeof(img->wpr_header)); @@ -298,6 +310,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, nvkm_done(wpr_blob); + kfree(gdesc); + return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c index 30491d132d59..df8b919dcf09 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c @@ -129,6 +129,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index, return 0; } +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin"); @@ -144,3 +145,4 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); 
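
The acr_r352/r367 hunks above belong to the kernel-wide variable-length-array removal: instead of a stack array sized by ls_func->bl_desc_size inside the loop, they scan the image list once for the largest descriptor and kmalloc a single buffer to reuse. The same two-pass shape in miniature:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the per-falcon function tables consulted above. */
struct ls_func { size_t bl_desc_size; };

static const struct ls_func funcs[] = { { 40 }, { 64 }, { 24 } };

int main(void)
{
    /* First pass: find the largest descriptor any image will need,
     * replacing the per-iteration VLA `u8 gdesc[bl_desc_size]`. */
    size_t max_desc = 0, i, n = sizeof(funcs) / sizeof(funcs[0]);
    for (i = 0; i < n; i++)
        if (funcs[i].bl_desc_size > max_desc)
            max_desc = funcs[i].bl_desc_size;

    unsigned char *gdesc = malloc(max_desc);   /* one allocation */
    if (!gdesc)
        return 1;

    /* Second pass: reuse the single buffer for every image. */
    for (i = 0; i < n; i++) {
        memset(gdesc, 0, funcs[i].bl_desc_size);
        /* ... generate and copy the descriptor here ... */
    }

    printf("allocated %zu bytes once for %zu images\n", max_desc, n);
    free(gdesc);
    return 0;
}
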
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c index 632e9545e292..28ca29d0eeee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c @@ -74,6 +74,7 @@ gp10b_secboot_new(struct nvkm_device *device, int index, return 0; } +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); @@ -91,3 +92,4 @@ MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); +#endif diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 5cde26ac937b..2ddb856666c4 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -126,14 +126,14 @@ static int omap_connector_get_modes(struct drm_connector *connector) if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) && drm_edid_is_valid(edid)) { - drm_mode_connector_update_edid_property( + drm_connector_update_edid_property( connector, edid); n = drm_add_edid_modes(connector, edid); omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid); } else { - drm_mode_connector_update_edid_property( + drm_connector_update_edid_property( connector, NULL); } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 5005ecc284d2..1b6601e9b107 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -274,7 +274,7 @@ static int omap_modeset_init(struct drm_device *dev) if (IS_ERR(crtc)) return PTR_ERR(crtc); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << crtc_idx); priv->crtcs[priv->num_crtcs++] = crtc; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index e848af235df5..3ad4a46c4e94 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -334,7 +334,7 @@ static int ili9881c_prepare(struct drm_panel *panel) if (ret) return ret; - mipi_dsi_dcs_exit_sleep_mode(ctx->dsi); + ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi); if (ret) return ret; diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index bb53e0850764..72edb334d997 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -11,6 +11,7 @@ #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/regulator/consumer.h> #include <drm/drmP.h> @@ -20,12 +21,41 @@ #include <video/mipi_display.h> +struct panel_init_cmd { + size_t len; + const char *data; +}; + +#define _INIT_CMD(...) 
{ \ + .len = sizeof((char[]){__VA_ARGS__}), \ + .data = (char[]){__VA_ARGS__} } + +struct panel_desc { + const struct drm_display_mode *mode; + unsigned int bpc; + struct { + unsigned int width; + unsigned int height; + } size; + + unsigned long flags; + enum mipi_dsi_pixel_format format; + const struct panel_init_cmd *init_cmds; + unsigned int lanes; + const char * const *supply_names; + unsigned int num_supplies; + unsigned int sleep_mode_delay; + unsigned int power_down_delay; +}; + struct innolux_panel { struct drm_panel base; struct mipi_dsi_device *link; + const struct panel_desc *desc; struct backlight_device *backlight; - struct regulator *supply; + struct regulator_bulk_data *supplies; + unsigned int num_supplies; struct gpio_desc *enable_gpio; bool prepared; @@ -72,12 +102,16 @@ static int innolux_panel_unprepare(struct drm_panel *panel) return err; } + if (innolux->desc->sleep_mode_delay) + msleep(innolux->desc->sleep_mode_delay); + gpiod_set_value_cansleep(innolux->enable_gpio, 0); - /* T8: 80ms - 1000ms */ - msleep(80); + if (innolux->desc->power_down_delay) + msleep(innolux->desc->power_down_delay); - err = regulator_disable(innolux->supply); + err = regulator_bulk_disable(innolux->desc->num_supplies, + innolux->supplies); if (err < 0) return err; @@ -89,24 +123,55 @@ static int innolux_panel_unprepare(struct drm_panel *panel) static int innolux_panel_prepare(struct drm_panel *panel) { struct innolux_panel *innolux = to_innolux_panel(panel); - int err, regulator_err; + int err; if (innolux->prepared) return 0; gpiod_set_value_cansleep(innolux->enable_gpio, 0); - err = regulator_enable(innolux->supply); + err = regulator_bulk_enable(innolux->desc->num_supplies, + innolux->supplies); if (err < 0) return err; - /* T2: 15ms - 1000ms */ - usleep_range(15000, 16000); + /* p079zca: t2 (20ms), p097pfg: t4 (15ms) */ + usleep_range(20000, 21000); gpiod_set_value_cansleep(innolux->enable_gpio, 1); - /* T4: 15ms - 1000ms */ - usleep_range(15000, 16000); + /* p079zca: t4, p097pfg: t5 */ + usleep_range(20000, 21000); + + if (innolux->desc->init_cmds) { + const struct panel_init_cmd *cmds = + innolux->desc->init_cmds; + unsigned int i; + + for (i = 0; cmds[i].len != 0; i++) { + const struct panel_init_cmd *cmd = &cmds[i]; + + err = mipi_dsi_generic_write(innolux->link, cmd->data, + cmd->len); + if (err < 0) { + dev_err(panel->dev, + "failed to write command %u\n", i); + goto poweroff; + } + + /* + * Included by random guessing, because without this + * (or at least, some delay), the panel sometimes + * didn't appear to pick up the command sequence. 
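
The _INIT_CMD() macro defined above is worth unpacking: a compound literal built from __VA_ARGS__ is used twice, once under sizeof() to count the bytes and once to provide the storage, so each table entry carries its own length without any manual counting. A standalone demonstration (unsigned char here where the driver uses char, purely to keep the hex initializers warning-free):

#include <stdio.h>

struct init_cmd {
    size_t len;
    const unsigned char *data;
};

/* Same trick as _INIT_CMD(): one __VA_ARGS__ list yields both the byte
 * count (via sizeof on a compound literal) and a pointer to the bytes. */
#define INIT_CMD(...)                                    \
    { .len = sizeof((unsigned char[]){__VA_ARGS__}),     \
      .data = (unsigned char[]){__VA_ARGS__} }

static const struct init_cmd cmds[] = {
    INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x00),
    INIT_CMD(0xB1, 0xE8, 0x11),
    {},                     /* zero-length sentinel, as in the driver */
};

int main(void)
{
    for (unsigned i = 0; cmds[i].len != 0; i++)
        printf("cmd %u: %zu bytes, opcode 0x%02X\n",
               i, cmds[i].len, cmds[i].data[0]);
    return 0;
}
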
+ */ + err = mipi_dsi_dcs_nop(innolux->link); + if (err < 0) { + dev_err(panel->dev, + "failed to send DCS nop: %d\n", err); + goto poweroff; + } + } + } err = mipi_dsi_dcs_exit_sleep_mode(innolux->link); if (err < 0) { @@ -133,12 +198,9 @@ static int innolux_panel_prepare(struct drm_panel *panel) return 0; poweroff: - regulator_err = regulator_disable(innolux->supply); - if (regulator_err) - DRM_DEV_ERROR(panel->dev, "failed to disable regulator: %d\n", - regulator_err); - gpiod_set_value_cansleep(innolux->enable_gpio, 0); + regulator_bulk_disable(innolux->desc->num_supplies, innolux->supplies); + return err; } @@ -162,7 +224,11 @@ static int innolux_panel_enable(struct drm_panel *panel) return 0; } -static const struct drm_display_mode default_mode = { +static const char * const innolux_p079zca_supply_names[] = { + "power", +}; + +static const struct drm_display_mode innolux_p079zca_mode = { .clock = 56900, .hdisplay = 768, .hsync_start = 768 + 40, @@ -175,15 +241,181 @@ static const struct drm_display_mode default_mode = { .vrefresh = 60, }; +static const struct panel_desc innolux_p079zca_panel_desc = { + .mode = &innolux_p079zca_mode, + .bpc = 8, + .size = { + .width = 120, + .height = 160, + }, + .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_LPM, + .format = MIPI_DSI_FMT_RGB888, + .lanes = 4, + .supply_names = innolux_p079zca_supply_names, + .num_supplies = ARRAY_SIZE(innolux_p079zca_supply_names), + .power_down_delay = 80, /* T8: 80ms - 1000ms */ +}; + +static const char * const innolux_p097pfg_supply_names[] = { + "avdd", + "avee", +}; + +static const struct drm_display_mode innolux_p097pfg_mode = { + .clock = 229000, + .hdisplay = 1536, + .hsync_start = 1536 + 100, + .hsync_end = 1536 + 100 + 24, + .htotal = 1536 + 100 + 24 + 100, + .vdisplay = 2048, + .vsync_start = 2048 + 100, + .vsync_end = 2048 + 100 + 2, + .vtotal = 2048 + 100 + 2 + 18, + .vrefresh = 60, +}; + +/* + * Display manufacturer failed to provide init sequencing according to + * https://chromium-review.googlesource.com/c/chromiumos/third_party/coreboot/+/892065/ + * so the init sequence stems from a register dump of a working panel. 
+ */ +static const struct panel_init_cmd innolux_p097pfg_init_cmds[] = { + /* page 0 */ + _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x00), + _INIT_CMD(0xB1, 0xE8, 0x11), + _INIT_CMD(0xB2, 0x25, 0x02), + _INIT_CMD(0xB5, 0x08, 0x00), + _INIT_CMD(0xBC, 0x0F, 0x00), + _INIT_CMD(0xB8, 0x03, 0x06, 0x00, 0x00), + _INIT_CMD(0xBD, 0x01, 0x90, 0x14, 0x14), + _INIT_CMD(0x6F, 0x01), + _INIT_CMD(0xC0, 0x03), + _INIT_CMD(0x6F, 0x02), + _INIT_CMD(0xC1, 0x0D), + _INIT_CMD(0xD9, 0x01, 0x09, 0x70), + _INIT_CMD(0xC5, 0x12, 0x21, 0x00), + _INIT_CMD(0xBB, 0x93, 0x93), + + /* page 1 */ + _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x01), + _INIT_CMD(0xB3, 0x3C, 0x3C), + _INIT_CMD(0xB4, 0x0F, 0x0F), + _INIT_CMD(0xB9, 0x45, 0x45), + _INIT_CMD(0xBA, 0x14, 0x14), + _INIT_CMD(0xCA, 0x02), + _INIT_CMD(0xCE, 0x04), + _INIT_CMD(0xC3, 0x9B, 0x9B), + _INIT_CMD(0xD8, 0xC0, 0x03), + _INIT_CMD(0xBC, 0x82, 0x01), + _INIT_CMD(0xBD, 0x9E, 0x01), + + /* page 2 */ + _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x02), + _INIT_CMD(0xB0, 0x82), + _INIT_CMD(0xD1, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x82, 0x00, 0xA5, + 0x00, 0xC1, 0x00, 0xEA, 0x01, 0x0D, 0x01, 0x40), + _INIT_CMD(0xD2, 0x01, 0x6A, 0x01, 0xA8, 0x01, 0xDC, 0x02, 0x29, + 0x02, 0x67, 0x02, 0x68, 0x02, 0xA8, 0x02, 0xF0), + _INIT_CMD(0xD3, 0x03, 0x19, 0x03, 0x49, 0x03, 0x67, 0x03, 0x8C, + 0x03, 0xA6, 0x03, 0xC7, 0x03, 0xDE, 0x03, 0xEC), + _INIT_CMD(0xD4, 0x03, 0xFF, 0x03, 0xFF), + _INIT_CMD(0xE0, 0x00, 0x00, 0x00, 0x86, 0x00, 0xC5, 0x00, 0xE5, + 0x00, 0xFF, 0x01, 0x26, 0x01, 0x45, 0x01, 0x75), + _INIT_CMD(0xE1, 0x01, 0x9C, 0x01, 0xD5, 0x02, 0x05, 0x02, 0x4D, + 0x02, 0x86, 0x02, 0x87, 0x02, 0xC3, 0x03, 0x03), + _INIT_CMD(0xE2, 0x03, 0x2A, 0x03, 0x56, 0x03, 0x72, 0x03, 0x94, + 0x03, 0xAC, 0x03, 0xCB, 0x03, 0xE0, 0x03, 0xED), + _INIT_CMD(0xE3, 0x03, 0xFF, 0x03, 0xFF), + + /* page 3 */ + _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x03), + _INIT_CMD(0xB0, 0x00, 0x00, 0x00, 0x00), + _INIT_CMD(0xB1, 0x00, 0x00, 0x00, 0x00), + _INIT_CMD(0xB2, 0x00, 0x00, 0x06, 0x04, 0x01, 0x40, 0x85), + _INIT_CMD(0xB3, 0x10, 0x07, 0xFC, 0x04, 0x01, 0x40, 0x80), + _INIT_CMD(0xB6, 0xF0, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, + 0x40, 0x80), + _INIT_CMD(0xBA, 0xC5, 0x07, 0x00, 0x04, 0x11, 0x25, 0x8C), + _INIT_CMD(0xBB, 0xC5, 0x07, 0x00, 0x03, 0x11, 0x25, 0x8C), + _INIT_CMD(0xC0, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80), + _INIT_CMD(0xC1, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80), + _INIT_CMD(0xC4, 0x00, 0x00), + _INIT_CMD(0xEF, 0x41), + + /* page 4 */ + _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x04), + _INIT_CMD(0xEC, 0x4C), + + /* page 5 */ + _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x05), + _INIT_CMD(0xB0, 0x13, 0x03, 0x03, 0x01), + _INIT_CMD(0xB1, 0x30, 0x00), + _INIT_CMD(0xB2, 0x02, 0x02, 0x00), + _INIT_CMD(0xB3, 0x82, 0x23, 0x82, 0x9D), + _INIT_CMD(0xB4, 0xC5, 0x75, 0x24, 0x57), + _INIT_CMD(0xB5, 0x00, 0xD4, 0x72, 0x11, 0x11, 0xAB, 0x0A), + _INIT_CMD(0xB6, 0x00, 0x00, 0xD5, 0x72, 0x24, 0x56), + _INIT_CMD(0xB7, 0x5C, 0xDC, 0x5C, 0x5C), + _INIT_CMD(0xB9, 0x0C, 0x00, 0x00, 0x01, 0x00), + _INIT_CMD(0xC0, 0x75, 0x11, 0x11, 0x54, 0x05), + _INIT_CMD(0xC6, 0x00, 0x00, 0x00, 0x00), + _INIT_CMD(0xD0, 0x00, 0x48, 0x08, 0x00, 0x00), + _INIT_CMD(0xD1, 0x00, 0x48, 0x09, 0x00, 0x00), + + /* page 6 */ + _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x06), + _INIT_CMD(0xB0, 0x02, 0x32, 0x32, 0x08, 0x2F), + _INIT_CMD(0xB1, 0x2E, 0x15, 0x14, 0x13, 0x12), + _INIT_CMD(0xB2, 0x11, 0x10, 0x00, 0x3D, 0x3D), + _INIT_CMD(0xB3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D), + _INIT_CMD(0xB4, 0x3D, 0x32), + _INIT_CMD(0xB5, 0x03, 0x32, 0x32, 0x09, 0x2F), + 
_INIT_CMD(0xB6, 0x2E, 0x1B, 0x1A, 0x19, 0x18), + _INIT_CMD(0xB7, 0x17, 0x16, 0x01, 0x3D, 0x3D), + _INIT_CMD(0xB8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D), + _INIT_CMD(0xB9, 0x3D, 0x32), + _INIT_CMD(0xC0, 0x01, 0x32, 0x32, 0x09, 0x2F), + _INIT_CMD(0xC1, 0x2E, 0x1A, 0x1B, 0x16, 0x17), + _INIT_CMD(0xC2, 0x18, 0x19, 0x03, 0x3D, 0x3D), + _INIT_CMD(0xC3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D), + _INIT_CMD(0xC4, 0x3D, 0x32), + _INIT_CMD(0xC5, 0x00, 0x32, 0x32, 0x08, 0x2F), + _INIT_CMD(0xC6, 0x2E, 0x14, 0x15, 0x10, 0x11), + _INIT_CMD(0xC7, 0x12, 0x13, 0x02, 0x3D, 0x3D), + _INIT_CMD(0xC8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D), + _INIT_CMD(0xC9, 0x3D, 0x32), + + {}, +}; + +static const struct panel_desc innolux_p097pfg_panel_desc = { + .mode = &innolux_p097pfg_mode, + .bpc = 8, + .size = { + .width = 147, + .height = 196, + }, + .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_LPM, + .format = MIPI_DSI_FMT_RGB888, + .init_cmds = innolux_p097pfg_init_cmds, + .lanes = 4, + .supply_names = innolux_p097pfg_supply_names, + .num_supplies = ARRAY_SIZE(innolux_p097pfg_supply_names), + .sleep_mode_delay = 100, /* T15 */ +}; + static int innolux_panel_get_modes(struct drm_panel *panel) { + struct innolux_panel *innolux = to_innolux_panel(panel); + const struct drm_display_mode *m = innolux->desc->mode; struct drm_display_mode *mode; - mode = drm_mode_duplicate(panel->drm, &default_mode); + mode = drm_mode_duplicate(panel->drm, m); if (!mode) { DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%ux@%u\n", - default_mode.hdisplay, default_mode.vdisplay, - default_mode.vrefresh); + m->hdisplay, m->vdisplay, m->vrefresh); return -ENOMEM; } @@ -191,9 +423,11 @@ static int innolux_panel_get_modes(struct drm_panel *panel) drm_mode_probed_add(panel->connector, mode); - panel->connector->display_info.width_mm = 120; - panel->connector->display_info.height_mm = 160; - panel->connector->display_info.bpc = 8; + panel->connector->display_info.width_mm = + innolux->desc->size.width; + panel->connector->display_info.height_mm = + innolux->desc->size.height; + panel->connector->display_info.bpc = innolux->desc->bpc; return 1; } @@ -207,19 +441,42 @@ static const struct drm_panel_funcs innolux_panel_funcs = { }; static const struct of_device_id innolux_of_match[] = { - { .compatible = "innolux,p079zca", }, + { .compatible = "innolux,p079zca", + .data = &innolux_p079zca_panel_desc + }, + { .compatible = "innolux,p097pfg", + .data = &innolux_p097pfg_panel_desc + }, { } }; MODULE_DEVICE_TABLE(of, innolux_of_match); -static int innolux_panel_add(struct innolux_panel *innolux) +static int innolux_panel_add(struct mipi_dsi_device *dsi, + const struct panel_desc *desc) { - struct device *dev = &innolux->link->dev; - int err; + struct innolux_panel *innolux; + struct device *dev = &dsi->dev; + int err, i; + + innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL); + if (!innolux) + return -ENOMEM; + + innolux->desc = desc; + + innolux->supplies = devm_kcalloc(dev, desc->num_supplies, + sizeof(*innolux->supplies), + GFP_KERNEL); + if (!innolux->supplies) + return -ENOMEM; + + for (i = 0; i < desc->num_supplies; i++) + innolux->supplies[i].supply = desc->supply_names[i]; - innolux->supply = devm_regulator_get(dev, "power"); - if (IS_ERR(innolux->supply)) - return PTR_ERR(innolux->supply); + err = devm_regulator_bulk_get(dev, desc->num_supplies, + innolux->supplies); + if (err < 0) + return err; innolux->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH); @@ -230,15 +487,21 @@ static int 
innolux_panel_add(struct innolux_panel *innolux) } innolux->backlight = devm_of_find_backlight(dev); - if (IS_ERR(innolux->backlight)) return PTR_ERR(innolux->backlight); drm_panel_init(&innolux->base); innolux->base.funcs = &innolux_panel_funcs; - innolux->base.dev = &innolux->link->dev; + innolux->base.dev = dev; + + err = drm_panel_add(&innolux->base); + if (err < 0) + return err; + + mipi_dsi_set_drvdata(dsi, innolux); + innolux->link = dsi; - return drm_panel_add(&innolux->base); + return 0; } static void innolux_panel_del(struct innolux_panel *innolux) @@ -249,28 +512,19 @@ static void innolux_panel_del(struct innolux_panel *innolux) static int innolux_panel_probe(struct mipi_dsi_device *dsi) { - struct innolux_panel *innolux; + const struct panel_desc *desc; int err; - dsi->lanes = 4; - dsi->format = MIPI_DSI_FMT_RGB888; - dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | - MIPI_DSI_MODE_LPM; - - innolux = devm_kzalloc(&dsi->dev, sizeof(*innolux), GFP_KERNEL); - if (!innolux) - return -ENOMEM; - - mipi_dsi_set_drvdata(dsi, innolux); + desc = of_device_get_match_data(&dsi->dev); + dsi->mode_flags = desc->flags; + dsi->format = desc->format; + dsi->lanes = desc->lanes; - innolux->link = dsi; - - err = innolux_panel_add(innolux); + err = innolux_panel_add(dsi, desc); if (err < 0) return err; - err = mipi_dsi_attach(dsi); - return err; + return mipi_dsi_attach(dsi); } static int innolux_panel_remove(struct mipi_dsi_device *dsi) @@ -317,5 +571,6 @@ static struct mipi_dsi_driver innolux_panel_driver = { module_mipi_dsi_driver(innolux_panel_driver); MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>"); +MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>"); MODULE_DESCRIPTION("Innolux P079ZCA panel driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c index a188a3959f1a..6ad827b93ae1 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c @@ -823,7 +823,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx) int ret, i; ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id)); - if (ret < ARRAY_SIZE(id) || id[0] == 0x00) { + if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) { dev_err(ctx->dev, "read id failed\n"); ctx->error = -EIO; return; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index ac6aaa174c0b..5b5d0a24e713 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -252,7 +252,7 @@ static int panel_simple_get_modes(struct drm_panel *panel) /* probe EDID if a DDC bus is available */ if (p->ddc) { struct edid *edid = drm_get_edid(panel->connector, p->ddc); - drm_mode_connector_update_edid_property(panel->connector, edid); + drm_connector_update_edid_property(panel->connector, edid); if (edid) { num += drm_add_edid_modes(panel->connector, edid); kfree(edid); @@ -772,6 +772,28 @@ static const struct panel_desc avic_tm070ddh03 = { }, }; +static const struct drm_display_mode boe_hv070wsa_mode = { + .clock = 40800, + .hdisplay = 1024, + .hsync_start = 1024 + 90, + .hsync_end = 1024 + 90 + 90, + .htotal = 1024 + 90 + 90 + 90, + .vdisplay = 600, + .vsync_start = 600 + 3, + .vsync_end = 600 + 3 + 4, + .vtotal = 600 + 3 + 4 + 3, + .vrefresh = 60, +}; + +static const struct panel_desc boe_hv070wsa = { + .modes = &boe_hv070wsa_mode, + .num_modes = 1, + .size = { + .width = 154, + .height = 90, + }, +}; + static const struct drm_display_mode 
boe_nv101wxmn51_modes[] = { { .clock = 71900, @@ -884,6 +906,61 @@ static const struct panel_desc chunghwa_claa101wb01 = { }, }; +static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = { + .clock = 33260, + .hdisplay = 800, + .hsync_start = 800 + 40, + .hsync_end = 800 + 40 + 128, + .htotal = 800 + 40 + 128 + 88, + .vdisplay = 480, + .vsync_start = 480 + 10, + .vsync_end = 480 + 10 + 2, + .vtotal = 480 + 10 + 2 + 33, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc dataimage_scf0700c48ggu18 = { + .modes = &dataimage_scf0700c48ggu18_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 152, + .height = 91, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, +}; + +static const struct display_timing dlc_dlc0700yzg_1_timing = { + .pixelclock = { 45000000, 51200000, 57000000 }, + .hactive = { 1024, 1024, 1024 }, + .hfront_porch = { 100, 106, 113 }, + .hback_porch = { 100, 106, 113 }, + .hsync_len = { 100, 108, 114 }, + .vactive = { 600, 600, 600 }, + .vfront_porch = { 8, 11, 15 }, + .vback_porch = { 8, 11, 15 }, + .vsync_len = { 9, 13, 15 }, + .flags = DISPLAY_FLAGS_DE_HIGH, +}; + +static const struct panel_desc dlc_dlc0700yzg_1 = { + .timings = &dlc_dlc0700yzg_1_timing, + .num_timings = 1, + .bpc = 6, + .size = { + .width = 154, + .height = 86, + }, + .delay = { + .prepare = 30, + .enable = 200, + .disable = 200, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, +}; + static const struct drm_display_mode edt_et057090dhu_mode = { .clock = 25175, .hdisplay = 640, @@ -936,6 +1013,18 @@ static const struct panel_desc edt_etm0700g0dh6 = { .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, }; +static const struct panel_desc edt_etm0700g0bdh6 = { + .modes = &edt_etm0700g0dh6_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 152, + .height = 91, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, +}; + static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = { .clock = 32260, .hdisplay = 800, @@ -1113,6 +1202,36 @@ static const struct panel_desc innolux_at070tn92 = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct display_timing innolux_g070y2_l01_timing = { + .pixelclock = { 28000000, 29500000, 32000000 }, + .hactive = { 800, 800, 800 }, + .hfront_porch = { 61, 91, 141 }, + .hback_porch = { 60, 90, 140 }, + .hsync_len = { 12, 12, 12 }, + .vactive = { 480, 480, 480 }, + .vfront_porch = { 4, 9, 30 }, + .vback_porch = { 4, 8, 28 }, + .vsync_len = { 2, 2, 2 }, + .flags = DISPLAY_FLAGS_DE_HIGH, +}; + +static const struct panel_desc innolux_g070y2_l01 = { + .timings = &innolux_g070y2_l01_timing, + .num_timings = 1, + .bpc = 6, + .size = { + .width = 152, + .height = 91, + }, + .delay = { + .prepare = 10, + .enable = 100, + .disable = 100, + .unprepare = 800, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, +}; + static const struct display_timing innolux_g101ice_l01_timing = { .pixelclock = { 60400000, 71100000, 74700000 }, .hactive = { 1280, 1280, 1280 }, @@ -1562,6 +1681,33 @@ static const struct panel_desc netron_dy_e231732 = { .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; +static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = { + .clock = 9000, + .hdisplay = 480, + .hsync_start = 480 + 2, + .hsync_end = 480 + 2 + 41, + .htotal = 480 + 2 + 41 + 2, + .vdisplay = 272, + .vsync_start = 272 + 2, + .vsync_end = 272 + 2 + 
10, + .vtotal = 272 + 2 + 10 + 2, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc newhaven_nhd_43_480272ef_atxl = { + .modes = &newhaven_nhd_43_480272ef_atxl_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 95, + .height = 54, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE | + DRM_BUS_FLAG_SYNC_POSEDGE, +}; + static const struct display_timing nlt_nl192108ac18_02d_timing = { .pixelclock = { 130000000, 148350000, 163000000 }, .hactive = { 1920, 1920, 1920 }, @@ -1747,6 +1893,36 @@ static const struct panel_desc qd43003c0_40 = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct display_timing rocktech_rk070er9427_timing = { + .pixelclock = { 26400000, 33300000, 46800000 }, + .hactive = { 800, 800, 800 }, + .hfront_porch = { 16, 210, 354 }, + .hback_porch = { 46, 46, 46 }, + .hsync_len = { 1, 1, 1 }, + .vactive = { 480, 480, 480 }, + .vfront_porch = { 7, 22, 147 }, + .vback_porch = { 23, 23, 23 }, + .vsync_len = { 1, 1, 1 }, + .flags = DISPLAY_FLAGS_DE_HIGH, +}; + +static const struct panel_desc rocktech_rk070er9427 = { + .timings = &rocktech_rk070er9427_timing, + .num_timings = 1, + .bpc = 6, + .size = { + .width = 154, + .height = 86, + }, + .delay = { + .prepare = 41, + .enable = 50, + .unprepare = 41, + .disable = 50, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, +}; + static const struct drm_display_mode samsung_lsn122dl01_c01_mode = { .clock = 271560, .hdisplay = 2560, @@ -1815,6 +1991,30 @@ static const struct panel_desc samsung_ltn140at29_301 = { }, }; +static const struct drm_display_mode sharp_lq035q7db03_mode = { + .clock = 5500, + .hdisplay = 240, + .hsync_start = 240 + 16, + .hsync_end = 240 + 16 + 7, + .htotal = 240 + 16 + 7 + 5, + .vdisplay = 320, + .vsync_start = 320 + 9, + .vsync_end = 320 + 9 + 1, + .vtotal = 320 + 9 + 1 + 7, + .vrefresh = 60, +}; + +static const struct panel_desc sharp_lq035q7db03 = { + .modes = &sharp_lq035q7db03_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 54, + .height = 72, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, +}; + static const struct display_timing sharp_lq101k1ly04_timing = { .pixelclock = { 60000000, 65000000, 80000000 }, .hactive = { 1280, 1280, 1280 }, @@ -2167,6 +2367,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "avic,tm070ddh03", .data = &avic_tm070ddh03, }, { + .compatible = "boe,hv070wsa-100", + .data = &boe_hv070wsa + }, { .compatible = "boe,nv101wxmn51", .data = &boe_nv101wxmn51, }, { @@ -2179,6 +2382,12 @@ static const struct of_device_id platform_of_match[] = { .compatible = "chunghwa,claa101wb01", .data = &chunghwa_claa101wb01 }, { + .compatible = "dataimage,scf0700c48ggu18", + .data = &dataimage_scf0700c48ggu18, + }, { + .compatible = "dlc,dlc0700yzg-1", + .data = &dlc_dlc0700yzg_1, + }, { .compatible = "edt,et057090dhu", .data = &edt_et057090dhu, }, { @@ -2188,6 +2397,12 @@ static const struct of_device_id platform_of_match[] = { .compatible = "edt,etm0700g0dh6", .data = &edt_etm0700g0dh6, }, { + .compatible = "edt,etm0700g0bdh6", + .data = &edt_etm0700g0bdh6, + }, { + .compatible = "edt,etm0700g0edh6", + .data = &edt_etm0700g0bdh6, + }, { .compatible = "foxlink,fl500wvr00-a0t", .data = &foxlink_fl500wvr00_a0t, }, { @@ -2209,10 +2424,13 @@ static const struct of_device_id platform_of_match[] = { .compatible = "innolux,at070tn92", .data = &innolux_at070tn92, }, { - .compatible ="innolux,g101ice-l01", + .compatible = 
"innolux,g070y2-l01", + .data = &innolux_g070y2_l01, + }, { + .compatible = "innolux,g101ice-l01", .data = &innolux_g101ice_l01 }, { - .compatible ="innolux,g121i1-l01", + .compatible = "innolux,g121i1-l01", .data = &innolux_g121i1_l01 }, { .compatible = "innolux,g121x1-l03", @@ -2263,6 +2481,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "netron-dy,e231732", .data = &netron_dy_e231732, }, { + .compatible = "newhaven,nhd-4.3-480272ef-atxl", + .data = &newhaven_nhd_43_480272ef_atxl, + }, { .compatible = "nlt,nl192108ac18-02d", .data = &nlt_nl192108ac18_02d, }, { @@ -2284,6 +2505,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "qiaodian,qd43003c0-40", .data = &qd43003c0_40, }, { + .compatible = "rocktech,rk070er9427", + .data = &rocktech_rk070er9427, + }, { .compatible = "samsung,lsn122dl01-c01", .data = &samsung_lsn122dl01_c01, }, { @@ -2293,6 +2517,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "samsung,ltn140at29-301", .data = &samsung_ltn140at29_301, }, { + .compatible = "sharp,lq035q7db03", + .data = &sharp_lq035q7db03, + }, { .compatible = "sharp,lq101k1ly04", .data = &sharp_lq101k1ly04, }, { diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index a432eb7ad445..754f6b25f265 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -63,7 +63,7 @@ pl111_mode_valid(struct drm_crtc *crtc, * We use the pixelclock to also account for interlaced modes, the * resulting bandwidth is in bytes per second. */ - bw = mode->clock * 1000; /* In Hz */ + bw = mode->clock * 1000ULL; /* In Hz */ bw = bw * mode->hdisplay * mode->vdisplay * cpp; bw = div_u64(bw, mode->htotal * mode->vtotal); diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 054b93689d94..47fe30223444 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -250,6 +250,8 @@ static struct drm_driver pl111_drm_driver = { .gem_prime_import_sg_table = pl111_gem_import_sg_table, .gem_prime_export = drm_gem_prime_export, .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_mmap = drm_gem_cma_prime_mmap, + .gem_prime_vmap = drm_gem_cma_prime_vmap, #if defined(CONFIG_DEBUG_FS) .debugfs_init = pl111_debugfs_init, @@ -302,13 +304,14 @@ static int pl111_amba_probe(struct amba_device *amba_dev, if (IS_ERR(priv->regs)) { dev_err(dev, "%s failed mmio\n", __func__); ret = PTR_ERR(priv->regs); - goto dev_unref; + goto dev_put; } /* This may override some variant settings */ ret = pl111_versatile_init(dev, priv); if (ret) - goto dev_unref; + goto dev_put; + pl111_nomadik_init(dev); /* turn off interrupts before requesting the irq */ @@ -323,16 +326,16 @@ static int pl111_amba_probe(struct amba_device *amba_dev, ret = pl111_modeset_init(drm); if (ret != 0) - goto dev_unref; + goto dev_put; ret = drm_dev_register(drm, 0); if (ret < 0) - goto dev_unref; + goto dev_put; return 0; -dev_unref: - drm_dev_unref(drm); +dev_put: + drm_dev_put(drm); of_reserved_mem_device_release(dev); return ret; @@ -349,7 +352,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev) if (priv->panel) drm_panel_bridge_remove(priv->bridge); drm_mode_config_cleanup(drm); - drm_dev_unref(drm); + drm_dev_put(drm); of_reserved_mem_device_release(dev); return 0; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 768207fbbae3..0570c6826bff 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ 
b/drivers/gpu/drm/qxl/qxl_display.c @@ -1086,7 +1086,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output) /* we get HPD via client monitors config */ connector->polled = DRM_CONNECTOR_POLL_HPD; encoder->possible_crtcs = 1 << num_output; - drm_mode_connector_attach_encoder(&qxl_output->base, + drm_connector_attach_encoder(&qxl_output->base, &qxl_output->enc); drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs); drm_connector_helper_add(connector, &qxl_connector_helper_funcs); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 2aea2bdff99b..414642e5b7a3 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -244,23 +244,15 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; - struct drm_encoder *best_encoder = NULL; - struct drm_encoder *encoder = NULL; + struct drm_encoder *best_encoder; + struct drm_encoder *encoder; const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; bool connected; int i; best_encoder = connector_funcs->best_encoder(connector); - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, - connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { if ((encoder == best_encoder) && (status == connector_status_connected)) connected = true; else @@ -270,7 +262,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c radeon_atombios_connected_scratch_regs(connector, encoder, connected); else radeon_combios_connected_scratch_regs(connector, encoder, connected); - } } @@ -279,17 +270,11 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, struct drm_encoder *encoder; int i; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { if (encoder->encoder_type == encoder_type) return encoder; } + return NULL; } @@ -383,20 +368,23 @@ static int radeon_ddc_get_modes(struct drm_connector *connector) int ret; if (radeon_connector->edid) { - drm_mode_connector_update_edid_property(connector, radeon_connector->edid); + drm_connector_update_edid_property(connector, radeon_connector->edid); ret = drm_add_edid_modes(connector, radeon_connector->edid); return ret; } - drm_mode_connector_update_edid_property(connector, NULL); + drm_connector_update_edid_property(connector, NULL); return 0; } static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) { - int enc_id = connector->encoder_ids[0]; - /* pick the encoder ids */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); + struct drm_encoder *encoder; + int i; + + /* pick the first one */ + drm_connector_for_each_possible_encoder(connector, encoder, i) + return encoder; + return NULL; } @@ -436,19 +424,19 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, struct drm_device *dev = connector->dev; struct drm_connector *conflict; struct radeon_connector *radeon_conflict; - int i; list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { + 
struct drm_encoder *enc; + int i; + if (conflict == connector) continue; radeon_conflict = to_radeon_connector(conflict); - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (conflict->encoder_ids[i] == 0) - break; + drm_connector_for_each_possible_encoder(conflict, enc, i) { /* if the IDs match */ - if (conflict->encoder_ids[i] == encoder->base.id) { + if (enc == encoder) { if (conflict->status != connector_status_connected) continue; @@ -1256,7 +1244,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; const struct drm_encoder_helper_funcs *encoder_funcs; - int i, r; + int r; enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; @@ -1374,15 +1362,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) /* find analog encoder */ if (radeon_connector->dac_load_detect) { - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, - connector->encoder_ids[i]); - if (!encoder) - continue; + int i; + drm_connector_for_each_possible_encoder(connector, encoder, i) { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; @@ -1458,18 +1440,11 @@ exit: /* okay need to be smart in here about which encoder to pick */ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) { - int enc_id = connector->encoder_ids[0]; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; int i; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]); - if (!encoder) - continue; + drm_connector_for_each_possible_encoder(connector, encoder, i) { if (radeon_connector->use_digital == true) { if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) return encoder; @@ -1484,8 +1459,9 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) /* then check use digitial */ /* pick the first one */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); + drm_connector_for_each_possible_encoder(connector, encoder, i) + return encoder; + return NULL; } @@ -1628,14 +1604,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn struct radeon_encoder *radeon_encoder; int i; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { radeon_encoder = to_radeon_encoder(encoder); switch (radeon_encoder->encoder_id) { @@ -1657,14 +1626,7 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) int i; bool found = false; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]); - if (!encoder) - continue; - + drm_connector_for_each_possible_encoder(connector, encoder, i) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) found = true; diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 
cd8a3ee16649..f920be236cc9 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -195,11 +195,11 @@ static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector) radeon_connector->edid = edid; DRM_DEBUG_KMS("edid retrieved %p\n", edid); if (radeon_connector->edid) { - drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); + drm_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); return ret; } - drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); + drm_connector_update_edid_property(&radeon_connector->base, NULL); return ret; } @@ -290,7 +290,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); - drm_mode_connector_set_path_property(connector, pathprop); + drm_connector_set_path_property(connector, pathprop); return connector; } diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c6ee80216cf4..c341fb2a5b56 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -211,7 +211,7 @@ radeon_link_encoder_connector(struct drm_device *dev) list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->devices & radeon_connector->devices) { - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) radeon_encoder_add_backlight(radeon_encoder, connector); } diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 155ad840f3c5..4c39de3f4f0f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -353,7 +353,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge) drm_connector_helper_add(connector, &rcar_lvds_conn_helper_funcs); - ret = drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret < 0) return ret; @@ -434,8 +434,8 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds) ret = -EPROBE_DEFER; } else { lvds->panel = of_drm_find_panel(remote); - if (!lvds->panel) - ret = -EPROBE_DEFER; + if (IS_ERR(lvds->panel)) + ret = PTR_ERR(lvds->panel); } done: diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index c6fbdcd87c16..8ad0d773dc33 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -275,7 +275,7 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector) dp->sink_has_audio = drm_detect_monitor_audio(edid); ret = drm_add_edid_modes(connector, edid); if (ret) - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, edid); } mutex_unlock(&dp->lock); @@ -1062,7 +1062,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs); - ret = drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret) { DRM_ERROR("failed to attach connector and encoder\n"); goto err_free_connector; diff --git 
a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index d53d5a09547f..662b6cb5d3f0 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c @@ -595,7 +595,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host, dsi->format = device->format; dsi->mode_flags = device->mode_flags; dsi->panel = of_drm_find_panel(device->dev.of_node); - if (dsi->panel) + if (!IS_ERR(dsi->panel)) return drm_panel_attach(dsi->panel, &dsi->connector); return -EINVAL; @@ -1129,7 +1129,7 @@ static int dw_mipi_dsi_register(struct drm_device *drm, &dw_mipi_dsi_atomic_connector_funcs, DRM_MODE_CONNECTOR_DSI); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index 88d0774c97bd..1c02b3e61299 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -565,7 +565,7 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector) if (edid) { hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid); hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); } @@ -634,7 +634,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi) drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); - drm_mode_connector_attach_encoder(&hdmi->connector, encoder); + drm_connector_attach_encoder(&hdmi->connector, encoder); return 0; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index effecbed2d11..1359e5c773e4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -243,18 +243,6 @@ static enum vop_data_format vop_convert_format(uint32_t format) } } -static bool is_yuv_support(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV24: - return true; - default: - return false; - } -} - static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src, uint32_t dst, bool is_horizontal, int vsu_mode, int *vskiplines) @@ -298,7 +286,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, uint16_t cbcr_ver_scl_mode = SCALE_NONE; int hsub = drm_format_horz_chroma_subsampling(pixel_format); int vsub = drm_format_vert_chroma_subsampling(pixel_format); - bool is_yuv = is_yuv_support(pixel_format); + const struct drm_format_info *info; + bool is_yuv = false; uint16_t cbcr_src_w = src_w / hsub; uint16_t cbcr_src_h = src_h / vsub; uint16_t vsu_mode; @@ -306,6 +295,11 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, uint32_t val; int vskiplines; + info = drm_format_info(pixel_format); + + if (info->is_yuv) + is_yuv = true; + if (dst_w > 3840) { DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n"); return; @@ -680,7 +674,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, * Src.x1 can be odd when do clip, but yuv plane start point * need align with 2 pixel. 
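+ * (For chroma-subsampled formats such as NV12 and NV16 the chroma planes carry one sample per two luma pixels horizontally, so an odd start position would have no matching chroma sample.)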
*/ - if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) { + if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) { DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; } @@ -767,7 +761,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, VOP_WIN_SET(vop, win, format, format); VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); - if (is_yuv_support(fb->format->format)) { + if (fb->format->is_yuv) { int hsub = drm_format_horz_chroma_subsampling(fb->format->format); int vsub = drm_format_vert_chroma_subsampling(fb->format->format); int bpp = fb->format->cpp[1]; diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index b3f6f524b402..456bd9f13bae 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -434,7 +434,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, drm_connector_helper_add(connector, &rockchip_lvds_connector_helper_funcs); - ret = drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret < 0) { DRM_DEV_ERROR(drm_dev->dev, "failed to attach encoder: %d\n", ret); diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 40df8887fc17..fc66167b0641 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -675,7 +675,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev, if (ret < 0) goto err_cleanup; - ret = drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret < 0) goto err_backlight; diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 90c46b49c931..832fc43960ee 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -224,7 +224,7 @@ static int sti_bind(struct device *dev) ret = sti_init(ddev); if (ret) - goto err_drm_dev_unref; + goto err_drm_dev_put; ret = component_bind_all(ddev->dev, ddev); if (ret) @@ -248,8 +248,8 @@ err_register: drm_mode_config_cleanup(ddev); err_cleanup: sti_cleanup(ddev); -err_drm_dev_unref: - drm_dev_unref(ddev); +err_drm_dev_put: + drm_dev_put(ddev); return ret; } @@ -259,7 +259,7 @@ static void sti_unbind(struct device *dev) drm_dev_unregister(ddev); sti_cleanup(ddev); - drm_dev_unref(ddev); + drm_dev_put(ddev); } static const struct component_master_ops sti_ops = { diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index a5979cd25cc7..b08376b7611b 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -387,7 +387,9 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force) if (!dvo->panel) { dvo->panel = of_drm_find_panel(dvo->panel_node); - if (dvo->panel) + if (IS_ERR(dvo->panel)) + dvo->panel = NULL; + else drm_panel_attach(dvo->panel, connector); } @@ -484,7 +486,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) drm_connector_helper_add(drm_connector, &sti_dvo_connector_helper_funcs); - err = drm_mode_connector_attach_encoder(drm_connector, encoder); + err = drm_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); goto err_sysfs; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 67bbdb49fffc..49438337f70d 100644 --- 
a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -709,7 +709,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) drm_connector_helper_add(drm_connector, &sti_hda_connector_helper_funcs); - err = drm_mode_connector_attach_encoder(drm_connector, encoder); + err = drm_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); goto err_sysfs; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 58f431102512..34cdc4644435 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -977,7 +977,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector) cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid); count = drm_add_edid_modes(connector, edid); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); kfree(edid); return count; @@ -1290,7 +1290,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) hdmi->drm_connector = drm_connector; - err = drm_mode_connector_attach_encoder(drm_connector, encoder); + err = drm_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); goto err_sysfs; diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index 8698e08313e1..f2021b23554d 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -148,16 +148,16 @@ static int stm_drm_platform_probe(struct platform_device *pdev) ret = drv_load(ddev); if (ret) - goto err_unref; + goto err_put; ret = drm_dev_register(ddev, 0); if (ret) - goto err_unref; + goto err_put; return 0; -err_unref: - drm_dev_unref(ddev); +err_put: + drm_dev_put(ddev); return ret; } @@ -170,7 +170,7 @@ static int stm_drm_platform_remove(struct platform_device *pdev) drm_dev_unregister(ddev); drv_unload(ddev); - drm_dev_unref(ddev); + drm_dev_put(ddev); return 0; } diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index d997a6014d6c..808d9fb627e9 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -457,6 +457,14 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc, int target_max = target + CLK_TOLERANCE_HZ; int result; + result = clk_round_rate(ldev->pixel_clk, target); + + DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result); + + /* Filter modes according to the max frequency supported by the pads */ + if (result > ldev->caps.pad_max_freq_hz) + return MODE_CLOCK_HIGH; + /* * Accept all "preferred" modes: * - this is important for panels because panel clock tolerances are @@ -468,10 +476,6 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc, if (mode->type & DRM_MODE_TYPE_PREFERRED) return MODE_OK; - result = clk_round_rate(ldev->pixel_clk, target); - - DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result); - /* * Filter modes according to the clock value, particularly useful for * hdmi modes that require precise pixel clocks. @@ -991,11 +995,15 @@ static int ltdc_get_caps(struct drm_device *ddev) * does not work on 2nd layer. 
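+ * pad_max_freq_hz, set per hardware version below, bounds the pixel clock the output pads can drive; ltdc_crtc_mode_valid() above now rejects modes whose rounded clock exceeds it as MODE_CLOCK_HIGH.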
*/ ldev->caps.non_alpha_only_l1 = true; + ldev->caps.pad_max_freq_hz = 90000000; + if (ldev->caps.hw_version == HWVER_10200) + ldev->caps.pad_max_freq_hz = 65000000; break; case HWVER_20101: ldev->caps.reg_ofs = REG_OFS_4; ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1; ldev->caps.non_alpha_only_l1 = false; + ldev->caps.pad_max_freq_hz = 150000000; break; default: return -ENODEV; @@ -1074,8 +1082,11 @@ int ltdc_load(struct drm_device *ddev) } } - if (!IS_ERR(rstc)) + if (!IS_ERR(rstc)) { + reset_control_assert(rstc); + usleep_range(10, 20); reset_control_deassert(rstc); + } /* Disable interrupts */ reg_clear(ldev->regs, LTDC_IER, diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h index 1e16d6afb0d2..d5afb8960867 100644 --- a/drivers/gpu/drm/stm/ltdc.h +++ b/drivers/gpu/drm/stm/ltdc.h @@ -18,6 +18,7 @@ struct ltdc_caps { u32 bus_width; /* bus width (32 or 64 bits) */ const u32 *pix_fmt_hw; /* supported pixel formats */ bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */ + int pad_max_freq_hz; /* max frequency supported by pad */ }; #define LTDC_MAX_LAYER 4 diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 156a865c3e6d..c2c042287c19 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -68,4 +68,11 @@ config DRM_SUN8I_MIXER graphics mixture and feed graphics to TCON, If M is selected the module will be called sun8i-mixer. +config DRM_SUN8I_TCON_TOP + tristate + default DRM_SUN4I if DRM_SUN8I_MIXER!=n + help + TCON TOP is responsible for configuring display pipeline for + HDMI, TVE and LCD. + endif diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile index 14f420f1d4ae..b04ea0f3da75 100644 --- a/drivers/gpu/drm/sun4i/Makefile +++ b/drivers/gpu/drm/sun4i/Makefile @@ -36,4 +36,5 @@ obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o -obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o sun8i_tcon_top.o +obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o +obj-$(CONFIG_DRM_SUN8I_TCON_TOP) += sun8i_tcon_top.o diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index de0a76dfa1a2..d7950b52a1fd 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -86,12 +86,6 @@ static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format) } } -static inline bool sun4i_backend_format_is_yuv(uint32_t format) -{ - return sun4i_backend_format_is_planar_yuv(format) || - sun4i_backend_format_is_packed_yuv422(format); -} - static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine) { int i; @@ -304,7 +298,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend, SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN, val); - if (sun4i_backend_format_is_yuv(fb->format->format)) + if (fb->format->is_yuv) return sun4i_backend_update_yuv_format(backend, layer, plane); ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val); @@ -384,7 +378,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, */ paddr -= PHYS_OFFSET; - if (sun4i_backend_format_is_yuv(fb->format->format)) + if (fb->format->is_yuv) return sun4i_backend_update_yuv_buffer(backend, fb, paddr); /* Write the 32 lower bits of the address (in bits) */ @@ -502,7 +496,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine, if (fb->format->has_alpha ||
(plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE)) num_alpha_planes++; - if (sun4i_backend_format_is_yuv(fb->format->format)) { + if (fb->format->is_yuv) { DRM_DEBUG_DRIVER("Plane FB format is YUV\n"); num_yuv_planes++; } diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 6ddf4eaccb40..dd19d674055c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -144,7 +144,7 @@ cleanup_mode_config: drm_mode_config_cleanup(drm); of_reserved_mem_device_release(dev); free_drm: - drm_dev_unref(drm); + drm_dev_put(drm); return ret; } @@ -157,7 +157,7 @@ static void sun4i_drv_unbind(struct device *dev) sun4i_framebuffer_free(drm); drm_mode_config_cleanup(drm); of_reserved_mem_device_release(dev); - drm_dev_unref(drm); + drm_dev_put(drm); } static const struct component_master_ops sun4i_drv_master_ops = { @@ -216,7 +216,8 @@ static bool sun4i_drv_node_is_tcon_with_ch0(struct device_node *node) static bool sun4i_drv_node_is_tcon_top(struct device_node *node) { - return !!of_match_node(sun8i_tcon_top_of_table, node); + return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && + !!of_match_node(sun8i_tcon_top_of_table, node); } static int compare_of(struct device *dev, void *data) @@ -417,6 +418,7 @@ static const struct of_device_id sun4i_drv_of_table[] = { { .compatible = "allwinner,sun8i-a33-display-engine" }, { .compatible = "allwinner,sun8i-a83t-display-engine" }, { .compatible = "allwinner,sun8i-h3-display-engine" }, + { .compatible = "allwinner,sun8i-r40-display-engine" }, { .compatible = "allwinner,sun8i-v3s-display-engine" }, { .compatible = "allwinner,sun9i-a80-display-engine" }, { } diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index fa4bcd092eaf..061d2e0d9011 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -220,7 +220,7 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector) DRM_DEBUG_DRIVER("Monitor is %s monitor\n", hdmi->hdmi_monitor ? 
"an HDMI" : "a DVI"); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); cec_s_phys_addr_from_edid(hdmi->cec_adap, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); @@ -623,7 +623,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, ret = cec_register_adapter(hdmi->cec_adap, dev); if (ret < 0) goto err_cleanup_connector; - drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder); + drm_connector_attach_encoder(&hdmi->connector, &hdmi->encoder); return 0; diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index a69fe2e1f9d1..af7dcb6da351 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -149,7 +149,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) goto err_cleanup_connector; } - drm_mode_connector_attach_encoder(&lvds->connector, + drm_connector_attach_encoder(&lvds->connector, &lvds->encoder); ret = drm_panel_attach(tcon->panel, &lvds->connector); diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 96d21b07f8fc..bf068da6b12e 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -215,7 +215,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) goto err_cleanup_connector; } - drm_mode_connector_attach_encoder(&rgb->connector, + drm_connector_attach_encoder(&rgb->connector, &rgb->encoder); ret = drm_panel_attach(tcon->panel, &rgb->connector); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index aacc841d3dc6..3fb084f802e2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -811,6 +811,7 @@ sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv, * remote output id. If this for some reason can't be done, 0 * is used as input port id. 
*/ + of_node_put(port); port = of_graph_get_remote_port(ep); if (!of_property_read_u32(port, "reg", ®) && reg > 0) reg -= 1; diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index b070d522ed8d..1a838d208211 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -623,7 +623,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master, } tv->connector.interlace_allowed = true; - drm_mode_connector_attach_encoder(&tv->connector, &tv->encoder); + drm_connector_attach_encoder(&tv->connector, &tv->encoder); return 0; diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index d4e7d16a2514..e3b34a345546 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -13,6 +13,7 @@ #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/reset.h> +#include <linux/slab.h> #include <linux/phy/phy.h> @@ -247,10 +248,8 @@ static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len) return crc_ccitt(0xffff, buffer, len); } -static u16 sun6i_dsi_crc_repeat_compute(u8 pd, size_t len) +static u16 sun6i_dsi_crc_repeat(u8 pd, u8 *buffer, size_t len) { - u8 buffer[len]; - memset(buffer, pd, len); return sun6i_dsi_crc_compute(buffer, len); @@ -274,11 +273,11 @@ static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc) wc & 0xff, wc >> 8); } -static u32 sun6i_dsi_build_blk1_pkt(u16 pd, size_t len) +static u32 sun6i_dsi_build_blk1_pkt(u16 pd, u8 *buffer, size_t len) { u32 val = SUN6I_DSI_BLK_PD(pd); - return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat_compute(pd, len)); + return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat(pd, buffer, len)); } static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi) @@ -452,6 +451,54 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, struct mipi_dsi_device *device = dsi->device; unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; u16 hbp, hfp, hsa, hblk, vblk; + size_t bytes; + u8 *buffer; + + /* Do all timing calculations up front to allocate buffer space */ + + /* + * A sync period is composed of a blanking packet (4 bytes + + * payload + 2 bytes) and a sync event packet (4 bytes). Its + * minimal size is therefore 10 bytes + */ +#define HSA_PACKET_OVERHEAD 10 + hsa = max((unsigned int)HSA_PACKET_OVERHEAD, + (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD); + + /* + * The backporch is set using a blanking packet (4 bytes + + * payload + 2 bytes). Its minimal size is therefore 6 bytes + */ +#define HBP_PACKET_OVERHEAD 6 + hbp = max((unsigned int)HBP_PACKET_OVERHEAD, + (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD); + + /* + * The frontporch is set using a blanking packet (4 bytes + + * payload + 2 bytes). Its minimal size is therefore 6 bytes + */ +#define HFP_PACKET_OVERHEAD 6 + hfp = max((unsigned int)HFP_PACKET_OVERHEAD, + (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD); + + /* + * hblk seems to be the line + porches length. + */ + hblk = mode->htotal * Bpp - hsa; + + /* + * And I'm not entirely sure what vblk is about. The driver in + * Allwinner BSP is using a rather convoluted calculation + * there only for 4 lanes. However, using 0 (the !4 lanes + * case) even with a 4 lanes screen seems to work... + */ + vblk = 0; + + /* How many bytes do we need to send all payloads? 
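+ * A single scratch buffer sized for the largest of the hsa, hbp, hfp, hblk and vblk payloads is allocated once and reused for every blk1 packet checksum, replacing the variable-length array that sun6i_dsi_crc_repeat_compute() used to place on the stack.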
*/ + bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk); + buffer = kmalloc(bytes, GFP_KERNEL); + if (WARN_ON(!buffer)) + return; regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0); @@ -485,63 +532,37 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) | SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal)); - /* - * A sync period is composed of a blanking packet (4 bytes + - * payload + 2 bytes) and a sync event packet (4 bytes). Its - * minimal size is therefore 10 bytes - */ -#define HSA_PACKET_OVERHEAD 10 - hsa = max((unsigned int)HSA_PACKET_OVERHEAD, - (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD); + /* sync */ regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG, sun6i_dsi_build_blk0_pkt(device->channel, hsa)); regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG, - sun6i_dsi_build_blk1_pkt(0, hsa)); + sun6i_dsi_build_blk1_pkt(0, buffer, hsa)); - /* - * The backporch is set using a blanking packet (4 bytes + - * payload + 2 bytes). Its minimal size is therefore 6 bytes - */ -#define HBP_PACKET_OVERHEAD 6 - hbp = max((unsigned int)HBP_PACKET_OVERHEAD, - (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD); + /* backporch */ regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG, sun6i_dsi_build_blk0_pkt(device->channel, hbp)); regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG, - sun6i_dsi_build_blk1_pkt(0, hbp)); + sun6i_dsi_build_blk1_pkt(0, buffer, hbp)); - /* - * The frontporch is set using a blanking packet (4 bytes + - * payload + 2 bytes). Its minimal size is therefore 6 bytes - */ -#define HFP_PACKET_OVERHEAD 6 - hfp = max((unsigned int)HFP_PACKET_OVERHEAD, - (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD); + /* frontporch */ regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG, sun6i_dsi_build_blk0_pkt(device->channel, hfp)); regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG, - sun6i_dsi_build_blk1_pkt(0, hfp)); + sun6i_dsi_build_blk1_pkt(0, buffer, hfp)); - /* - * hblk seems to be the line + porches length. - */ - hblk = mode->htotal * Bpp - hsa; + /* hblk */ regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG, sun6i_dsi_build_blk0_pkt(device->channel, hblk)); regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG, - sun6i_dsi_build_blk1_pkt(0, hblk)); + sun6i_dsi_build_blk1_pkt(0, buffer, hblk)); - /* - * And I'm not entirely sure what vblk is about. The driver in - * Allwinner BSP is using a rather convoluted calculation - * there only for 4 lanes. However, using 0 (the !4 lanes - * case) even with a 4 lanes screen seems to work... 
- */ - vblk = 0; + /* vblk */ regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG, sun6i_dsi_build_blk0_pkt(device->channel, vblk)); regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG, - sun6i_dsi_build_blk1_pkt(0, vblk)); + sun6i_dsi_build_blk1_pkt(0, buffer, vblk)); + + kfree(buffer); } static int sun6i_dsi_start(struct sun6i_dsi *dsi, @@ -812,8 +833,8 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host, dsi->device = device; dsi->panel = of_drm_find_panel(device->dev.of_node); - if (!dsi->panel) - return -EINVAL; + if (IS_ERR(dsi->panel)) + return PTR_ERR(dsi->panel); dev_info(host->dev, "Attached device %s\n", device->name); @@ -920,7 +941,7 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master, goto err_cleanup_connector; } - drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder); + drm_connector_attach_encoder(&dsi->connector, &dsi->encoder); drm_panel_attach(dsi->panel, &dsi->connector); return 0; diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index 3459b9ec56c9..31875b636434 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -44,7 +44,8 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector, static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node) { - return !!of_match_node(sun8i_tcon_top_of_table, node); + return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && + !!of_match_node(sun8i_tcon_top_of_table, node); } static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm, @@ -53,22 +54,14 @@ static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm, struct device_node *port, *ep, *remote, *remote_port; u32 crtcs = 0; - port = of_graph_get_port_by_id(node, 0); - if (!port) - return 0; - - ep = of_get_next_available_child(port, NULL); - if (!ep) - return 0; - - remote = of_graph_get_remote_port_parent(ep); + remote = of_graph_get_remote_node(node, 0, -1); if (!remote) return 0; if (sun8i_dw_hdmi_node_is_tcon_top(remote)) { port = of_graph_get_port_by_id(remote, 4); if (!port) - return 0; + goto crtcs_exit; for_each_child_of_node(port, ep) { remote_port = of_graph_get_remote_port(ep); @@ -81,6 +74,9 @@ static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm, crtcs = drm_of_find_possible_crtcs(drm, node); } +crtcs_exit: + of_node_put(remote); + return crtcs; } @@ -225,7 +221,7 @@ static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids); -struct platform_driver sun8i_dw_hdmi_pltfm_driver = { +static struct platform_driver sun8i_dw_hdmi_pltfm_driver = { .probe = sun8i_dw_hdmi_probe, .remove = sun8i_dw_hdmi_remove, .driver = { diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index ee8febb25903..fc3713608f78 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -21,8 +21,9 @@ #include <linux/component.h> #include <linux/dma-mapping.h> -#include <linux/reset.h> #include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/reset.h> #include "sun4i_drv.h" #include "sun8i_mixer.h" @@ -322,6 +323,42 @@ static struct regmap_config sun8i_mixer_regmap_config = { .max_register = 0xbfffc, /* guessed */ }; +static int sun8i_mixer_of_get_id(struct device_node *node) +{ + struct device_node *port, *ep; + int ret = -EINVAL; + + /* output is port 1 */ + port = of_graph_get_port_by_id(node, 1); + if (!port) + return -EINVAL; + + /* try to find downstream endpoint */ + for_each_available_child_of_node(port, ep) 
{ + struct device_node *remote; + u32 reg; + + remote = of_graph_get_remote_endpoint(ep); + if (!remote) + continue; + + ret = of_property_read_u32(remote, "reg", ®); + if (!ret) { + of_node_put(remote); + of_node_put(ep); + of_node_put(port); + + return reg; + } + + of_node_put(remote); + } + + of_node_put(port); + + return ret; +} + static int sun8i_mixer_bind(struct device *dev, struct device *master, void *data) { @@ -353,8 +390,16 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master, dev_set_drvdata(dev, mixer); mixer->engine.ops = &sun8i_engine_ops; mixer->engine.node = dev->of_node; - /* The ID of the mixer currently doesn't matter */ - mixer->engine.id = -1; + + /* + * While this function can fail, we shouldn't do anything + * if this happens. Some early DE2 DT entries don't provide + * mixer id but work nevertheless because matching between + * TCON and mixer is done by comparing node pointers (old + * way) instead comparing ids. If this function fails and + * id is needed, it will fail during id matching anyway. + */ + mixer->engine.id = sun8i_mixer_of_get_id(dev->of_node); mixer->cfg = of_device_get_match_data(dev); if (!mixer->cfg) @@ -432,14 +477,14 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master, regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0), SUN8I_MIXER_BLEND_COLOR_BLACK); - /* Fixed zpos for now */ - regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE, 0x43210); - plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num; for (i = 0; i < plane_cnt; i++) regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i), SUN8I_MIXER_BLEND_MODE_DEF); + regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL, + SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0); + return 0; err_disable_bus_clk: diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index f34e70c42adf..406c42e752d7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -44,6 +44,7 @@ #define SUN8I_MIXER_BLEND_CK_MIN(x) (0x10e0 + 0x04 * (x)) #define SUN8I_MIXER_BLEND_OUTCTL 0x10fc +#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8) #define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe) #define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe) BIT(pipe) /* colors are always in AARRGGBB format */ @@ -51,6 +52,9 @@ /* The following numbers are some still unknown magic numbers */ #define SUN8I_MIXER_BLEND_MODE_DEF 0x03010301 +#define SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(n) (0xf << ((n) << 2)) +#define SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(n) ((n) << 2) + #define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED BIT(1) #define SUN8I_MIXER_FBFMT_ARGB8888 0 diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index 8da0460e0028..55fe398d8290 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c @@ -14,45 +14,95 @@ #include "sun8i_tcon_top.h" -static int sun8i_tcon_top_get_connected_ep_id(struct device_node *node, - int port_id) +static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node) { - struct device_node *ep, *remote, *port; - struct of_endpoint endpoint; + return !!of_match_node(sun8i_tcon_top_of_table, node); +} - port = of_graph_get_port_by_id(node, port_id); - if (!port) - return -ENOENT; +int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon) +{ + struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev); + unsigned long flags; + u32 val; - for_each_available_child_of_node(port, ep) { - remote = 
of_graph_get_remote_port_parent(ep); - if (!remote) - continue; + if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) { + dev_err(dev, "Device is not TCON TOP!\n"); + return -EINVAL; + } - if (of_device_is_available(remote)) { - of_graph_parse_endpoint(ep, &endpoint); + if (tcon < 2 || tcon > 3) { + dev_err(dev, "TCON index must be 2 or 3!\n"); + return -EINVAL; + } - of_node_put(remote); + spin_lock_irqsave(&tcon_top->reg_lock, flags); - return endpoint.id; - } + val = readl(tcon_top->regs + TCON_TOP_GATE_SRC_REG); + val &= ~TCON_TOP_HDMI_SRC_MSK; + val |= FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, tcon - 1); + writel(val, tcon_top->regs + TCON_TOP_GATE_SRC_REG); + + spin_unlock_irqrestore(&tcon_top->reg_lock, flags); + + return 0; +} +EXPORT_SYMBOL(sun8i_tcon_top_set_hdmi_src); + +int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon) +{ + struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev); + unsigned long flags; + u32 reg; + + if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) { + dev_err(dev, "Device is not TCON TOP!\n"); + return -EINVAL; + } - of_node_put(remote); + if (mixer > 1) { + dev_err(dev, "Mixer index is too high!\n"); + return -EINVAL; } - return -ENOENT; + if (tcon > 3) { + dev_err(dev, "TCON index is too high!\n"); + return -EINVAL; + } + + spin_lock_irqsave(&tcon_top->reg_lock, flags); + + reg = readl(tcon_top->regs + TCON_TOP_PORT_SEL_REG); + if (mixer == 0) { + reg &= ~TCON_TOP_PORT_DE0_MSK; + reg |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, tcon); + } else { + reg &= ~TCON_TOP_PORT_DE1_MSK; + reg |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, tcon); + } + writel(reg, tcon_top->regs + TCON_TOP_PORT_SEL_REG); + + spin_unlock_irqrestore(&tcon_top->reg_lock, flags); + + return 0; } +EXPORT_SYMBOL(sun8i_tcon_top_de_config); + static struct clk_hw *sun8i_tcon_top_register_gate(struct device *dev, - struct clk *parent, + const char *parent, void __iomem *regs, spinlock_t *lock, u8 bit, int name_index) { const char *clk_name, *parent_name; - int ret; + int ret, index; + + index = of_property_match_string(dev->of_node, "clock-names", parent); + if (index < 0) + return ERR_PTR(index); + + parent_name = of_clk_get_parent_name(dev->of_node, index); - parent_name = __clk_get_name(parent); ret = of_property_read_string_index(dev->of_node, "clock-output-names", name_index, &clk_name); @@ -69,14 +119,11 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); - struct clk *dsi, *tcon_tv0, *tcon_tv1, *tve0, *tve1; struct clk_hw_onecell_data *clk_data; struct sun8i_tcon_top *tcon_top; - bool mixer0_unused = false; struct resource *res; void __iomem *regs; - int ret, i, id; - u32 val; + int ret, i; tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL); if (!tcon_top) @@ -103,38 +150,9 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, return PTR_ERR(tcon_top->bus); } - dsi = devm_clk_get(dev, "dsi"); - if (IS_ERR(dsi)) { - dev_err(dev, "Couldn't get the dsi clock\n"); - return PTR_ERR(dsi); - } - - tcon_tv0 = devm_clk_get(dev, "tcon-tv0"); - if (IS_ERR(tcon_tv0)) { - dev_err(dev, "Couldn't get the tcon-tv0 clock\n"); - return PTR_ERR(tcon_tv0); - } - - tcon_tv1 = devm_clk_get(dev, "tcon-tv1"); - if (IS_ERR(tcon_tv1)) { - dev_err(dev, "Couldn't get the tcon-tv1 clock\n"); - return PTR_ERR(tcon_tv1); - } - - tve0 = devm_clk_get(dev, "tve0"); - if (IS_ERR(tve0)) { - dev_err(dev, "Couldn't get the tve0 clock\n"); - return PTR_ERR(tve0); - } - - tve1 = devm_clk_get(dev, "tve1"); 
- if (IS_ERR(tve1)) { - dev_err(dev, "Couldn't get the tve1 clock\n"); - return PTR_ERR(tve1); - } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(dev, res); + tcon_top->regs = regs; if (IS_ERR(regs)) return PTR_ERR(regs); @@ -150,50 +168,6 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, goto err_assert_reset; } - val = 0; - - /* check if HDMI mux output is connected */ - if (sun8i_tcon_top_get_connected_ep_id(dev->of_node, 5) >= 0) { - /* find HDMI input endpoint id, if it is connected at all*/ - id = sun8i_tcon_top_get_connected_ep_id(dev->of_node, 4); - if (id >= 0) - val = FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, id + 1); - else - DRM_DEBUG_DRIVER("TCON TOP HDMI input is not connected\n"); - } else { - DRM_DEBUG_DRIVER("TCON TOP HDMI output is not connected\n"); - } - - writel(val, regs + TCON_TOP_GATE_SRC_REG); - - val = 0; - - /* process mixer0 mux output */ - id = sun8i_tcon_top_get_connected_ep_id(dev->of_node, 1); - if (id >= 0) { - val = FIELD_PREP(TCON_TOP_PORT_DE0_MSK, id); - } else { - DRM_DEBUG_DRIVER("TCON TOP mixer0 output is not connected\n"); - mixer0_unused = true; - } - - /* process mixer1 mux output */ - id = sun8i_tcon_top_get_connected_ep_id(dev->of_node, 3); - if (id >= 0) { - val |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, id); - - /* - * mixer0 mux has priority over mixer1 mux. We have to - * make sure mixer0 doesn't overtake TCON from mixer1. - */ - if (mixer0_unused && id == 0) - val |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, 1); - } else { - DRM_DEBUG_DRIVER("TCON TOP mixer1 output is not connected\n"); - } - - writel(val, regs + TCON_TOP_PORT_SEL_REG); - /* * TCON TOP has two muxes, which select parent clock for each TCON TV * channel clock. Parent could be either TCON TV or TVE clock. For now @@ -203,17 +177,17 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, * to TVE clock parent. 
*/ clk_data->hws[CLK_TCON_TOP_TV0] = - sun8i_tcon_top_register_gate(dev, tcon_tv0, regs, + sun8i_tcon_top_register_gate(dev, "tcon-tv0", regs, &tcon_top->reg_lock, TCON_TOP_TCON_TV0_GATE, 0); clk_data->hws[CLK_TCON_TOP_TV1] = - sun8i_tcon_top_register_gate(dev, tcon_tv1, regs, + sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs, &tcon_top->reg_lock, TCON_TOP_TCON_TV1_GATE, 1); clk_data->hws[CLK_TCON_TOP_DSI] = - sun8i_tcon_top_register_gate(dev, dsi, regs, + sun8i_tcon_top_register_gate(dev, "dsi", regs, &tcon_top->reg_lock, TCON_TOP_TCON_DSI_GATE, 2); diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.h b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h index 39838bbfeaee..0390584a330e 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.h +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h @@ -26,6 +26,7 @@ struct sun8i_tcon_top { struct clk *bus; struct clk_hw_onecell_data *clk_data; + void __iomem *regs; struct reset_control *rst; /* @@ -37,4 +38,7 @@ struct sun8i_tcon_top { extern const struct of_device_id sun8i_tcon_top_of_table[]; +int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon); +int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon); + #endif /* _SUN8I_TCON_TOP_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c index 9a540330cb79..28c15c6ef1ef 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c @@ -27,7 +27,8 @@ #include "sun8i_ui_scaler.h" static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel, - int overlay, bool enable) + int overlay, bool enable, unsigned int zpos, + unsigned int old_zpos) { u32 val; @@ -43,18 +44,36 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel, SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay), SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val); - if (enable) - val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel); - else - val = 0; + if (!enable || zpos != old_zpos) { + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_BLEND_PIPE_CTL, + SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos), + 0); - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_BLEND_PIPE_CTL, - SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val); + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_BLEND_ROUTE, + SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos), + 0); + } + + if (enable) { + val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos); + + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_BLEND_PIPE_CTL, val, val); + + val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos); + + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_BLEND_ROUTE, + SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos), + val); + } } static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel, - int overlay, struct drm_plane *plane) + int overlay, struct drm_plane *plane, + unsigned int zpos) { struct drm_plane_state *state = plane->state; u32 src_w, src_h, dst_w, dst_h; @@ -137,10 +156,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel, state->dst.x1, state->dst.y1); DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h); regmap_write(mixer->engine.regs, - SUN8I_MIXER_BLEND_ATTR_COORD(channel), + SUN8I_MIXER_BLEND_ATTR_COORD(zpos), SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1)); regmap_write(mixer->engine.regs, - SUN8I_MIXER_BLEND_ATTR_INSIZE(channel), + SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos), outsize); return 0; @@ -236,30 +255,35 @@ static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { struct 
sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane); + unsigned int old_zpos = old_state->normalized_zpos; struct sun8i_mixer *mixer = layer->mixer; - sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false); + sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false, 0, + old_zpos); } static void sun8i_ui_layer_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane); + unsigned int zpos = plane->state->normalized_zpos; + unsigned int old_zpos = old_state->normalized_zpos; struct sun8i_mixer *mixer = layer->mixer; if (!plane->state->visible) { sun8i_ui_layer_enable(mixer, layer->channel, - layer->overlay, false); + layer->overlay, false, 0, old_zpos); return; } sun8i_ui_layer_update_coord(mixer, layer->channel, - layer->overlay, plane); + layer->overlay, plane, zpos); sun8i_ui_layer_update_formats(mixer, layer->channel, layer->overlay, plane); sun8i_ui_layer_update_buffer(mixer, layer->channel, layer->overlay, plane); - sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, true); + sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, + true, zpos, old_zpos); } static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = { @@ -307,6 +331,7 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm, enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY; int channel = mixer->cfg->vi_num + index; struct sun8i_ui_layer *layer; + unsigned int plane_cnt; int ret; layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL); @@ -327,8 +352,10 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm, return ERR_PTR(ret); } - /* fixed zpos for now */ - ret = drm_plane_create_zpos_immutable_property(&layer->plane, channel); + plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num; + + ret = drm_plane_create_zpos_property(&layer->plane, channel, + 0, plane_cnt - 1); if (ret) { dev_err(drm->dev, "Couldn't add zpos property\n"); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index 5877f8ef5895..f4fe97813f94 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -21,7 +21,8 @@ #include "sun8i_vi_scaler.h" static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel, - int overlay, bool enable) + int overlay, bool enable, unsigned int zpos, + unsigned int old_zpos) { u32 val; @@ -37,18 +38,36 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel, SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay), SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val); - if (enable) - val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel); - else - val = 0; + if (!enable || zpos != old_zpos) { + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_BLEND_PIPE_CTL, + SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos), + 0); - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_BLEND_PIPE_CTL, - SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val); + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_BLEND_ROUTE, + SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos), + 0); + } + + if (enable) { + val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos); + + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_BLEND_PIPE_CTL, val, val); + + val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos); + + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_BLEND_ROUTE, + SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos), + val); + } } static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, - 
int overlay, struct drm_plane *plane) + int overlay, struct drm_plane *plane, + unsigned int zpos) { struct drm_plane_state *state = plane->state; const struct drm_format_info *format = state->fb->format; @@ -130,10 +149,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, state->dst.x1, state->dst.y1); DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h); regmap_write(mixer->engine.regs, - SUN8I_MIXER_BLEND_ATTR_COORD(channel), + SUN8I_MIXER_BLEND_ATTR_COORD(zpos), SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1)); regmap_write(mixer->engine.regs, - SUN8I_MIXER_BLEND_ATTR_INSIZE(channel), + SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos), outsize); return 0; @@ -264,30 +283,35 @@ static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane); + unsigned int old_zpos = old_state->normalized_zpos; struct sun8i_mixer *mixer = layer->mixer; - sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false); + sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false, 0, + old_zpos); } static void sun8i_vi_layer_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane); + unsigned int zpos = plane->state->normalized_zpos; + unsigned int old_zpos = old_state->normalized_zpos; struct sun8i_mixer *mixer = layer->mixer; if (!plane->state->visible) { sun8i_vi_layer_enable(mixer, layer->channel, - layer->overlay, false); + layer->overlay, false, 0, old_zpos); return; } sun8i_vi_layer_update_coord(mixer, layer->channel, - layer->overlay, plane); + layer->overlay, plane, zpos); sun8i_vi_layer_update_formats(mixer, layer->channel, layer->overlay, plane); sun8i_vi_layer_update_buffer(mixer, layer->channel, layer->overlay, plane); - sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, true); + sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, + true, zpos, old_zpos); } static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = { @@ -351,6 +375,7 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm, int index) { struct sun8i_vi_layer *layer; + unsigned int plane_cnt; int ret; layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL); @@ -368,8 +393,10 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm, return ERR_PTR(ret); } - /* fixed zpos for now */ - ret = drm_plane_create_zpos_immutable_property(&layer->plane, index); + plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num; + + ret = drm_plane_create_zpos_property(&layer->plane, index, + 0, plane_cnt - 1); if (ret) { dev_err(drm->dev, "Couldn't add zpos property\n"); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 87c5d89bc9ba..ee6ca8fa1c65 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1052,7 +1052,7 @@ static int tegra_dsi_init(struct host1x_client *client) drm_encoder_helper_add(&dsi->output.encoder, &tegra_dsi_encoder_helper_funcs); - drm_mode_connector_attach_encoder(&dsi->output.connector, + drm_connector_attach_encoder(&dsi->output.connector, &dsi->output.encoder); drm_connector_register(&dsi->output.connector); @@ -1411,6 +1411,9 @@ static int tegra_dsi_host_attach(struct mipi_dsi_host *host, struct tegra_output *output = &dsi->output; output->panel = of_drm_find_panel(device->dev.of_node); + if (IS_ERR(output->panel)) + output->panel = NULL; + if (output->panel && 
output->connector.dev) { drm_panel_attach(output->panel, &output->connector); drm_helper_hpd_irq_event(output->connector.dev); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 784739a9f497..0082468f703c 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1488,7 +1488,7 @@ static int tegra_hdmi_init(struct host1x_client *client) drm_encoder_helper_add(&hdmi->output.encoder, &tegra_hdmi_encoder_helper_funcs); - drm_mode_connector_attach_encoder(&hdmi->output.connector, + drm_connector_attach_encoder(&hdmi->output.connector, &hdmi->output.encoder); drm_connector_register(&hdmi->output.connector); diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index ffe34bd0bb9d..c662efc7e413 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -37,7 +37,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector) edid = drm_get_edid(connector, output->ddc); cec_notifier_set_phys_addr_from_edid(output->notifier, edid); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); if (edid) { err = drm_add_edid_modes(connector, edid); @@ -110,8 +110,8 @@ int tegra_output_probe(struct tegra_output *output) panel = of_parse_phandle(output->of_node, "nvidia,panel", 0); if (panel) { output->panel = of_drm_find_panel(panel); - if (!output->panel) - return -EPROBE_DEFER; + if (IS_ERR(output->panel)) + return PTR_ERR(output->panel); of_node_put(panel); } diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 78ec5193741d..28a78d3120bc 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -289,7 +289,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) drm_encoder_helper_add(&output->encoder, &tegra_rgb_encoder_helper_funcs); - drm_mode_connector_attach_encoder(&output->connector, + drm_connector_attach_encoder(&output->connector, &output->encoder); drm_connector_register(&output->connector); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 7d2a955fc515..d7fe9f15def1 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -2622,7 +2622,7 @@ static int tegra_sor_init(struct host1x_client *client) encoder, NULL); drm_encoder_helper_add(&sor->output.encoder, helpers); - drm_mode_connector_attach_encoder(&sor->output.connector, + drm_connector_attach_encoder(&sor->output.connector, &sor->output.encoder); drm_connector_register(&sor->output.connector); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index d651bdd6597e..b4eaf9bc87f8 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -103,12 +103,11 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev, struct drm_encoder *encoder) { struct drm_connector *connector; - int i; - list_for_each_entry(connector, &ddev->mode_config.connector_list, head) - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) - if (connector->encoder_ids[i] == encoder->base.id) - return connector; + list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { + if (drm_connector_has_possible_encoder(connector, encoder)) + return connector; + } dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n", encoder->name, encoder->base.id); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index d616d64a6725..a1acab39d87f 100644 
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -223,7 +223,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev, connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - ret = drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret) goto fail; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index c45cabb38db0..daebf1aa6b0a 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -173,7 +173,7 @@ static int tfp410_connector_get_modes(struct drm_connector *connector) edid = drm_get_edid(connector, tfp410_connector->mod->i2c); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); if (edid) { ret = drm_add_edid_modes(connector, edid); @@ -240,7 +240,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev, connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - ret = drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret) goto fail; diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig index 7a8008b0783f..16f4b5c91f1b 100644 --- a/drivers/gpu/drm/tinydrm/Kconfig +++ b/drivers/gpu/drm/tinydrm/Kconfig @@ -23,6 +23,7 @@ config TINYDRM_ILI9225 config TINYDRM_ILI9341 tristate "DRM support for ILI9341 display panels" depends on DRM_TINYDRM && SPI + depends on BACKLIGHT_CLASS_DEVICE select TINYDRM_MIPI_DBI help DRM driver for the following Ilitek ILI9341 panels: diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c index 24a33bf862fa..19c7f70adfa5 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c @@ -204,7 +204,7 @@ static int tinydrm_register(struct tinydrm_device *tdev) if (ret) return ret; - ret = drm_fb_cma_fbdev_init_with_funcs(drm, 0, 0, tdev->fb_funcs); + ret = drm_fbdev_generic_setup(drm, 0); if (ret) DRM_ERROR("Failed to initialize fbdev: %d\n", ret); @@ -214,7 +214,6 @@ static int tinydrm_register(struct tinydrm_device *tdev) static void tinydrm_unregister(struct tinydrm_device *tdev) { drm_atomic_helper_shutdown(tdev->drm); - drm_fb_cma_fbdev_fini(tdev->drm); drm_dev_unregister(tdev->drm); } diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c index 841c69aba059..455fefe012f5 100644 --- a/drivers/gpu/drm/tinydrm/ili9225.c +++ b/drivers/gpu/drm/tinydrm/ili9225.c @@ -368,7 +368,6 @@ static struct drm_driver ili9225_driver = { DRIVER_ATOMIC, .fops = &ili9225_fops, TINYDRM_GEM_DRIVER_OPS, - .lastclose = drm_fb_helper_lastclose, .name = "ili9225", .desc = "Ilitek ILI9225", .date = "20171106", diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c index 8864dcde6edc..6701037749a7 100644 --- a/drivers/gpu/drm/tinydrm/ili9341.c +++ b/drivers/gpu/drm/tinydrm/ili9341.c @@ -145,7 +145,6 @@ static struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &ili9341_fops, TINYDRM_GEM_DRIVER_OPS, - .lastclose = drm_fb_helper_lastclose, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9341", .desc = "Ilitek ILI9341", diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c index 015d03f2acba..d7bb4c5e6657 100644 --- 
a/drivers/gpu/drm/tinydrm/mi0283qt.c +++ b/drivers/gpu/drm/tinydrm/mi0283qt.c @@ -154,7 +154,6 @@ static struct drm_driver mi0283qt_driver = { DRIVER_ATOMIC, .fops = &mi0283qt_fops, TINYDRM_GEM_DRIVER_OPS, - .lastclose = drm_fb_helper_lastclose, .debugfs_init = mipi_dbi_debugfs_init, .name = "mi0283qt", .desc = "Multi-Inno MI0283QT", diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c index 4d1fb31a781f..cb3441e51d5f 100644 --- a/drivers/gpu/drm/tinydrm/mipi-dbi.c +++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c @@ -260,6 +260,8 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = { /** * mipi_dbi_enable_flush - MIPI DBI enable helper * @mipi: MIPI DBI structure + * @crtc_state: CRTC state + * @plane_state: Plane state * * This function sets &mipi_dbi->enabled, flushes the whole framebuffer and * enables the backlight. Drivers can use this in their diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c index 5c29e3803ecb..2fcbc3067d71 100644 --- a/drivers/gpu/drm/tinydrm/st7586.c +++ b/drivers/gpu/drm/tinydrm/st7586.c @@ -304,7 +304,6 @@ static struct drm_driver st7586_driver = { DRIVER_ATOMIC, .fops = &st7586_fops, TINYDRM_GEM_DRIVER_OPS, - .lastclose = drm_fb_helper_lastclose, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7586", .desc = "Sitronix ST7586", diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c index 6c7b15c9da4f..3081bc57c116 100644 --- a/drivers/gpu/drm/tinydrm/st7735r.c +++ b/drivers/gpu/drm/tinydrm/st7735r.c @@ -120,7 +120,6 @@ static struct drm_driver st7735r_driver = { DRIVER_ATOMIC, .fops = &st7735r_fops, TINYDRM_GEM_DRIVER_OPS, - .lastclose = drm_fb_helper_lastclose, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7735r", .desc = "Sitronix ST7735R", diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index 09dc585aa46f..68e88bed77ca 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -99,7 +99,7 @@ static int udl_get_modes(struct drm_connector *connector) struct udl_drm_connector, connector); - drm_mode_connector_update_edid_property(connector, udl_connector->edid); + drm_connector_update_edid_property(connector, udl_connector->edid); if (udl_connector->edid) return drm_add_edid_modes(connector, udl_connector->edid); return 0; @@ -200,7 +200,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) drm_connector_helper_add(connector, &udl_connector_helper_funcs); drm_connector_register(connector); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); connector->polled = DRM_CONNECTOR_POLL_HPD | DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 7b1e2a549a71..54d96518a131 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -227,37 +227,19 @@ v3d_set_mmap_vma_flags(struct vm_area_struct *vma) vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); } -int v3d_gem_fault(struct vm_fault *vmf) +vm_fault_t v3d_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; struct v3d_bo *bo = to_v3d_bo(obj); - unsigned long pfn; + pfn_t pfn; pgoff_t pgoff; - int ret; /* We don't use vmf->pgoff since that has the fake offset: */ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - pfn = page_to_pfn(bo->pages[pgoff]); - - ret = 
vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); - - switch (ret) { - case -EAGAIN: - case 0: - case -ERESTARTSYS: - case -EINTR: - case -EBUSY: - /* - * EBUSY is ok: this just means that another thread - * already did the job. - */ - return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - default: - return VM_FAULT_SIGBUS; - } + pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV); + + return vmf_insert_mixed(vma, vmf->address, pfn); } int v3d_mmap(struct file *filp, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index f32ac8c98f37..e6fed696ad86 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -2,6 +2,7 @@ /* Copyright (C) 2015-2018 Broadcom */ #include <linux/reservation.h> +#include <linux/mm_types.h> #include <drm/drmP.h> #include <drm/drm_encoder.h> #include <drm/drm_gem.h> @@ -183,6 +184,8 @@ struct v3d_job { /* GPU virtual addresses of the start/end of the CL job. */ u32 start, end; + + u32 timedout_ctca, timedout_ctra; }; struct v3d_exec_info { @@ -252,7 +255,7 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int v3d_gem_fault(struct vm_fault *vmf); +vm_fault_t v3d_gem_fault(struct vm_fault *vmf); int v3d_mmap(struct file *filp, struct vm_area_struct *vma); struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj); int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c index bfe31a89668b..50bfcf9a8a1a 100644 --- a/drivers/gpu/drm/v3d/v3d_fence.c +++ b/drivers/gpu/drm/v3d/v3d_fence.c @@ -35,19 +35,7 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence) return "v3d-render"; } -static bool v3d_fence_enable_signaling(struct dma_fence *fence) -{ - return true; -} - const struct dma_fence_ops v3d_fence_ops = { .get_driver_name = v3d_fence_get_driver_name, .get_timeline_name = v3d_fence_get_timeline_name, - .enable_signaling = v3d_fence_enable_signaling, - /* Each of our fences gets signaled as complete by the IRQ - * handler, so we rely on the core's tracking of signaling. - */ - .signaled = NULL, - .wait = dma_fence_default_wait, - .release = dma_fence_free, }; diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index fc13282dfc2f..854046565989 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -222,6 +222,7 @@ #define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n) #define V3D_CLE_CT0RA 0x00118 #define V3D_CLE_CT1RA 0x0011c +#define V3D_CLE_CTNRA(n) (V3D_CLE_CT0RA + 4 * n) #define V3D_CLE_CT0LC 0x00120 #define V3D_CLE_CT1LC 0x00124 #define V3D_CLE_CT0PC 0x00128 diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 808bc901f567..a5501581d96b 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -14,8 +14,8 @@ * to the HW only when it has completed the last one, instead of * filling up the CT[01]Q FIFOs with jobs. Similarly, we use * v3d_job_dependency() to manage the dependency between bin and - * render, instead of having the clients submit jobs with using the - * HW's semaphores to interlock between them. + * render, instead of having the clients submit jobs using the HW's + * semaphores to interlock between them. 
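 *
 * (A rough sketch of that scheme, with hypothetical names rather than
 * this driver's code: a drm_sched dependency hook returns the next
 * fence the job still has to wait on, or NULL once it is runnable:
 *
 *	static struct dma_fence *
 *	sketch_job_dependency(struct drm_sched_job *sched_job,
 *			      struct drm_sched_entity *s_entity)
 *	{
 *		return next_unsignaled_fence(sched_job);
 *	}
 *
 * where next_unsignaled_fence() stands in for walking the job's
 * reservation-object fences; the render job simply keeps reporting
 * the bin job's done fence until it signals.)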
*/ #include <linux/kthread.h> @@ -153,7 +153,25 @@ v3d_job_timedout(struct drm_sched_job *sched_job) struct v3d_job *job = to_v3d_job(sched_job); struct v3d_exec_info *exec = job->exec; struct v3d_dev *v3d = exec->v3d; + enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER; enum v3d_queue q; + u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q)); + u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q)); + + /* If the current address or return address have changed, then + * the GPU has probably made progress and we should delay the + * reset. This could fail if the GPU got in an infinite loop + * in the CL, but that is pretty unlikely outside of an i-g-t + * testcase. + */ + if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) { + job->timedout_ctca = ctca; + job->timedout_ctra = ctra; + + schedule_delayed_work(&job->base.work_tdr, + job->base.sched->timeout); + return; + } mutex_lock(&v3d->reset_lock); diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile index 4a3a868235f8..b303703bc7f3 100644 --- a/drivers/gpu/drm/vc4/Makefile +++ b/drivers/gpu/drm/vc4/Makefile @@ -19,6 +19,7 @@ vc4-y := \ vc4_plane.o \ vc4_render_cl.o \ vc4_trace_points.o \ + vc4_txp.o \ vc4_v3d.o \ vc4_validate.o \ vc4_validate_shaders.o diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index d222358fa8a7..0e6a121858d1 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -46,6 +46,8 @@ struct vc4_crtc_state { struct drm_crtc_state base; /* Dlist area for this CRTC configuration. */ struct drm_mm_node mm; + bool feed_txp; + bool txp_armed; }; static inline struct vc4_crtc_state * @@ -324,10 +326,8 @@ static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc) return NULL; } -static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc) +static void vc4_crtc_config_pv(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); @@ -338,12 +338,6 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc) bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 || vc4_encoder->type == VC4_ENCODER_TYPE_DSI1); u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24; - bool debug_dump_regs = false; - - if (debug_dump_regs) { - DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc)); - vc4_crtc_dump_regs(vc4_crtc); - } /* Reset the PV fifo. */ CRTC_WRITE(PV_CONTROL, 0); @@ -419,6 +413,49 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc) PV_CONTROL_CLK_SELECT) | PV_CONTROL_FIFO_CLR | PV_CONTROL_EN); +} + +static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; + bool debug_dump_regs = false; + + if (debug_dump_regs) { + DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc)); + vc4_crtc_dump_regs(vc4_crtc); + } + + if (vc4_crtc->channel == 2) { + u32 dispctrl; + u32 dsp3_mux; + + /* + * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to + * FIFO X'. + * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. + * + * DSP3 is connected to FIFO2 unless the transposer is + * enabled. 
In this case, FIFO 2 is directly accessed by the + * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 + * route. + */ + if (vc4_state->feed_txp) + dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); + else + dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); + + dispctrl = HVS_READ(SCALER_DISPCTRL) & + ~SCALER_DISPCTRL_DSP3_MUX_MASK; + HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); + } + + if (!vc4_state->feed_txp) + vc4_crtc_config_pv(crtc); HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), SCALER_DISPBKGND_AUTOHS | @@ -499,6 +536,13 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, } } +void vc4_crtc_txp_armed(struct drm_crtc_state *state) +{ + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); + + vc4_state->txp_armed = true; +} + static void vc4_crtc_update_dlist(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -514,8 +558,11 @@ static void vc4_crtc_update_dlist(struct drm_crtc *crtc) WARN_ON(drm_crtc_vblank_get(crtc) != 0); spin_lock_irqsave(&dev->event_lock, flags); - vc4_crtc->event = crtc->state->event; - crtc->state->event = NULL; + + if (!vc4_state->feed_txp || vc4_state->txp_armed) { + vc4_crtc->event = crtc->state->event; + crtc->state->event = NULL; + } HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), vc4_state->mm.start); @@ -533,8 +580,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - struct drm_crtc_state *state = crtc->state; - struct drm_display_mode *mode = &state->adjusted_mode; + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + struct drm_display_mode *mode = &crtc->state->adjusted_mode; require_hvs_enabled(dev); @@ -546,15 +593,21 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, /* Turn on the scaler, which will wait for vstart to start * compositing. + * When feeding the transposer, we should operate in oneshot + * mode. */ HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel), VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) | VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) | - SCALER_DISPCTRLX_ENABLE); + SCALER_DISPCTRLX_ENABLE | + (vc4_state->feed_txp ? SCALER_DISPCTRLX_ONESHOT : 0)); - /* Turn on the pixel valve, which will emit the vstart signal. */ - CRTC_WRITE(PV_V_CONTROL, - CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); + /* When feeding the transposer block the pixelvalve is unneeded and + * should not be enabled. + */ + if (!vc4_state->feed_txp) + CRTC_WRITE(PV_V_CONTROL, + CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); } static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc, @@ -579,8 +632,10 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, struct drm_plane *plane; unsigned long flags; const struct drm_plane_state *plane_state; + struct drm_connector *conn; + struct drm_connector_state *conn_state; u32 dlist_count = 0; - int ret; + int ret, i; /* The pixelvalve can only feed one encoder (and encoders are * 1:1 with connectors.) @@ -600,6 +655,24 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, if (ret) return ret; + for_each_new_connector_in_state(state->state, conn, conn_state, i) { + if (conn_state->crtc != crtc) + continue; + + /* The writeback connector is implemented using the transposer + * block which is directly taking its data from the HVS FIFO. 
+ */ + if (conn->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) { + state->no_vblank = true; + vc4_state->feed_txp = true; + } else { + state->no_vblank = false; + vc4_state->feed_txp = false; + } + + break; + } + return 0; } @@ -713,7 +786,8 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) spin_lock_irqsave(&dev->event_lock, flags); if (vc4_crtc->event && - (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) { + (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) || + vc4_state->feed_txp)) { drm_crtc_send_vblank_event(crtc, vc4_crtc->event); vc4_crtc->event = NULL; drm_crtc_vblank_put(crtc); @@ -721,6 +795,13 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) spin_unlock_irqrestore(&dev->event_lock, flags); } +void vc4_crtc_handle_vblank(struct vc4_crtc *crtc) +{ + crtc->t_vblank = ktime_get(); + drm_crtc_handle_vblank(&crtc->base); + vc4_crtc_handle_page_flip(crtc); +} + static irqreturn_t vc4_crtc_irq_handler(int irq, void *data) { struct vc4_crtc *vc4_crtc = data; @@ -728,10 +809,8 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data) irqreturn_t ret = IRQ_NONE; if (stat & PV_INT_VFP_START) { - vc4_crtc->t_vblank = ktime_get(); CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); - drm_crtc_handle_vblank(&vc4_crtc->base); - vc4_crtc_handle_page_flip(vc4_crtc); + vc4_crtc_handle_vblank(vc4_crtc); ret = IRQ_HANDLED; } @@ -884,12 +963,15 @@ static int vc4_page_flip(struct drm_crtc *crtc, static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) { - struct vc4_crtc_state *vc4_state; + struct vc4_crtc_state *vc4_state, *old_vc4_state; vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL); if (!vc4_state) return NULL; + old_vc4_state = to_vc4_crtc_state(crtc->state); + vc4_state->feed_txp = old_vc4_state->feed_txp; + __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base); return &vc4_state->base; } @@ -987,9 +1069,17 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm, struct drm_encoder *encoder; drm_for_each_encoder(encoder, drm) { - struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); + struct vc4_encoder *vc4_encoder; int i; + /* HVS FIFO2 can feed the TXP IP. 
*/ + if (crtc_data->hvs_channel == 2 && + encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { + encoder->possible_crtcs |= drm_crtc_mask(crtc); + continue; + } + + vc4_encoder = to_vc4_encoder(encoder); for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) { if (vc4_encoder->type == encoder_types[i]) { vc4_encoder->clock_select = i; diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c index 5db06bdb5f27..7a0003de71ab 100644 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c @@ -21,6 +21,7 @@ static const struct drm_info_list vc4_debugfs_list[] = { {"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1}, {"hdmi_regs", vc4_hdmi_debugfs_regs, 0}, {"vec_regs", vc4_vec_debugfs_regs, 0}, + {"txp_regs", vc4_txp_debugfs_regs, 0}, {"hvs_regs", vc4_hvs_debugfs_regs, 0}, {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1}, diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 466d0a27b415..04270a14fcaa 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -288,7 +288,7 @@ static int vc4_drm_bind(struct device *dev) ret = vc4_bo_cache_init(drm); if (ret) - goto dev_unref; + goto dev_put; drm_mode_config_init(drm); @@ -313,8 +313,8 @@ unbind_all: gem_destroy: vc4_gem_destroy(drm); vc4_bo_cache_destroy(drm); -dev_unref: - drm_dev_unref(drm); +dev_put: + drm_dev_put(drm); return ret; } @@ -331,7 +331,7 @@ static void vc4_drm_unbind(struct device *dev) drm_atomic_private_obj_fini(&vc4->ctm_manager); - drm_dev_unref(drm); + drm_dev_put(drm); } static const struct component_master_ops vc4_drm_ops = { @@ -344,6 +344,7 @@ static struct platform_driver *const component_drivers[] = { &vc4_vec_driver, &vc4_dpi_driver, &vc4_dsi_driver, + &vc4_txp_driver, &vc4_hvs_driver, &vc4_crtc_driver, &vc4_v3d_driver, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index eace76c621a1..bd6ef1f31822 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -73,6 +73,7 @@ struct vc4_dev { struct vc4_dpi *dpi; struct vc4_dsi *dsi1; struct vc4_vec *vec; + struct vc4_txp *txp; struct vc4_hang_state *hang_state; @@ -698,6 +699,8 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, bool in_vblank_irq, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode); +void vc4_crtc_handle_vblank(struct vc4_crtc *crtc); +void vc4_crtc_txp_armed(struct drm_crtc_state *state); /* vc4_debugfs.c */ int vc4_debugfs_init(struct drm_minor *minor); @@ -745,6 +748,10 @@ int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused); extern struct platform_driver vc4_vec_driver; int vc4_vec_debugfs_regs(struct seq_file *m, void *unused); +/* vc4_txp.c */ +extern struct platform_driver vc4_txp_driver; +int vc4_txp_debugfs_regs(struct seq_file *m, void *unused); + /* vc4_irq.c */ irqreturn_t vc4_irq(int irq, void *arg); void vc4_irq_preinstall(struct drm_device *dev); diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 9c8e89372d1c..0c607eb33d7e 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1612,8 +1612,18 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, &dsi->bridge); - if (ret) + if (ret) { + /* If the bridge or panel pointed to by dev->of_node is not + * enabled, just return 0 here so that we
don't prevent the DRM + * dev from being registered. Of course that means the DSI + * encoder won't be exposed, but that's not a problem since + * nothing is connected to it. + */ + if (ret == -ENODEV) + return 0; + return ret; + } if (panel) { dsi->bridge = devm_drm_panel_bridge_add(dev, panel, @@ -1664,7 +1674,8 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master, struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_dsi *dsi = dev_get_drvdata(dev); - pm_runtime_disable(dev); + if (dsi->bridge) + pm_runtime_disable(dev); vc4_dsi_encoder_destroy(dsi->encoder); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index b8d50533e2bb..fd5522fd179e 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -285,7 +285,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) drm_rgb_quant_range_selectable(edid); } - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); @@ -329,7 +329,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return connector; } diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 8a411e5f8776..ca5aa7fba769 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -153,18 +153,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) drm_atomic_helper_commit_modeset_enables(dev, state); - /* Make sure that drm_atomic_helper_wait_for_vblanks() - * actually waits for vblank. If we're doing a full atomic - * modeset (as opposed to a vc4_update_plane() short circuit), - * then we need to wait for scanout to be done with our - * display lists before we free it and potentially reallocate - * and overwrite the dlist memory with a new modeset. - */ - state->legacy_cursor_update = false; + drm_atomic_helper_fake_vblank(state); drm_atomic_helper_commit_hw_done(state); - drm_atomic_helper_wait_for_vblanks(dev, state); + drm_atomic_helper_wait_for_flip_done(dev, state); drm_atomic_helper_cleanup_planes(dev, state); diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c new file mode 100644 index 000000000000..6e23c50168f9 --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2018 Broadcom + * + * Authors: + * Eric Anholt <eric@anholt.net> + * Boris Brezillon <boris.brezillon@bootlin.com> + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_panel.h> +#include <drm/drm_writeback.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of_graph.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> + +#include "vc4_drv.h" +#include "vc4_regs.h" + +/* Base address of the output. Raster formats must be 4-byte aligned, + * T and LT must be 16-byte aligned or maybe utile-aligned (docs are + * inconsistent, but probably utile). + */ +#define TXP_DST_PTR 0x00 + +/* Pitch in bytes for raster images, 16-byte aligned. For tiled, it's + * the width in tiles. + */ +#define TXP_DST_PITCH 0x04 +/* For T-tiled images, DST_PITCH should be the number of tiles wide, + * shifted up.
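 *
 * (In other words, a sketch of the pitch computation, assuming the
 * usual 32x32-pixel T-tiles at 32bpp:
 *
 *	u32 tiles_wide = DIV_ROUND_UP(mode->hdisplay, 32);
 *	u32 dst_pitch = tiles_wide << TXP_T_TILE_WIDTH_SHIFT;
 *
 * while raster images just program their byte pitch, which must be a
 * multiple of 16.)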
+ */ +# define TXP_T_TILE_WIDTH_SHIFT 7 +/* For LT-tiled images, DST_PITCH should be the number of utiles wide, + * shifted up. + */ +# define TXP_LT_TILE_WIDTH_SHIFT 4 + +/* Pre-rotation width/height of the image. Must match HVS config. + * + * If TFORMAT and 32-bit, limit is 1920 for 32-bit and 3840 for 16-bit + * and width/height must be tile or utile-aligned as appropriate. If + * transposing (rotating), width is limited to 1920. + * + * Height is limited to various numbers between 4088 and 4095. I'd + * just use 4088 to be safe. + */ +#define TXP_DIM 0x08 +# define TXP_HEIGHT_SHIFT 16 +# define TXP_HEIGHT_MASK GENMASK(31, 16) +# define TXP_WIDTH_SHIFT 0 +# define TXP_WIDTH_MASK GENMASK(15, 0) + +#define TXP_DST_CTRL 0x0c +/* These bits are set to 0x54 */ +#define TXP_PILOT_SHIFT 24 +#define TXP_PILOT_MASK GENMASK(31, 24) +/* Bits 22-23 are set to 0x01 */ +#define TXP_VERSION_SHIFT 22 +#define TXP_VERSION_MASK GENMASK(23, 22) + +/* Powers down the internal memory. */ +# define TXP_POWERDOWN BIT(21) + +/* Enables storing the alpha component in 8888/4444, instead of + * filling with ~ALPHA_INVERT. + */ +# define TXP_ALPHA_ENABLE BIT(20) + +/* 4 bits, each enables stores for a channel in each set of 4 bytes. + * Set to 0xf for normal operation. + */ +# define TXP_BYTE_ENABLE_SHIFT 16 +# define TXP_BYTE_ENABLE_MASK GENMASK(19, 16) + +/* Debug: Generate VSTART again at EOF. */ +# define TXP_VSTART_AT_EOF BIT(15) + +/* Debug: Terminate the current frame immediately. Stops AXI + * writes. + */ +# define TXP_ABORT BIT(14) + +# define TXP_DITHER BIT(13) + +/* Inverts alpha if TXP_ALPHA_ENABLE, chooses fill value for + * !TXP_ALPHA_ENABLE. + */ +# define TXP_ALPHA_INVERT BIT(12) + +/* Note: I've listed the channels here in high bit (in byte 3/2/1) to + * low bit (in byte 0) order. + */ +# define TXP_FORMAT_SHIFT 8 +# define TXP_FORMAT_MASK GENMASK(11, 8) +# define TXP_FORMAT_ABGR4444 0 +# define TXP_FORMAT_ARGB4444 1 +# define TXP_FORMAT_BGRA4444 2 +# define TXP_FORMAT_RGBA4444 3 +# define TXP_FORMAT_BGR565 6 +# define TXP_FORMAT_RGB565 7 +/* 888s are non-rotated, raster-only */ +# define TXP_FORMAT_BGR888 8 +# define TXP_FORMAT_RGB888 9 +# define TXP_FORMAT_ABGR8888 12 +# define TXP_FORMAT_ARGB8888 13 +# define TXP_FORMAT_BGRA8888 14 +# define TXP_FORMAT_RGBA8888 15 + +/* If TFORMAT is set, generates LT instead of T format. */ +# define TXP_LINEAR_UTILE BIT(7) + +/* Rotate output by 90 degrees. */ +# define TXP_TRANSPOSE BIT(6) + +/* Generate a tiled format for V3D. */ +# define TXP_TFORMAT BIT(5) + +/* Generates some undefined test mode output. */ +# define TXP_TEST_MODE BIT(4) + +/* Request odd field from HVS. */ +# define TXP_FIELD BIT(3) + +/* Raise interrupt when idle. */ +# define TXP_EI BIT(2) + +/* Set when generating a frame, clears when idle. */ +# define TXP_BUSY BIT(1) + +/* Starts a frame. Self-clearing. */ +# define TXP_GO BIT(0) + +/* Number of lines received and committed to memory.
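 *
 * (Taken together, the TXP_DST_CTRL fields above are composed into a
 * single control word when a frame is kicked off; a sketch close to
 * what the atomic_commit hook below writes for an RGB888 raster
 * frame:
 *
 *	u32 ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
 *		   VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
 *		   VC4_SET_FIELD(TXP_FORMAT_RGB888, TXP_FORMAT);
 *
 * with TXP_ALPHA_ENABLE OR'd in for formats that store alpha.)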
*/ +#define TXP_PROGRESS 0x10 + +#define TXP_READ(offset) readl(txp->regs + (offset)) +#define TXP_WRITE(offset, val) writel(val, txp->regs + (offset)) + +struct vc4_txp { + struct platform_device *pdev; + + struct drm_writeback_connector connector; + + void __iomem *regs; +}; + +static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder) +{ + return container_of(encoder, struct vc4_txp, connector.encoder); +} + +static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn) +{ + return container_of(conn, struct vc4_txp, connector.base); +} + +#define TXP_REG(reg) { reg, #reg } +static const struct { + u32 reg; + const char *name; +} txp_regs[] = { + TXP_REG(TXP_DST_PTR), + TXP_REG(TXP_DST_PITCH), + TXP_REG(TXP_DIM), + TXP_REG(TXP_DST_CTRL), + TXP_REG(TXP_PROGRESS), +}; + +#ifdef CONFIG_DEBUG_FS +int vc4_txp_debugfs_regs(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_txp *txp = vc4->txp; + int i; + + if (!txp) + return 0; + + for (i = 0; i < ARRAY_SIZE(txp_regs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + txp_regs[i].name, txp_regs[i].reg, + TXP_READ(txp_regs[i].reg)); + } + + return 0; +} +#endif + +static int vc4_txp_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + return drm_add_modes_noedid(connector, dev->mode_config.max_width, + dev->mode_config.max_height); +} + +static enum drm_mode_status +vc4_txp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_device *dev = connector->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + int w = mode->hdisplay, h = mode->vdisplay; + + if (w < mode_config->min_width || w > mode_config->max_width) + return MODE_BAD_HVALUE; + + if (h < mode_config->min_height || h > mode_config->max_height) + return MODE_BAD_VVALUE; + + return MODE_OK; +} + +static const u32 drm_fmts[] = { + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, +}; + +static const u32 txp_fmts[] = { + TXP_FORMAT_RGB888, + TXP_FORMAT_BGR888, + TXP_FORMAT_ARGB8888, + TXP_FORMAT_ABGR8888, + TXP_FORMAT_ARGB8888, + TXP_FORMAT_ABGR8888, + TXP_FORMAT_RGBA8888, + TXP_FORMAT_BGRA8888, + TXP_FORMAT_RGBA8888, + TXP_FORMAT_BGRA8888, +}; + +static int vc4_txp_connector_atomic_check(struct drm_connector *conn, + struct drm_connector_state *conn_state) +{ + struct drm_crtc_state *crtc_state; + struct drm_gem_cma_object *gem; + struct drm_framebuffer *fb; + int i; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + crtc_state = drm_atomic_get_new_crtc_state(conn_state->state, + conn_state->crtc); + + fb = conn_state->writeback_job->fb; + if (fb->width != crtc_state->mode.hdisplay || + fb->height != crtc_state->mode.vdisplay) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) { + if (fb->format->format == drm_fmts[i]) + break; + } + + if (i == ARRAY_SIZE(drm_fmts)) + return -EINVAL; + + gem = drm_fb_cma_get_gem_obj(fb, 0); + + /* Pitch must be aligned on 16 bytes. 
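 *
 * (The mask test below is a plain alignment check; an equivalent
 * sketch using the kernel's helper would be:
 *
 *	if (!IS_ALIGNED(fb->pitches[0], 16))
 *		return -EINVAL;
 *
 * either form rejects any pitch with the low four bits set.)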
*/ + if (fb->pitches[0] & GENMASK(3, 0)) + return -EINVAL; + + vc4_crtc_txp_armed(crtc_state); + + return 0; +} + +static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, + struct drm_connector_state *conn_state) +{ + struct vc4_txp *txp = connector_to_vc4_txp(conn); + struct drm_gem_cma_object *gem; + struct drm_display_mode *mode; + struct drm_framebuffer *fb; + u32 ctrl; + int i; + + if (WARN_ON(!conn_state->writeback_job || + !conn_state->writeback_job->fb)) + return; + + mode = &conn_state->crtc->state->adjusted_mode; + fb = conn_state->writeback_job->fb; + + for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) { + if (fb->format->format == drm_fmts[i]) + break; + } + + if (WARN_ON(i == ARRAY_SIZE(drm_fmts))) + return; + + ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI | + VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) | + VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT); + + if (fb->format->has_alpha) + ctrl |= TXP_ALPHA_ENABLE; + + gem = drm_fb_cma_get_gem_obj(fb, 0); + TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]); + TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]); + TXP_WRITE(TXP_DIM, + VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) | + VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT)); + + TXP_WRITE(TXP_DST_CTRL, ctrl); + + drm_writeback_queue_job(&txp->connector, conn_state->writeback_job); +} + +static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = { + .get_modes = vc4_txp_connector_get_modes, + .mode_valid = vc4_txp_connector_mode_valid, + .atomic_check = vc4_txp_connector_atomic_check, + .atomic_commit = vc4_txp_connector_atomic_commit, +}; + +static enum drm_connector_status +vc4_txp_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static void vc4_txp_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs vc4_txp_connector_funcs = { + .detect = vc4_txp_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = vc4_txp_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static void vc4_txp_encoder_disable(struct drm_encoder *encoder) +{ + struct vc4_txp *txp = encoder_to_vc4_txp(encoder); + + if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) { + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + + TXP_WRITE(TXP_DST_CTRL, TXP_ABORT); + + while (TXP_READ(TXP_DST_CTRL) & TXP_BUSY && + time_before(jiffies, timeout)) + ; + + WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY); + } + + TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN); +} + +static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = { + .disable = vc4_txp_encoder_disable, +}; + +static irqreturn_t vc4_txp_interrupt(int irq, void *data) +{ + struct vc4_txp *txp = data; + + TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI); + vc4_crtc_handle_vblank(to_vc4_crtc(txp->connector.base.state->crtc)); + drm_writeback_signal_completion(&txp->connector, 0); + + return IRQ_HANDLED; +} + +static int vc4_txp_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = dev_get_drvdata(master); + struct vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_txp *txp; + int ret, irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + txp = devm_kzalloc(dev, sizeof(*txp), 
GFP_KERNEL); + if (!txp) + return -ENOMEM; + + txp->pdev = pdev; + + txp->regs = vc4_ioremap_regs(pdev, 0); + if (IS_ERR(txp->regs)) + return PTR_ERR(txp->regs); + + drm_connector_helper_add(&txp->connector.base, + &vc4_txp_connector_helper_funcs); + ret = drm_writeback_connector_init(drm, &txp->connector, + &vc4_txp_connector_funcs, + &vc4_txp_encoder_helper_funcs, + drm_fmts, ARRAY_SIZE(drm_fmts)); + if (ret) + return ret; + + ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0, + dev_name(dev), txp); + if (ret) + return ret; + + dev_set_drvdata(dev, txp); + vc4->txp = txp; + + return 0; +} + +static void vc4_txp_unbind(struct device *dev, struct device *master, + void *data) +{ + struct drm_device *drm = dev_get_drvdata(master); + struct vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_txp *txp = dev_get_drvdata(dev); + + vc4_txp_connector_destroy(&txp->connector.base); + + vc4->txp = NULL; +} + +static const struct component_ops vc4_txp_ops = { + .bind = vc4_txp_bind, + .unbind = vc4_txp_unbind, +}; + +static int vc4_txp_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &vc4_txp_ops); +} + +static int vc4_txp_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &vc4_txp_ops); + return 0; +} + +static const struct of_device_id vc4_txp_dt_match[] = { + { .compatible = "brcm,bcm2835-txp" }, + { /* sentinel */ }, +}; + +struct platform_driver vc4_txp_driver = { + .probe = vc4_txp_probe, + .remove = vc4_txp_remove, + .driver = { + .name = "vc4_txp", + .of_match_table = vc4_txp_dt_match, + }, +}; diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c index 3a9a302247a2..8e7facb6514e 100644 --- a/drivers/gpu/drm/vc4/vc4_vec.c +++ b/drivers/gpu/drm/vc4/vc4_vec.c @@ -404,7 +404,7 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev, VC4_VEC_TV_MODE_NTSC); vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC]; - drm_mode_connector_attach_encoder(connector, vec->encoder); + drm_connector_attach_encoder(connector, vec->encoder); return connector; } diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index ff9933e79416..25503b933599 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -292,7 +292,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs); encoder->possible_crtcs = 1 << index; - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); drm_connector_register(connector); return 0; } diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile new file mode 100644 index 000000000000..986297da51bf --- /dev/null +++ b/drivers/gpu/drm/vkms/Makefile @@ -0,0 +1,3 @@ +vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o + +obj-$(CONFIG_DRM_VKMS) += vkms.o diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c new file mode 100644 index 000000000000..875fca662ac0 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include "vkms_drv.h" +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> + +static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) +{ + struct vkms_output *output = container_of(timer, struct vkms_output, + vblank_hrtimer); + struct drm_crtc *crtc = &output->crtc; + int ret_overrun; + bool ret; + + ret = drm_crtc_handle_vblank(crtc); + if (!ret) + DRM_ERROR("vkms failure on handling vblank"); + + ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, + output->period_ns); + + return HRTIMER_RESTART; +} + +static int vkms_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct vkms_output *out = drm_crtc_to_vkms_output(crtc); + + drm_calc_timestamping_constants(crtc, &crtc->mode); + + hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + out->vblank_hrtimer.function = &vkms_vblank_simulate; + out->period_ns = ktime_set(0, vblank->framedur_ns); + hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); + + return 0; +} + +static void vkms_disable_vblank(struct drm_crtc *crtc) +{ + struct vkms_output *out = drm_crtc_to_vkms_output(crtc); + + hrtimer_cancel(&out->vblank_hrtimer); +} + +bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, + int *max_error, ktime_t *vblank_time, + bool in_vblank_irq) +{ + struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); + struct vkms_output *output = &vkmsdev->output; + + *vblank_time = output->vblank_hrtimer.node.expires; + + return true; +} + +static const struct drm_crtc_funcs vkms_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = vkms_enable_vblank, + .disable_vblank = vkms_disable_vblank, +}; + +static void vkms_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + drm_crtc_vblank_on(crtc); +} + +static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + drm_crtc_vblank_off(crtc); +} + +static void vkms_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + unsigned long flags; + + if (crtc->state->event) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + if (drm_crtc_vblank_get(crtc) != 0) + drm_crtc_send_vblank_event(crtc, crtc->state->event); + else + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + crtc->state->event = NULL; + } +} + +static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = { + .atomic_flush = vkms_crtc_atomic_flush, + .atomic_enable = vkms_crtc_atomic_enable, + .atomic_disable = vkms_crtc_atomic_disable, +}; + +int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_plane *primary, struct drm_plane *cursor) +{ + int ret; + + ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, + &vkms_crtc_funcs, NULL); + if (ret) { + DRM_ERROR("Failed to init CRTC\n"); + return ret; + } + + drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs); + + return ret; +} diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c new file mode 100644 index 000000000000..37aa2ef33b21 --- /dev/null +++ 
b/drivers/gpu/drm/vkms/vkms_drv.c @@ -0,0 +1,155 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/module.h> +#include <drm/drm_gem.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_fb_helper.h> +#include "vkms_drv.h" + +#define DRIVER_NAME "vkms" +#define DRIVER_DESC "Virtual Kernel Mode Setting" +#define DRIVER_DATE "20180514" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +static struct vkms_device *vkms_device; + +static const struct file_operations vkms_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .mmap = drm_gem_mmap, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .release = drm_release, +}; + +static const struct vm_operations_struct vkms_gem_vm_ops = { + .fault = vkms_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static void vkms_release(struct drm_device *dev) +{ + struct vkms_device *vkms = container_of(dev, struct vkms_device, drm); + + platform_device_unregister(vkms->platform); + drm_mode_config_cleanup(&vkms->drm); + drm_dev_fini(&vkms->drm); +} + +static struct drm_driver vkms_driver = { + .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, + .release = vkms_release, + .fops = &vkms_driver_fops, + .dumb_create = vkms_dumb_create, + .dumb_map_offset = vkms_dumb_map, + .gem_vm_ops = &vkms_gem_vm_ops, + .gem_free_object_unlocked = vkms_gem_free_object, + .get_vblank_timestamp = vkms_get_vblank_timestamp, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, +}; + +static const struct drm_mode_config_funcs vkms_mode_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int vkms_modeset_init(struct vkms_device *vkmsdev) +{ + struct drm_device *dev = &vkmsdev->drm; + + drm_mode_config_init(dev); + dev->mode_config.funcs = &vkms_mode_funcs; + dev->mode_config.min_width = XRES_MIN; + dev->mode_config.min_height = YRES_MIN; + dev->mode_config.max_width = XRES_MAX; + dev->mode_config.max_height = YRES_MAX; + + return vkms_output_init(vkmsdev); +} + +static int __init vkms_init(void) +{ + int ret; + + vkms_device = kzalloc(sizeof(*vkms_device), GFP_KERNEL); + if (!vkms_device) + return -ENOMEM; + + ret = drm_dev_init(&vkms_device->drm, &vkms_driver, NULL); + if (ret) + goto out_free; + + vkms_device->platform = + platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); + if (IS_ERR(vkms_device->platform)) { + ret = PTR_ERR(vkms_device->platform); + goto out_fini; + } + + vkms_device->drm.irq_enabled = true; + + ret = drm_vblank_init(&vkms_device->drm, 1); + if (ret) { + DRM_ERROR("Failed to initialize vblank\n"); + goto out_fini; + } + + ret = vkms_modeset_init(vkms_device); + if (ret) + goto out_unregister; + + ret = drm_dev_register(&vkms_device->drm, 0); + if (ret) + goto out_unregister; + + return 0; + +out_unregister: + platform_device_unregister(vkms_device->platform); + +out_fini: + drm_dev_fini(&vkms_device->drm); + +out_free: + kfree(vkms_device); + return ret; +} + +static void __exit vkms_exit(void) +{ + if (!vkms_device) { + DRM_INFO("vkms_device is NULL.\n"); +
return; + } + + drm_dev_unregister(&vkms_device->drm); + drm_dev_put(&vkms_device->drm); + + kfree(vkms_device); +} + +module_init(vkms_init); +module_exit(vkms_exit); + +MODULE_AUTHOR("Haneen Mohammed <hamohammed.sa@gmail.com>"); +MODULE_AUTHOR("Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>"); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h new file mode 100644 index 000000000000..07be29f2dc44 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -0,0 +1,78 @@ +#ifndef _VKMS_DRV_H_ +#define _VKMS_DRV_H_ + +#include <drm/drmP.h> +#include <drm/drm.h> +#include <drm/drm_gem.h> +#include <drm/drm_encoder.h> +#include <linux/hrtimer.h> + +#define XRES_MIN 32 +#define YRES_MIN 32 + +#define XRES_DEF 1024 +#define YRES_DEF 768 + +#define XRES_MAX 8192 +#define YRES_MAX 8192 + +static const u32 vkms_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +struct vkms_output { + struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; + struct hrtimer vblank_hrtimer; + ktime_t period_ns; + struct drm_pending_vblank_event *event; +}; + +struct vkms_device { + struct drm_device drm; + struct platform_device *platform; + struct vkms_output output; +}; + +struct vkms_gem_object { + struct drm_gem_object gem; + struct mutex pages_lock; /* Page lock used in page fault handler */ + struct page **pages; +}; + +#define drm_crtc_to_vkms_output(target) \ + container_of(target, struct vkms_output, crtc) + +#define drm_device_to_vkms_device(target) \ + container_of(target, struct vkms_device, drm) + +/* CRTC */ +int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_plane *primary, struct drm_plane *cursor); + +bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, + int *max_error, ktime_t *vblank_time, + bool in_vblank_irq); + +int vkms_output_init(struct vkms_device *vkmsdev); + +struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev); + +/* Gem stuff */ +struct drm_gem_object *vkms_gem_create(struct drm_device *dev, + struct drm_file *file, + u32 *handle, + u64 size); + +int vkms_gem_fault(struct vm_fault *vmf); + +int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); + +int vkms_dumb_map(struct drm_file *file, struct drm_device *dev, + u32 handle, u64 *offset); + +void vkms_gem_free_object(struct drm_gem_object *obj); + +#endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c new file mode 100644 index 000000000000..c7e38368602b --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
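The two casting macros in vkms_drv.h above work only because struct vkms_device embeds its drm_device, and struct vkms_output its CRTC, by value; container_of() then walks from a pointer to the embedded member back to the enclosing structure. A brief illustration of their intended use (the hook name is hypothetical):

#include "vkms_drv.h"

static void example_crtc_hook(struct drm_crtc *crtc)
{
	/* Valid only for the crtc embedded in struct vkms_output. */
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	/* crtc->dev is the drm_device embedded in struct vkms_device. */
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(crtc->dev);

	DRM_DEBUG_DRIVER("output %p belongs to device %p\n", out, vkmsdev);
}

This embedding is also why vkms_release() can recover the vkms_device from the drm_device passed to it, and why none of these objects may be allocated separately from their container.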
+ */ + +#include <linux/shmem_fs.h> + +#include "vkms_drv.h" + +static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev, + u64 size) +{ + struct vkms_gem_object *obj; + int ret; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + size = roundup(size, PAGE_SIZE); + ret = drm_gem_object_init(dev, &obj->gem, size); + if (ret) { + kfree(obj); + return ERR_PTR(ret); + } + + mutex_init(&obj->pages_lock); + + return obj; +} + +void vkms_gem_free_object(struct drm_gem_object *obj) +{ + struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object, + gem); + + kvfree(gem->pages); + mutex_destroy(&gem->pages_lock); + drm_gem_object_release(obj); + kfree(gem); +} + +int vkms_gem_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vkms_gem_object *obj = vma->vm_private_data; + unsigned long vaddr = vmf->address; + pgoff_t page_offset; + loff_t num_pages; + int ret; + + page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT; + num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE); + + if (page_offset >= num_pages) + return VM_FAULT_SIGBUS; + + ret = -ENOENT; + mutex_lock(&obj->pages_lock); + if (obj->pages) { + get_page(obj->pages[page_offset]); + vmf->page = obj->pages[page_offset]; + ret = 0; + } + mutex_unlock(&obj->pages_lock); + if (ret) { + struct page *page; + struct address_space *mapping; + + mapping = file_inode(obj->gem.filp)->i_mapping; + page = shmem_read_mapping_page(mapping, page_offset); + + if (!IS_ERR(page)) { + vmf->page = page; + ret = 0; + } else { + switch (PTR_ERR(page)) { + case -ENOSPC: + case -ENOMEM: + ret = VM_FAULT_OOM; + break; + case -EBUSY: + ret = VM_FAULT_RETRY; + break; + case -EFAULT: + case -EINVAL: + ret = VM_FAULT_SIGBUS; + break; + default: + WARN_ON(PTR_ERR(page)); + ret = VM_FAULT_SIGBUS; + break; + } + } + } + return ret; +} + +struct drm_gem_object *vkms_gem_create(struct drm_device *dev, + struct drm_file *file, + u32 *handle, + u64 size) +{ + struct vkms_gem_object *obj; + int ret; + + if (!file || !dev || !handle) + return ERR_PTR(-EINVAL); + + obj = __vkms_gem_create(dev, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + ret = drm_gem_handle_create(file, &obj->gem, handle); + drm_gem_object_put_unlocked(&obj->gem); + if (ret) { + drm_gem_object_release(&obj->gem); + kfree(obj); + return ERR_PTR(ret); + } + + return &obj->gem; +} + +int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct drm_gem_object *gem_obj; + u64 pitch, size; + + if (!args || !dev || !file) + return -EINVAL; + + pitch = args->width * DIV_ROUND_UP(args->bpp, 8); + size = pitch * args->height; + + if (!size) + return -EINVAL; + + gem_obj = vkms_gem_create(dev, file, &args->handle, size); + if (IS_ERR(gem_obj)) + return PTR_ERR(gem_obj); + + args->size = gem_obj->size; + args->pitch = pitch; + + DRM_DEBUG_DRIVER("Created object of size %llu\n", size); + + return 0; +} + +int vkms_dumb_map(struct drm_file *file, struct drm_device *dev, + u32 handle, u64 *offset) +{ + struct drm_gem_object *obj; + int ret; + + obj = drm_gem_object_lookup(file, handle); + if (!obj) + return -ENOENT; + + if (!obj->filp) { + ret = -EINVAL; + goto unref; + } + + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto unref; + + *offset = drm_vma_node_offset_addr(&obj->vma_node); +unref: + drm_gem_object_put_unlocked(obj); + + return ret; +} diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c new file mode 100644 index
000000000000..901012cb1af1 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "vkms_drv.h" +#include <drm/drm_crtc_helper.h> +#include <drm/drm_atomic_helper.h> + +static void vkms_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs vkms_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = vkms_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_encoder_funcs vkms_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int vkms_conn_get_modes(struct drm_connector *connector) +{ + int count; + + count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + + return count; +} + +static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { + .get_modes = vkms_conn_get_modes, +}; + +int vkms_output_init(struct vkms_device *vkmsdev) +{ + struct vkms_output *output = &vkmsdev->output; + struct drm_device *dev = &vkmsdev->drm; + struct drm_connector *connector = &output->connector; + struct drm_encoder *encoder = &output->encoder; + struct drm_crtc *crtc = &output->crtc; + struct drm_plane *primary; + int ret; + + primary = vkms_plane_init(vkmsdev); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + ret = vkms_crtc_init(dev, crtc, primary, NULL); + if (ret) + goto err_crtc; + + ret = drm_connector_init(dev, connector, &vkms_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret) { + DRM_ERROR("Failed to init connector\n"); + goto err_connector; + } + + drm_connector_helper_add(connector, &vkms_conn_helper_funcs); + + ret = drm_connector_register(connector); + if (ret) { + DRM_ERROR("Failed to register connector\n"); + goto err_connector_register; + } + + ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + if (ret) { + DRM_ERROR("Failed to init encoder\n"); + goto err_encoder; + } + encoder->possible_crtcs = 1; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) { + DRM_ERROR("Failed to attach connector to encoder\n"); + goto err_attach; + } + + drm_mode_config_reset(dev); + + return 0; + +err_attach: + drm_encoder_cleanup(encoder); + +err_encoder: + drm_connector_unregister(connector); + +err_connector_register: + drm_connector_cleanup(connector); + +err_connector: + drm_crtc_cleanup(crtc); + +err_crtc: + drm_plane_cleanup(primary); + return ret; +} diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c new file mode 100644 index 000000000000..9f75b1e2c1c4 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
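A side note on vkms_output_init() above: the hard-coded encoder->possible_crtcs = 1 is correct because vkms registers exactly one CRTC, whose index is 0. A hedged, more general form of the same assignment, using the drm_crtc_mask() helper documented later in this series:

	/* Equivalent to the literal 1 for the first registered CRTC:
	 * drm_crtc_mask() returns BIT(drm_crtc_index(crtc)). */
	encoder->possible_crtcs = drm_crtc_mask(crtc);

This stays valid even if additional CRTCs are ever registered before the output is initialized.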
+ */ + +#include "vkms_drv.h" +#include <drm/drm_plane_helper.h> +#include <drm/drm_atomic_helper.h> + +static const struct drm_plane_funcs vkms_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static void vkms_primary_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ +} + +static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { + .atomic_update = vkms_primary_plane_update, +}; + +struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev) +{ + struct drm_device *dev = &vkmsdev->drm; + struct drm_plane *plane; + const u32 *formats; + int ret, nformats; + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + formats = vkms_formats; + nformats = ARRAY_SIZE(vkms_formats); + + ret = drm_universal_plane_init(dev, plane, 0, + &vkms_plane_funcs, + formats, nformats, + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + kfree(plane); + return ERR_PTR(ret); + } + + drm_plane_helper_add(plane, &vkms_primary_helper_funcs); + + return plane; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 466336b34fff..23beff5d8e3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2268,7 +2268,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, drm_mode_probed_add(connector, mode); } - drm_mode_connector_list_update(connector); + drm_connector_list_update(connector); /* Move the prefered mode first, help apps pick the right mode. 
*/ drm_mode_sort(&connector->modes); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 030d49c243e1..723578117191 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -438,7 +438,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_connector; } - (void) drm_mode_connector_attach_encoder(connector, encoder); + (void) drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 4eea456b9d4e..ad0de7f0cd60 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -703,7 +703,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_connector; } - (void) drm_mode_connector_attach_encoder(connector, encoder); + (void) drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 8d13628e8a86..93f6b96ca7bb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1498,7 +1498,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_connector; } - (void) drm_mode_connector_attach_encoder(connector, encoder); + (void) drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c index 13ea90f7a185..78655269d843 100644 --- a/drivers/gpu/drm/zte/zx_hdmi.c +++ b/drivers/gpu/drm/zte/zx_hdmi.c @@ -272,7 +272,7 @@ static int zx_hdmi_connector_get_modes(struct drm_connector *connector) hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid); hdmi->sink_has_audio = drm_detect_monitor_audio(edid); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); @@ -326,7 +326,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi) drm_connector_helper_add(&hdmi->connector, &zx_hdmi_connector_helper_funcs); - drm_mode_connector_attach_encoder(&hdmi->connector, encoder); + drm_connector_attach_encoder(&hdmi->connector, encoder); return 0; } diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c index 0de1a71ca4e0..b73afb212fb2 100644 --- a/drivers/gpu/drm/zte/zx_tvenc.c +++ b/drivers/gpu/drm/zte/zx_tvenc.c @@ -297,7 +297,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc) DRM_MODE_CONNECTOR_Composite); drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c index 3e7e33cd3dfa..23d1ff4355a0 100644 --- a/drivers/gpu/drm/zte/zx_vga.c +++ b/drivers/gpu/drm/zte/zx_vga.c @@ -109,7 +109,7 @@ static int zx_vga_connector_get_modes(struct drm_connector *connector) */ zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE); - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); @@ -175,7 +175,7 @@ static int zx_vga_register(struct 
drm_device *drm, struct zx_vga *vga) drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs); - ret = drm_mode_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(connector, encoder); if (ret) { DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret); goto clean_connector; diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c index 5c7ea237893e..da4a93df8d75 100644 --- a/drivers/staging/vboxvideo/vbox_mode.c +++ b/drivers/staging/vboxvideo/vbox_mode.c @@ -504,7 +504,7 @@ static void vbox_set_edid(struct drm_connector *connector, int width, for (i = 0; i < EDID_SIZE - 1; ++i) sum += edid[i]; edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF; - drm_mode_connector_update_edid_property(connector, (struct edid *)edid); + drm_connector_update_edid_property(connector, (struct edid *)edid); } static int vbox_get_modes(struct drm_connector *connector) @@ -655,7 +655,7 @@ static int vbox_connector_init(struct drm_device *dev, dev->mode_config.suggested_y_property, 0); drm_connector_register(connector); - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index c5dfbdb7271d..f7a19c2a7a80 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -102,25 +102,6 @@ struct pci_controller; #define DRM_SWITCH_POWER_CHANGING 2 #define DRM_SWITCH_POWER_DYNAMIC_OFF 3 -static inline bool drm_core_check_feature(struct drm_device *dev, int feature) -{ - return dev->driver->driver_features & feature; -} - -/** - * drm_drv_uses_atomic_modeset - check if the driver implements - * atomic_commit() - * @dev: DRM device - * - * This check is useful if drivers do not have DRIVER_ATOMIC set but - * have atomic modesetting internally implemented. - */ -static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) -{ - return drm_core_check_feature(dev, DRIVER_ATOMIC) || - dev->mode_config.funcs->atomic_commit != NULL; -} - /* returns true if currently okay to sleep */ static inline bool drm_can_sleep(void) { diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 26aaba58d6ce..99e2a5297c69 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -100,6 +100,7 @@ int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state, int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, bool nonblock); void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state); +void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state); void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state); void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state); diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h new file mode 100644 index 000000000000..989f8e52864d --- /dev/null +++ b/include/drm/drm_client.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DRM_CLIENT_H_ +#define _DRM_CLIENT_H_ + +#include <linux/types.h> + +struct drm_client_dev; +struct drm_device; +struct drm_file; +struct drm_framebuffer; +struct drm_gem_object; +struct drm_minor; +struct module; + +/** + * struct drm_client_funcs - DRM client callbacks + */ +struct drm_client_funcs { + /** + * @owner: The module owner + */ + struct module *owner; + + /** + * @unregister: + * + * Called when &drm_device is unregistered. 
The client should respond by + * releasing its resources using drm_client_release(). + * + * This callback is optional. + */ + void (*unregister)(struct drm_client_dev *client); + + /** + * @restore: + * + * Called on drm_lastclose(). The first client instance in the list that + * returns zero gets the privilege to restore and no more clients are + * called. This callback is not called after @unregister has been called. + * + * This callback is optional. + */ + int (*restore)(struct drm_client_dev *client); + + /** + * @hotplug: + * + * Called on drm_kms_helper_hotplug_event(). + * This callback is not called after @unregister has been called. + * + * This callback is optional. + */ + int (*hotplug)(struct drm_client_dev *client); +}; + +/** + * struct drm_client_dev - DRM client instance + */ +struct drm_client_dev { + /** + * @dev: DRM device + */ + struct drm_device *dev; + + /** + * @name: Name of the client. + */ + const char *name; + + /** + * @list: + * + * List of all clients of a DRM device, linked into + * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex. + */ + struct list_head list; + + /** + * @funcs: DRM client functions (optional) + */ + const struct drm_client_funcs *funcs; + + /** + * @file: DRM file + */ + struct drm_file *file; +}; + +int drm_client_new(struct drm_device *dev, struct drm_client_dev *client, + const char *name, const struct drm_client_funcs *funcs); +void drm_client_release(struct drm_client_dev *client); + +void drm_client_dev_unregister(struct drm_device *dev); +void drm_client_dev_hotplug(struct drm_device *dev); +void drm_client_dev_restore(struct drm_device *dev); + +/** + * struct drm_client_buffer - DRM client buffer + */ +struct drm_client_buffer { + /** + * @client: DRM client + */ + struct drm_client_dev *client; + + /** + * @handle: Buffer handle + */ + u32 handle; + + /** + * @pitch: Buffer pitch + */ + u32 pitch; + + /** + * @gem: GEM object backing this buffer + */ + struct drm_gem_object *gem; + + /** + * @vaddr: Virtual address for the buffer + */ + void *vaddr; + + /** + * @fb: DRM framebuffer + */ + struct drm_framebuffer *fb; +}; + +struct drm_client_buffer * +drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); +void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); + +int drm_client_debugfs_init(struct drm_minor *minor); + +#endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index bf0f0f0786d3..97ea41dc678f 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -290,6 +290,10 @@ struct drm_display_info { #define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4) /* data is transmitted LSB to MSB on the bus */ #define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5) +/* drive sync on pos. edge */ +#define DRM_BUS_FLAG_SYNC_POSEDGE (1<<6) +/* drive sync on neg.
edge */ +#define DRM_BUS_FLAG_SYNC_NEGEDGE (1<<7) /** * @bus_flags: Additional information (like pixel signal polarity) for @@ -374,12 +378,9 @@ struct drm_tv_connector_state { /** * struct drm_connector_state - mutable connector state - * @connector: backpointer to the connector - * @best_encoder: can be used by helpers and drivers to select the encoder - * @state: backpointer to global drm_atomic_state - * @tv: TV connector state */ struct drm_connector_state { + /** @connector: backpointer to the connector */ struct drm_connector *connector; /** @@ -390,6 +391,13 @@ struct drm_connector_state { */ struct drm_crtc *crtc; + /** + * @best_encoder: + * + * Used by the atomic helpers to select the encoder, through the + * &drm_connector_helper_funcs.atomic_best_encoder or + * &drm_connector_helper_funcs.best_encoder callbacks. + */ struct drm_encoder *best_encoder; /** @@ -398,6 +406,7 @@ struct drm_connector_state { */ enum drm_link_status link_status; + /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; /** @@ -407,6 +416,7 @@ struct drm_connector_state { */ struct drm_crtc_commit *commit; + /** @tv: TV connector state */ struct drm_tv_connector_state tv; /** @@ -551,8 +561,7 @@ struct drm_connector_funcs { * received for this output connector->edid must be NULL. * * Drivers using the probe helpers should use - * drm_helper_probe_single_connector_modes() or - * drm_helper_probe_single_connector_modes_nomerge() to implement this + * drm_helper_probe_single_connector_modes() to implement this * function. * * RETURNS: @@ -763,45 +772,6 @@ struct drm_cmdline_mode { /** * struct drm_connector - central DRM connector control structure - * @dev: parent DRM device - * @kdev: kernel device for sysfs attributes - * @attr: sysfs attributes - * @head: list management - * @base: base KMS object - * @name: human readable name, can be overwritten by the driver - * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h - * @connector_type_id: index into connector type enum - * @interlace_allowed: can this connector handle interlaced modes? - * @doublescan_allowed: can this connector handle doublescan? - * @stereo_allowed: can this connector handle stereo modes? - * @funcs: connector control functions - * @edid_blob_ptr: DRM property containing EDID if present - * @properties: property tracking for this connector - * @dpms: current dpms state - * @helper_private: mid-layer private data - * @cmdline_mode: mode line parsed from the kernel cmdline for this connector - * @force: a DRM_FORCE_<foo> state for forced mode sets - * @override_edid: has the EDID been overwritten through debugfs for testing? 
- * @encoder_ids: valid encoders for this connector - * @eld: EDID-like data, if present - * @latency_present: AV delay info from ELD, if found - * @video_latency: video latency info from ELD, if found - * @audio_latency: audio latency info from ELD, if found - * @null_edid_counter: track sinks that give us all zeros for the EDID - * @bad_edid_counter: track sinks that give us an EDID with invalid checksum - * @edid_corrupt: indicates whether the last read EDID was corrupt - * @debugfs_entry: debugfs directory for this connector - * @has_tile: is this connector connected to a tiled monitor - * @tile_group: tile group for the connected monitor - * @tile_is_single_monitor: whether the tile is one monitor housing - * @num_h_tile: number of horizontal tiles in the tile group - * @num_v_tile: number of vertical tiles in the tile group - * @tile_h_loc: horizontal location of this tile - * @tile_v_loc: vertical location of this tile - * @tile_h_size: horizontal size of this tile. - * @tile_v_size: vertical size of this tile. - * @scaling_mode_property: Optional atomic property to control the upscaling. - * @content_protection_property: Optional property to control content protection * * Each connector may be connected to one or more CRTCs, or may be clonable by * another connector if they can share a CRTC. Each connector also has a specific @@ -809,13 +779,27 @@ struct drm_cmdline_mode { * span multiple monitors). */ struct drm_connector { + /** @dev: parent DRM device */ struct drm_device *dev; + /** @kdev: kernel device for sysfs attributes */ struct device *kdev; + /** @attr: sysfs attributes */ struct device_attribute *attr; + + /** + * @head: + * + * List of all connectors on a @dev, linked from + * &drm_mode_config.connector_list. Protected by + * &drm_mode_config.connector_list_lock, but please only use + * &drm_connector_list_iter to walk this list. + */ struct list_head head; + /** @base: base KMS object */ struct drm_mode_object base; + /** @name: human readable name, can be overwritten by the driver */ char *name; /** @@ -833,10 +817,30 @@ struct drm_connector { */ unsigned index; + /** + * @connector_type: + * one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h + */ int connector_type; + /** @connector_type_id: index into connector type enum */ int connector_type_id; + /** + * @interlace_allowed: + * Can this connector handle interlaced modes? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ bool interlace_allowed; + /** + * @doublescan_allowed: + * Can this connector handle doublescan? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ bool doublescan_allowed; + /** + * @stereo_allowed: + * Can this connector handle stereo modes? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ bool stereo_allowed; /** @@ -885,45 +889,42 @@ struct drm_connector { * Protected by &drm_mode_config.mutex. */ struct drm_display_info display_info; + + /** @funcs: connector control functions */ const struct drm_connector_funcs *funcs; + /** + * @edid_blob_ptr: DRM property containing EDID if present. Protected by + * &drm_mode_config.mutex. This should be updated only by calling + * drm_connector_update_edid_property(). + */ struct drm_property_blob *edid_blob_ptr; + + /** @properties: property tracking for this connector */ struct drm_object_properties properties; + /** + * @scaling_mode_property: Optional atomic property to control the + * upscaling. 
See drm_connector_attach_scaling_mode_property(). + */ + struct drm_property *scaling_mode_property; /** * @content_protection_property: DRM ENUM property for content - * protection + * protection. See drm_connector_attach_content_protection_property(). */ struct drm_property *content_protection_property; /** * @path_blob_ptr: * - * DRM blob property data for the DP MST path property. + * DRM blob property data for the DP MST path property. This should only + * be updated by calling drm_connector_set_path_property(). */ struct drm_property_blob *path_blob_ptr; - /** - * @tile_blob_ptr: - * - * DRM blob property data for the tile property (used mostly by DP MST). - * This is meant for screens which are driven through separate display - * pipelines represented by &drm_crtc, which might not be running with - * genlocked clocks. For tiled panels which are genlocked, like - * dual-link LVDS or dual-link DSI, the driver should try to not expose - * the tiling and virtualize both &drm_crtc and &drm_plane if needed. - */ - struct drm_property_blob *tile_blob_ptr; - -/* should we poll this connector for connects and disconnects */ -/* hot plug detectable */ #define DRM_CONNECTOR_POLL_HPD (1 << 0) -/* poll for connections */ #define DRM_CONNECTOR_POLL_CONNECT (1 << 1) -/* can cleanly poll for disconnections without flickering the screen */ -/* DACs should rarely do this without a lot of testing */ #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) /** @@ -940,25 +941,40 @@ struct drm_connector { * Periodically poll the connector for connection. * * DRM_CONNECTOR_POLL_DISCONNECT - * Periodically poll the connector for disconnection. + * Periodically poll the connector for disconnection, without + * causing flickering even when the connector is in use. DACs should + * rarely do this without a lot of testing. * * Set to 0 for connectors that don't support connection status * discovery. */ uint8_t polled; - /* requested DPMS state */ + /** + * @dpms: Current dpms state. For legacy drivers the + * &drm_connector_funcs.dpms callback must update this. For atomic + * drivers, this is handled by the core atomic code, and drivers must + * only take &drm_crtc_state.active into account. + */ int dpms; + /** @helper_private: mid-layer private data */ const struct drm_connector_helper_funcs *helper_private; - /* forced on connector */ + /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */ struct drm_cmdline_mode cmdline_mode; + /** @force: a DRM_FORCE_<foo> state for forced mode sets */ enum drm_connector_force force; + /** @override_edid: has the EDID been overwritten through debugfs for testing? */ bool override_edid; #define DRM_CONNECTOR_MAX_ENCODER 3 + /** + * @encoder_ids: Valid encoders for this connector. Please only use + * drm_connector_for_each_possible_encoder() to enumerate these. + */ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; + /** * @encoder: Currently bound encoder driving this connector, if any. * Only really meaningful for non-atomic drivers. Atomic drivers should @@ -968,19 +984,37 @@ struct drm_connector { struct drm_encoder *encoder; #define MAX_ELD_BYTES 128 - /* EDID bits */ + /** @eld: EDID-like data, if present */ uint8_t eld[MAX_ELD_BYTES]; + /** @latency_present: AV delay info from ELD, if found */ bool latency_present[2]; - int video_latency[2]; /* [0]: progressive, [1]: interlaced */ + /** + * @video_latency: Video latency info from ELD, if found.
+ * [0]: progressive, [1]: interlaced + */ + int video_latency[2]; + /** + * @audio_latency: audio latency info from ELD, if found + * [0]: progressive, [1]: interlaced + */ int audio_latency[2]; - int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ + /** + * @null_edid_counter: track sinks that give us all zeros for the EDID. + * Needed to workaround some HW bugs where we get all 0s + */ + int null_edid_counter; + + /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */ unsigned bad_edid_counter; - /* Flag for raw EDID header corruption - used in Displayport - * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6 + /** + * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used + * in Displayport compliance testing - Displayport Link CTS Core 1.2 + * rev1.1 4.2.2.6 */ bool edid_corrupt; + /** @debugfs_entry: debugfs directory for this connector */ struct dentry *debugfs_entry; /** @@ -988,7 +1022,7 @@ struct drm_connector { * * Current atomic state for this connector. * - * This is protected by @drm_mode_config.connection_mutex. Note that + * This is protected by &drm_mode_config.connection_mutex. Note that * nonblocking atomic commits access the current connector state without * taking locks. Either by going through the &struct drm_atomic_state * pointers, see for_each_oldnew_connector_in_state(), @@ -999,19 +1033,44 @@ struct drm_connector { */ struct drm_connector_state *state; - /* DisplayID bits */ + /* DisplayID bits. FIXME: Extract into a substruct? */ + + /** + * @tile_blob_ptr: + * + * DRM blob property data for the tile property (used mostly by DP MST). + * This is meant for screens which are driven through separate display + * pipelines represented by &drm_crtc, which might not be running with + * genlocked clocks. For tiled panels which are genlocked, like + * dual-link LVDS or dual-link DSI, the driver should try to not expose + * the tiling and virtualize both &drm_crtc and &drm_plane if needed. + * + * This should only be updated by calling + * drm_connector_set_tile_property(). + */ + struct drm_property_blob *tile_blob_ptr; + + /** @has_tile: is this connector connected to a tiled monitor */ bool has_tile; + /** @tile_group: tile group for the connected monitor */ struct drm_tile_group *tile_group; + /** @tile_is_single_monitor: whether the tile is one monitor housing */ bool tile_is_single_monitor; + /** @num_h_tile: number of horizontal tiles in the tile group */ + /** @num_v_tile: number of vertical tiles in the tile group */ uint8_t num_h_tile, num_v_tile; + /** @tile_h_loc: horizontal location of this tile */ + /** @tile_v_loc: vertical location of this tile */ uint8_t tile_h_loc, tile_v_loc; + /** @tile_h_size: horizontal size of this tile. */ + /** @tile_v_size: vertical size of this tile. */ uint16_t tile_h_size, tile_v_size; /** * @free_node: * - * List used only by &drm_connector_iter to be able to clean up a + * List used only by &drm_connector_list_iter to be able to clean up a * connector from any context, in conjunction with * &drm_mode_config.connector_free_work. 
*/ @@ -1026,7 +1085,7 @@ int drm_connector_init(struct drm_device *dev, int connector_type); int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); -int drm_mode_connector_attach_encoder(struct drm_connector *connector, +int drm_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); void drm_connector_cleanup(struct drm_connector *connector); @@ -1132,13 +1191,13 @@ void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, int drm_mode_create_suggested_offset_properties(struct drm_device *dev); -int drm_mode_connector_set_path_property(struct drm_connector *connector, - const char *path); -int drm_mode_connector_set_tile_property(struct drm_connector *connector); -int drm_mode_connector_update_edid_property(struct drm_connector *connector, - const struct edid *edid); -void drm_mode_connector_set_link_status_property(struct drm_connector *connector, - uint64_t link_status); +int drm_connector_set_path_property(struct drm_connector *connector, + const char *path); +int drm_connector_set_tile_property(struct drm_connector *connector); +int drm_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid); +void drm_connector_set_link_status_property(struct drm_connector *connector, + uint64_t link_status); int drm_connector_init_panel_orientation_property( struct drm_connector *connector, int width, int height); @@ -1187,6 +1246,9 @@ struct drm_connector * drm_connector_list_iter_next(struct drm_connector_list_iter *iter); void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); +bool drm_connector_has_possible_encoder(struct drm_connector *connector, + struct drm_encoder *encoder); + /** * drm_for_each_connector_iter - connector_list iterator macro * @connector: &struct drm_connector pointer used as cursor @@ -1199,4 +1261,17 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); #define drm_for_each_connector_iter(connector, iter) \ while ((connector = drm_connector_list_iter_next(iter))) +/** + * drm_connector_for_each_possible_encoder - iterate connector's possible encoders + * @connector: &struct drm_connector pointer + * @encoder: &struct drm_encoder pointer used as cursor + * @__i: int iteration cursor, for macro-internal use + */ +#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \ + for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \ + (connector)->encoder_ids[(__i)] != 0; (__i)++) \ + for_each_if((encoder) = \ + drm_encoder_find((connector)->dev, NULL, \ + (connector)->encoder_ids[(__i)])) \ + #endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 23eddbccab10..92e7fc7f05a4 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -77,21 +77,6 @@ struct drm_plane_helper_funcs; /** * struct drm_crtc_state - mutable CRTC state - * @crtc: backpointer to the CRTC - * @enable: whether the CRTC should be enabled, gates all other state - * @active: whether the CRTC is actively displaying (used for DPMS) - * @planes_changed: planes on this crtc are updated - * @mode_changed: @mode or @enable has been changed - * @active_changed: @active has been toggled. 
- * connectors_changed: connectors to this crtc have been updated - * @zpos_changed: zpos values of planes on this crtc have been updated - * @color_mgmt_changed: color management properties have changed (degamma or - * gamma LUT or CSC matrix) - * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes - * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors - * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders - * @mode_blob: &drm_property_blob for @mode - * @state: backpointer to global drm_atomic_state * * Note that the distinction between @enable and @active is rather subtile: * Flipping @active while @enable is set without changing anything else may @@ -102,31 +87,127 @@ struct drm_plane_helper_funcs; * * The three booleans active_changed, connectors_changed and mode_changed are * intended to indicate whether a full modeset is needed, rather than strictly - * describing what has changed in a commit. - * See also: drm_atomic_crtc_needs_modeset() + * describing what has changed in a commit. See also: + * drm_atomic_crtc_needs_modeset() + * + * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or + * drm_helper_crtc_mode_set_base()) do not maintain much of the derived control + * state like @plane_mask so drivers not converted over to atomic helpers should + * not rely on these being accurate! + */ struct drm_crtc_state { + /** @crtc: backpointer to the CRTC */ struct drm_crtc *crtc; + /** + * @enable: Whether the CRTC should be enabled, gates all other state. + * This controls reservations of shared resources. Actual hardware state + * is controlled by @active. + */ bool enable; + + /** + * @active: Whether the CRTC is actively displaying (used for DPMS). + * Implies that @enable is set. The driver must not release any shared + * resources if @active is set to false but @enable still true, because + * userspace expects that a DPMS ON always succeeds. + * + * Hence drivers must not consult @active in their various + * &drm_mode_config_funcs.atomic_check callback to reject an atomic + * commit. They can consult it to aid in the computation of derived + * hardware state, since even in the DPMS OFF state the display hardware + * should be as much powered down as when the CRTC is completely + * disabled through setting @enable to false. + */ bool active; - /* computed state bits used by helpers and drivers */ + /** + * @planes_changed: Planes on this crtc are updated. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. + */ bool planes_changed : 1; + + /** + * @mode_changed: @mode or @enable has been changed. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + * + * Drivers are supposed to set this for any CRTC state changes that + * require a full modeset. They can also reset it to false if e.g. a + * @mode change can be done without a full modeset by only changing + * scaler settings. + */ bool mode_changed : 1; + + /** + * @active_changed: @active has been toggled. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + */ bool active_changed : 1; + + /** + * @connectors_changed: Connectors to this crtc have been updated, + * either in their state or routing. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset().
+ * + * Drivers are supposed to set this as-needed from their own atomic + * check code, e.g. from &drm_encoder_helper_funcs.atomic_check + */ bool connectors_changed : 1; + /** + * @zpos_changed: zpos values of planes on this crtc have been updated. + * Used by the atomic helpers and drivers to steer the atomic commit + * control flow. + */ bool zpos_changed : 1; + /** + * @color_mgmt_changed: Color management properties have changed + * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and + * drivers to steer the atomic commit control flow. + */ bool color_mgmt_changed : 1; - /* attached planes bitmask: - * WARNING: transitional helpers do not maintain plane_mask so - * drivers not converted over to atomic helpers should not rely - * on plane_mask being accurate! + /** + * @no_vblank: + * + * Reflects the ability of a CRTC to send VBLANK events. This state + * usually depends on the pipeline configuration, and the main usage + * is CRTCs feeding a writeback connector operating in oneshot mode. + * In this case the VBLANK event is only generated when a job is queued + * to the writeback connector, and we want the core to fake VBLANK + * events when this part of the pipeline hasn't changed but others had + * or when the CRTC and connectors are being disabled. + * + * __drm_atomic_helper_crtc_duplicate_state() will not reset the value + * from the current state, the CRTC driver is then responsible for + * updating this field when needed. + * + * Note that the combination of &drm_crtc_state.event == NULL and + * &drm_crtc_state.no_vblank == true is valid and usually used when the + * writeback connector attached to the CRTC has a new job queued. In + * this case the driver will send the VBLANK event on its own when the + * writeback job is complete. + */ + bool no_vblank : 1; + + /** + * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to + * this CRTC. */ u32 plane_mask; + /** + * @connector_mask: Bitmask of drm_connector_mask(connector) of + * connectors attached to this CRTC. + */ u32 connector_mask; + + /** + * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders + * attached to this CRTC. + */ u32 encoder_mask; /** @@ -136,7 +217,7 @@ struct drm_crtc_state { * differences between the mode requested by userspace in @mode and what * is actually programmed into the hardware. * - * For drivers using drm_bridge, this stores hardware display timings + * For drivers using &drm_bridge, this stores hardware display timings * used between the CRTC and the first bridge. For other drivers, the * meaning of the adjusted_mode field is purely driver implementation * defined information, and will usually be used to store the hardware @@ -161,7 +242,10 @@ struct drm_crtc_state { */ struct drm_display_mode mode; - /* blob property to expose current mode to atomic userspace */ + /** + * @mode_blob: &drm_property_blob for @mode, for exposing the mode to + * atomic userspace. + */ struct drm_property_blob *mode_blob; /** @@ -265,6 +349,7 @@ struct drm_crtc_state { */ struct drm_crtc_commit *commit; + /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; }; @@ -724,35 +809,25 @@ struct drm_crtc_funcs { /** * struct drm_crtc - central CRTC control structure - * @dev: parent DRM device - * @port: OF node used by drm_of_find_possible_crtcs() - * @head: list management - * @name: human readable name, can be overwritten by the driver - * @mutex: per-CRTC locking - * @base: base KMS object for ID tracking etc.
- * @primary: primary plane for this CRTC - * @cursor: cursor plane for this CRTC - * @cursor_x: current x position of the cursor, used for universal cursor planes - * @cursor_y: current y position of the cursor, used for universal cursor planes - * @enabled: is this CRTC enabled? - * @mode: current mode timings - * @hwmode: mode timings as programmed to hw regs - * @x: x position on screen - * @y: y position on screen - * @funcs: CRTC control functions - * @gamma_size: size of gamma ramp - * @gamma_store: gamma ramp values - * @helper_private: mid-layer private data - * @properties: property tracking for this CRTC * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. */ struct drm_crtc { + /** @dev: parent DRM device */ struct drm_device *dev; + /** @port: OF node used by drm_of_find_possible_crtcs(). */ struct device_node *port; + /** + * @head: + * + * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list. + * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ struct list_head head; + /** @name: human readable name, can be overwritten by the driver */ char *name; /** @@ -767,10 +842,25 @@ struct drm_crtc { */ struct drm_modeset_lock mutex; + /** @base: base KMS object for ID tracking etc. */ struct drm_mode_object base; - /* primary and cursor planes for CRTC */ + /** + * @primary: + * Primary plane for this CRTC. Note that this is only + * relevant for legacy IOCTL, it specifies the plane implicitly used by + * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance + * beyond that. + */ struct drm_plane *primary; + + /** + * @cursor: + * Cursor plane for this CRTC. Note that this is only relevant for + * legacy IOCTL, it specifies the plane implicitly used by the SETCURSOR + * and SETCURSOR2 IOCTLs. It does not have any significance + * beyond that. + */ struct drm_plane *cursor; /** @@ -779,30 +869,94 @@ struct drm_crtc { */ unsigned index; - /* position of cursor plane on crtc */ + /** + * @cursor_x: Current x position of the cursor, used for universal + * cursor planes because the SETCURSOR IOCTL only can update the + * framebuffer without supplying the coordinates. Drivers should not use + * this directly, atomic drivers should look at &drm_plane_state.crtc_x + * of the cursor plane instead. + */ int cursor_x; + /** + * @cursor_y: Current y position of the cursor, used for universal + * cursor planes because the SETCURSOR IOCTL only can update the + * framebuffer without supplying the coordinates. Drivers should not use + * this directly, atomic drivers should look at &drm_plane_state.crtc_y + * of the cursor plane instead. + */ int cursor_y; + /** + * @enabled: + * + * Is this CRTC enabled? Should only be used by legacy drivers, atomic + * drivers should instead consult &drm_crtc_state.enable and + * &drm_crtc_state.active. Atomic drivers can update this by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ bool enabled; - /* Requested mode from modesetting. */ + /** + * @mode: + * + * Current mode timings. Should only be used by legacy drivers, atomic + * drivers should instead consult &drm_crtc_state.mode. Atomic drivers + * can update this by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ struct drm_display_mode mode; - /* Programmed mode in hw, after adjustments for encoders, - * crtc, panel scaling etc. Needed for timestamping etc. 
+ /** + * @hwmode: + * + * Programmed mode in hw, after adjustments for encoders, crtc, panel + * scaling etc. Should only be used by legacy drivers, for high + * precision vblank timestamps in + * drm_calc_vbltimestamp_from_scanoutpos(). + * + * Note that atomic drivers should not use this, but instead use + * &drm_crtc_state.adjusted_mode. And for high-precision timestamps + * drm_calc_vbltimestamp_from_scanoutpos() uses &drm_vblank_crtc.hwmode, + * which is filled out by calling drm_calc_timestamping_constants(). */ struct drm_display_mode hwmode; - int x, y; + /** + * @x: + * x position on screen. Should only be used by legacy drivers, atomic + * drivers should look at &drm_plane_state.crtc_x of the primary plane + * instead. Updated by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + int x; + /** + * @y: + * y position on screen. Should only be used by legacy drivers, atomic + * drivers should look at &drm_plane_state.crtc_y of the primary plane + * instead. Updated by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + int y; + + /** @funcs: CRTC control functions */ const struct drm_crtc_funcs *funcs; - /* Legacy FB CRTC gamma size for reporting to userspace */ + /** + * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up + * by calling drm_mode_crtc_set_gamma_size(). + */ uint32_t gamma_size; + + /** + * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and + * GETGAMMA IOCTLs. Set up by calling drm_mode_crtc_set_gamma_size(). + */ uint16_t *gamma_store; - /* if you are using the helper */ + /** @helper_private: mid-layer private data */ const struct drm_crtc_helper_funcs *helper_private; + /** @properties: property tracking for this CRTC */ struct drm_object_properties properties; /** @@ -872,7 +1026,6 @@ struct drm_crtc { * * spinlock to protect the fences in the fence_context. */ - spinlock_t fence_lock; /** * @fence_seqno: @@ -942,8 +1095,8 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc) * drm_crtc_mask - find the mask of a registered CRTC * @crtc: CRTC to find mask for * - * Given a registered CRTC, return the mask bit of that CRTC for an - * encoder's possible_crtcs field. + * Given a registered CRTC, return the mask bit of that CRTC for the + * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields. */ static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) { diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h index 7d63b1d4adb9..b225eeb30d05 100644 --- a/include/drm/drm_debugfs_crc.h +++ b/include/drm/drm_debugfs_crc.h @@ -43,6 +43,7 @@ struct drm_crtc_crc_entry { * @lock: protects the fields in this struct * @source: name of the currently configured source of CRCs * @opened: whether userspace has opened the data file for reading + * @overflow: whether an overflow occurred.
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 858ba19a3e29..f9c6e0e3aec7 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -74,6 +74,27 @@ struct drm_device { struct mutex filelist_mutex; struct list_head filelist; + /** + * @filelist_internal: + * + * List of open DRM files for in-kernel clients. Protected by @filelist_mutex. + */ + struct list_head filelist_internal; + + /** + * @clientlist_mutex: + * + * Protects @clientlist access. + */ + struct mutex clientlist_mutex; + + /** + * @clientlist: + * + * List of in-kernel clients. Protected by @clientlist_mutex. + */ + struct list_head clientlist; + /** \name Memory management */ /*@{ */ struct list_head maplist; /**< Linked list of regions */ diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index c01564991a9f..05cc31b5db16 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1078,6 +1078,25 @@ struct drm_dp_aux_msg { size_t size; }; +struct cec_adapter; +struct edid; + +/** + * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX + * @lock: mutex protecting this struct + * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. + * @name: name of the CEC adapter + * @parent: parent device of the CEC adapter + * @unregister_work: delayed work used to unregister the CEC adapter + */ +struct drm_dp_aux_cec { + struct mutex lock; + struct cec_adapter *adap; + const char *name; + struct device *parent; + struct delayed_work unregister_work; +}; + /** * struct drm_dp_aux - DisplayPort AUX channel * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter @@ -1136,6 +1155,10 @@ struct drm_dp_aux { * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. */ unsigned i2c_defer_count; + /** + * @cec: struct containing fields used for CEC-Tunneling-over-AUX. + */ + struct drm_dp_aux_cec cec; };
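The helpers declared just below (drm_dp_cec_register_connector() and friends) are meant to follow the connector's EDID lifecycle. A plausible call sequence, with error handling elided and the aux channel assumed to live in a driver-private connector struct (foo_* hypothetical):

/* Sketch: tie CEC-Tunneling-over-AUX to the connector lifecycle. */
static void foo_dp_connector_register(struct drm_connector *connector,
				      struct drm_dp_aux *aux)
{
	/* once, when the connector is registered */
	drm_dp_cec_register_connector(aux, connector->name,
				      connector->dev->dev);
}

/* then, from detect/hotplug handling:
 *   drm_dp_cec_set_edid(aux, edid);         after reading the sink's EDID
 *   drm_dp_cec_irq(aux);                    on HPD short pulses
 *   drm_dp_cec_unset_edid(aux);             when the sink goes away
 *   drm_dp_cec_unregister_connector(aux);   at teardown
 */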
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, @@ -1258,4 +1281,37 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) return desc->quirks & BIT(quirk); } +#ifdef CONFIG_DRM_DP_CEC +void drm_dp_cec_irq(struct drm_dp_aux *aux); +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name, + struct device *parent); +void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); +void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); +#else +static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux, + const char *name, + struct device *parent) +{ +} + +static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, + const struct edid *edid) +{ +} + +static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) +{ +} + +#endif + #endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 7e545f5f94d3..46a8009784df 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -649,6 +649,35 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev) return true; } +/** + * drm_core_check_feature - check driver feature flags + * @dev: DRM device to check + * @feature: feature flag + * + * This checks @dev for driver features, see &drm_driver.driver_features and the + * various DRIVER_\* flags. + * + * Returns true if the @feature is supported, false otherwise. + */ +static inline bool drm_core_check_feature(struct drm_device *dev, int feature) +{ + return dev->driver->driver_features & feature; +} + +/** + * drm_drv_uses_atomic_modeset - check if the driver implements + * atomic_commit() + * @dev: DRM device + * + * This check is useful if drivers do not have DRIVER_ATOMIC set but + * implement atomic modesetting internally. + */ +static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) +{ + return drm_core_check_feature(dev, DRIVER_ATOMIC) || + dev->mode_config.funcs->atomic_commit != NULL; +} +
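drm_core_check_feature() is typically used as an early guard in code paths that only make sense for a given driver class. A minimal, hypothetical example:

/* Sketch: reject a GEM-only ioctl on drivers without GEM support. */
static int foo_gem_info_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EINVAL;

	/* ... driver-specific query elided ... */
	return 0;
}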
int drm_dev_set_unique(struct drm_device *dev, const char *name); diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index d532f88a8d55..96e26e3b9a0c 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -16,16 +16,10 @@ struct drm_mode_fb_cmd2; struct drm_plane; struct drm_plane_state; -int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int max_conn_count, - const struct drm_framebuffer_funcs *funcs); int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp, unsigned int max_conn_count); void drm_fb_cma_fbdev_fini(struct drm_device *dev); -struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int max_conn_count, - const struct drm_framebuffer_funcs *funcs); struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, unsigned int preferred_bpp, unsigned int max_conn_count); void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index b069433e7fc1..5db08c8f1d25 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -32,6 +32,7 @@ struct drm_fb_helper; +#include <drm/drm_client.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <linux/kgdb.h> @@ -154,6 +155,20 @@ struct drm_fb_helper_connector { * operations. */ struct drm_fb_helper { + /** + * @client: + * + * DRM client used by the generic fbdev emulation. + */ + struct drm_client_dev client; + + /** + * @buffer: + * + * Framebuffer used by the generic fbdev emulation. + */ + struct drm_client_buffer *buffer; + struct drm_framebuffer *fb; struct drm_device *dev; int crtc_count; @@ -234,6 +249,12 @@ struct drm_fb_helper { int preferred_bpp; }; +static inline struct drm_fb_helper * +drm_fb_helper_from_client(struct drm_client_dev *client) +{ + return container_of(client, struct drm_fb_helper, client); +} + /** * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers * @@ -330,6 +351,10 @@ void drm_fb_helper_fbdev_teardown(struct drm_device *dev); void drm_fb_helper_lastclose(struct drm_device *dev); void drm_fb_helper_output_poll_changed(struct drm_device *dev); + +int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); +int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp); #else static inline void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, @@ -564,6 +589,19 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) { } +static inline int +drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + return 0; +} + +static inline int +drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) +{ + return 0; +} + #endif static inline int diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 3e86408dac9f..f9c15845f465 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -39,6 +39,7 @@ struct drm_mode_fb_cmd2; * @hsub: Horizontal chroma subsampling factor * @vsub: Vertical chroma subsampling factor * @has_alpha: Does the format embed an alpha component? + * @is_yuv: Is it a YUV format? */
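The new @is_yuv flag lets drivers drop their private lists of YUV fourccs. A small sketch of the kind of test it replaces:

/* Sketch: decide whether the pipeline needs a YUV->RGB conversion. */
static bool foo_needs_csc(const struct drm_framebuffer *fb)
{
	/* fb->format is the cached &drm_format_info; for a raw fourcc,
	 * drm_format_info(fourcc) performs the same lookup. */
	return fb->format->is_yuv;
}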
struct drm_format_info { u32 format; @@ -48,6 +49,7 @@ struct drm_format_info { u8 hsub; u8 vsub; bool has_alpha; + bool is_yuv; }; /** diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index b159fe07fcf9..baded6514456 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -530,7 +530,7 @@ drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); -void drm_mode_connector_list_update(struct drm_connector *connector); +void drm_connector_list_update(struct drm_connector *connector); /* parsing cmdline modes */ bool diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 3b289773297c..61142aa0ab23 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -785,7 +785,7 @@ struct drm_connector_helper_funcs { * * This function should fill in all modes currently valid for the sink * into the &drm_connector.probed_modes list. It should also update the - * EDID property by calling drm_mode_connector_update_edid_property(). + * EDID property by calling drm_connector_update_edid_property(). * * The usual way to implement this is to cache the EDID retrieved in the * probe callback somewhere in the driver-private connector structure. @@ -980,11 +980,15 @@ struct drm_connector_helper_funcs { * * This hook is to be used by drivers implementing writeback connectors * that need a point at which to commit the writeback job to the hardware. + * The writeback_job to commit is available in + * &drm_connector_state.writeback_job. + + This hook is optional. * * This callback is used by the atomic modeset helpers. */ void (*atomic_commit)(struct drm_connector *connector, - struct drm_writeback_job *writeback_job); + struct drm_connector_state *state); }; /** diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 26a1b5fd8796..582a0ec0aa70 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -200,7 +200,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np); #else static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) { - return NULL; + return ERR_PTR(-ENODEV); } #endif diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index cee9dfaaa740..8a152dc16ea5 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -34,31 +34,15 @@ struct drm_modeset_acquire_ctx; /** * struct drm_plane_state - mutable plane state - * @plane: backpointer to the plane - * @crtc_w: width of visible portion of plane on crtc - * @crtc_h: height of visible portion of plane on crtc - * @src_x: left position of visible portion of plane within - * plane (in 16.16) - * @src_y: upper position of visible portion of plane within - * plane (in 16.16) - * @src_w: width of visible portion of plane (in 16.16) - * @src_h: height of visible portion of plane (in 16.16) - * @alpha: opacity of the plane - * @rotation: rotation of the plane - * @zpos: priority of the given plane on crtc (optional) - * Note that multiple active planes on the same crtc can have an identical - * zpos value. The rule to solving the conflict is to compare the plane - * object IDs; the plane with a higher ID must be stacked on top of a - * plane with a lower ID. - * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1 - * where N is the number of active planes for given crtc. Note that - * the driver must set drm_mode_config.normalize_zpos or call - * drm_atomic_normalize_zpos() to update this before it can be trusted. - * @src: clipped source coordinates of the plane (in 16.16) - * @dst: clipped destination coordinates of the plane - * @state: backpointer to global drm_atomic_state
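One behavioral note on the drm_panel.h hunk above: the stub now returns ERR_PTR(-ENODEV) instead of NULL, so callers must switch from NULL checks to IS_ERR(). A sketch of the updated calling pattern (foo_* names hypothetical):

/* Sketch: probe-time panel lookup after the ERR_PTR() conversion. */
static int foo_attach_panel(struct foo_priv *priv, struct device_node *np)
{
	priv->panel = of_drm_find_panel(np);
	if (IS_ERR(priv->panel))
		return PTR_ERR(priv->panel);	/* e.g. -ENODEV when disabled */

	return drm_panel_attach(priv->panel, &priv->connector);
}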
+ * + * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and + * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the + * raw coordinates provided by userspace. Drivers should use + * drm_atomic_helper_check_plane_state() and only use the derived rectangles in + * @src and @dst to program the hardware. */ struct drm_plane_state { + /** @plane: backpointer to the plane */ struct drm_plane *plane; /** @@ -87,7 +71,7 @@ struct drm_plane_state { * preserved. * * Drivers should store any implicit fence in this from their - * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb() + * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb() * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers. */ struct dma_fence *fence; @@ -108,20 +92,60 @@ struct drm_plane_state { */ int32_t crtc_y; + /** @crtc_w: width of visible portion of plane on crtc */ + /** @crtc_h: height of visible portion of plane on crtc */ uint32_t crtc_w, crtc_h; - /* Source values are 16.16 fixed point */ - uint32_t src_x, src_y; + /** + * @src_x: left position of visible portion of plane within plane (in + * 16.16 fixed point). + */ + uint32_t src_x; + /** + * @src_y: upper position of visible portion of plane within plane (in + * 16.16 fixed point). + */ + uint32_t src_y; + /** @src_w: width of visible portion of plane (in 16.16) */ + /** @src_h: height of visible portion of plane (in 16.16) */ uint32_t src_h, src_w; - /* Plane opacity */ + /** + * @alpha: + * Opacity of the plane with 0 as completely transparent and 0xffff as + * completely opaque. See drm_plane_create_alpha_property() for more + * details. + */ u16 alpha; - /* Plane rotation */ + /** + * @rotation: + * Rotation of the plane. See drm_plane_create_rotation_property() for + * more details. + */ unsigned int rotation; - /* Plane zpos */ + /** + * @zpos: + * Priority of the given plane on crtc (optional). + * + * Note that multiple active planes on the same crtc can have an + * identical zpos value. The rule for resolving the conflict is to compare + * the plane object IDs; the plane with a higher ID must be stacked on + * top of a plane with a lower ID. + * + * See drm_plane_create_zpos_property() and + * drm_plane_create_zpos_immutable_property() for more details. + */ unsigned int zpos; + + /** + * @normalized_zpos: + * Normalized value of zpos: unique, range from 0 to N-1 where N is the + * number of active planes for given crtc. Note that the driver must set + * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to + * update this before it can be trusted. + */ unsigned int normalized_zpos;
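The note above about only trusting the derived @src/@dst rectangles usually translates into an atomic_check implementation along these lines (the scaling limits are driver policy; foo_* hypothetical):

/* Sketch: let the helper clip the raw coordinates and fill src/dst. */
static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;

	if (!state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	return drm_atomic_helper_check_plane_state(state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   true, true);
}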
/** @@ -138,7 +162,8 @@ struct drm_plane_state { */ enum drm_color_range color_range; - /* Clipped coordinates */ + /** @src: clipped source coordinates of the plane (in 16.16) */ + /** @dst: clipped destination coordinates of the plane */ struct drm_rect src, dst; /** @@ -157,6 +182,7 @@ */ struct drm_crtc_commit *commit; + /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; }; @@ -499,30 +525,27 @@ enum drm_plane_type { /** * struct drm_plane - central DRM plane control structure - * @dev: DRM device this plane belongs to - * @head: for list management - * @name: human readable name, can be overwritten by the driver - * @base: base mode object - * @possible_crtcs: pipes this plane can be bound to - * @format_types: array of formats supported by this plane - * @format_count: number of formats supported - * @format_default: driver hasn't supplied supported formats for the plane - * @modifiers: array of modifiers supported by this plane - * @modifier_count: number of modifiers supported - * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by - * drm_mode_set_config_internal() to implement correct refcounting. - * @funcs: helper functions - * @properties: property tracking for this plane - * @type: type of plane (overlay, primary, cursor) - * @alpha_property: alpha property for this plane - * @zpos_property: zpos property for this plane - * @rotation_property: rotation property for this plane - * @helper_private: mid-layer private data + * + * Planes represent the scanout hardware of a display block. They receive their + * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control + * how framebuffers are composited, see `Plane Composition Properties`_ for more + * details, and are also involved in the color conversion of input pixels, see + * `Color Management Properties`_ for details on that. */ struct drm_plane { + /** @dev: DRM device this plane belongs to */ struct drm_device *dev; + + /** + * @head: + * + * List of all planes on @dev, linked from &drm_mode_config.plane_list. + * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ struct list_head head; + /** @name: human readable name, can be overwritten by the driver */ char *name; /** @@ -536,35 +559,62 @@ */ struct drm_modeset_lock mutex; + /** @base: base mode object */ struct drm_mode_object base; + /** + * @possible_crtcs: pipes this plane can be bound to, constructed with + * drm_crtc_mask() + */ uint32_t possible_crtcs; + /** @format_types: array of formats supported by this plane */ uint32_t *format_types; + /** @format_count: Size of the array pointed at by @format_types. */ unsigned int format_count; + /** + * @format_default: driver hasn't supplied supported formats for the + * plane. Used by the drm_plane_init compatibility wrapper only. + */ bool format_default; + /** @modifiers: array of modifiers supported by this plane */ uint64_t *modifiers; + /** @modifier_count: Size of the array pointed at by @modifiers. */ unsigned int modifier_count; /** - * @crtc: Currently bound CRTC, only really meaningful for non-atomic - * drivers. Atomic drivers should instead check &drm_plane_state.crtc. + * @crtc: + * + * Currently bound CRTC, only meaningful for non-atomic drivers. For + * atomic drivers this is forced to be NULL; atomic drivers should + * instead check &drm_plane_state.crtc.
*/ struct drm_crtc *crtc; /** - * @fb: Currently bound framebuffer, only really meaningful for - * non-atomic drivers. Atomic drivers should instead check - * &drm_plane_state.fb. + * @fb: + * + * Currently bound framebuffer, only meaningful for non-atomic drivers. + * For atomic drivers this is forced to be NULL; atomic drivers should + * instead check &drm_plane_state.fb. */ struct drm_framebuffer *fb; + /** + * @old_fb: + * + * Temporary tracking of the old fb while a modeset is ongoing. Only + * used by non-atomic drivers, forced to be NULL for atomic drivers. + */ struct drm_framebuffer *old_fb; + /** @funcs: plane control functions */ const struct drm_plane_funcs *funcs; + /** @properties: property tracking for this plane */ struct drm_object_properties properties; + /** @type: Type of plane, see &enum drm_plane_type for details. */ enum drm_plane_type type; /** @@ -573,6 +623,7 @@ struct drm_plane { */ unsigned index; + /** @helper_private: mid-layer private data */ const struct drm_plane_helper_funcs *helper_private; /** @@ -590,8 +641,23 @@ */ struct drm_plane_state *state; + /** + * @alpha_property: + * Optional alpha property for this plane. See + * drm_plane_create_alpha_property(). + */ struct drm_property *alpha_property; + /** + * @zpos_property: + * Optional zpos property for this plane. See + * drm_plane_create_zpos_property(). + */ struct drm_property *zpos_property; + /** + * @rotation_property: + * Optional rotation property for this plane. See + * drm_plane_create_rotation_property(). + */ struct drm_property *rotation_property; /** diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index e1a46e9991cc..767c90b654c5 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -195,6 +195,7 @@ static inline struct drm_printer drm_debug_printer(const char *prefix) #define DRM_UT_VBL 0x20 #define DRM_UT_STATE 0x40 #define DRM_UT_LEASE 0x80 +#define DRM_UT_DP 0x100 __printf(3, 4) void drm_dev_printk(const struct device *dev, const char *level, @@ -307,6 +308,11 @@ void drm_err(const char *format, ...); #define DRM_DEBUG_LEASE(fmt, ...) \ drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__) +#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__) +#define DRM_DEBUG_DP(fmt, ...) \ + drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) + #define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(_rs, \ diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 1d5c0b2a8956..c030f6ccab99 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -147,10 +147,10 @@ struct drm_property { * properties are not exposed to legacy userspace. * * DRM_MODE_PROP_IMMUTABLE - * Set for properties where userspace cannot be changed by + * Set for properties whose values cannot be changed by * userspace. The kernel is allowed to update the value of these * properties. This is generally used to expose probe state to - * usersapce, e.g. the EDID, or the connector path property on DP + * userspace, e.g. the EDID, or the connector path property on DP * MST sinks.
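The three optional property pointers documented in the drm_plane hunk above are created through their respective helpers at plane init time; a condensed sketch (foo_* hypothetical):

/* Sketch: expose alpha, zpos and rotation on a freshly initialized plane. */
static int foo_plane_add_properties(struct drm_plane *plane,
				    unsigned int num_planes)
{
	int ret;

	ret = drm_plane_create_alpha_property(plane);
	if (ret)
		return ret;

	ret = drm_plane_create_zpos_property(plane, 0, 0, num_planes - 1);
	if (ret)
		return ret;

	return drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						  DRM_MODE_ROTATE_0 |
						  DRM_MODE_ROTATE_180);
}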
*/ uint32_t flags; diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index 8758df94e9a0..c7987daeaed0 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -41,6 +41,7 @@ struct drm_vma_offset_node { rwlock_t vm_lock; struct drm_mm_node vm_node; struct rb_root vm_files; + bool readonly:1; }; struct drm_vma_offset_manager { diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h index a10fe556dfd4..23df9d463003 100644 --- a/include/drm/drm_writeback.h +++ b/include/drm/drm_writeback.h @@ -110,6 +110,12 @@ struct drm_writeback_job { struct dma_fence *out_fence; }; +static inline struct drm_writeback_connector * +drm_connector_to_writeback(struct drm_connector *connector) +{ + return container_of(connector, struct drm_writeback_connector, base); +} + int drm_writeback_connector_init(struct drm_device *dev, struct drm_writeback_connector *wb_connector, const struct drm_connector_funcs *con_funcs, diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index c9e5a6621b95..c44703f471b3 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -95,7 +95,9 @@ extern struct resource intel_graphics_stolen_res; #define I845_TSEG_SIZE_512K (2 << 1) #define I845_TSEG_SIZE_1M (3 << 1) -#define INTEL_BSM 0x5c +#define INTEL_BSM 0x5c +#define INTEL_GEN11_BSM_DW0 0xc0 +#define INTEL_GEN11_BSM_DW1 0xc4 #define INTEL_BSM_MASK (-(1u << 20)) #endif /* _I915_DRM_H_ */ diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h index 56e4a916b5e8..fe9827d0ca8a 100644 --- a/include/drm/tinydrm/tinydrm.h +++ b/include/drm/tinydrm/tinydrm.h @@ -16,16 +16,31 @@ /** * struct tinydrm_device - tinydrm device - * @drm: DRM device - * @pipe: Display pipe structure - * @dirty_lock: Serializes framebuffer flushing - * @fb_funcs: Framebuffer functions used when creating framebuffers */ struct tinydrm_device { + /** + * @drm: DRM device + */ struct drm_device *drm; + + /** + * @pipe: Display pipe structure + */ struct drm_simple_display_pipe pipe; + + /** + * @dirty_lock: Serializes framebuffer flushing + */ struct mutex dirty_lock; + + /** + * @fb_funcs: Framebuffer functions used when creating framebuffers + */ const struct drm_framebuffer_funcs *fb_funcs; + + /** + * @fb_dirty: Framebuffer dirty callback + */ int (*fb_dirty)(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index d5e52350a3aa..d43949b5bb3e 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -183,6 +183,7 @@ extern "C" { #define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 #define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06 #define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07 +#define DRM_FORMAT_MOD_VENDOR_ARM 0x08 /* add more to the end as needed */ #define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) @@ -485,6 +486,88 @@ extern "C" { */ #define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6) +/* + * Arm Framebuffer Compression (AFBC) modifiers + * + * AFBC is a proprietary lossless image compression protocol and format. + * It provides fine-grained random access and minimizes the amount of data + * transferred between IP blocks. + * + * AFBC has several features which may be supported and/or used, which are + * represented using bits in the modifier. Not all combinations are valid, + * and different devices or use-cases may support different combinations. 
+ */ +#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode) + +/* + * AFBC superblock size + * + * Indicates the superblock size(s) used for the AFBC buffer. The buffer + * size (in pixels) must be aligned to a multiple of the superblock size. + * The four least significant bits (LSBs) are reserved for the block size. + */ +#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf +#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL) +#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL) + +/* + * AFBC lossless colorspace transform + * + * Indicates that the buffer makes use of the AFBC lossless colorspace + * transform. + */ +#define AFBC_FORMAT_MOD_YTR (1ULL << 4) + +/* + * AFBC block-split + * + * Indicates that the payload of each superblock is split. The second + * half of the payload is positioned at a predefined offset from the start + * of the superblock payload. + */ +#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5) + +/* + * AFBC sparse layout + * + * This flag indicates that the payload of each superblock must be stored at a + * predefined position relative to the other superblocks in the same AFBC + * buffer. This order is the same order used by the header buffer. In this mode + * each superblock is given the same amount of space as an uncompressed + * superblock of the particular format would require, rounding up to the next + * multiple of 128 bytes in size. + */ +#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6) + +/* + * AFBC copy-block restrict + * + * Buffers with this flag must obey the copy-block restriction. The restriction + * is such that there are no copy-blocks referring across the border of 8x8 + * blocks. For the subsampled data the 8x8 limitation is also subsampled. + */ +#define AFBC_FORMAT_MOD_CBR (1ULL << 7) + +/* + * AFBC tiled layout + * + * The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all + * superblocks inside a tile are stored together in memory. 8x8 tiles are used + * for pixel formats up to and including 32 bpp while 4x4 tiles are used for + * larger bpp formats. The tiles are laid out in scan-line order. + * When the tiled layout is used, the buffer size (in pixels) must be aligned + * to the tile size. + */ +#define AFBC_FORMAT_MOD_TILED (1ULL << 8) + +/* + * AFBC solid color blocks + * + * Indicates that the buffer makes use of solid-color blocks, whereby bandwidth + * can be reduced if a whole superblock is a single color. + */ +#define AFBC_FORMAT_MOD_SC (1ULL << 9) + #if defined(__cplusplus) } #endif
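To see how these feature bits compose, here is a sketch of a modifier list a driver might advertise when creating a plane; which combinations are actually valid is device-specific, and the foo_* name is hypothetical:

/* Sketch: one AFBC layout (16x16 superblocks + YTR + sparse) next to
 * plain linear, terminated for drm_universal_plane_init(). */
static const uint64_t foo_format_modifiers[] = {
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR |
				AFBC_FORMAT_MOD_SPARSE),
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};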