Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 20
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 267
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 670
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 543
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_regs.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 103
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 249
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 99
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h | 398
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/vi_structs.h | 417
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 33
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 5
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 121
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 48
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 16
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 12
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 36
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 16
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 41
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 15
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 93
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 135
-rw-r--r--  drivers/gpu/drm/drm_context.c | 51
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 239
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 75
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 1
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 19
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 63
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 379
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 115
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 334
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 2
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 6
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 59
-rw-r--r--  drivers/gpu/drm/drm_of.c | 2
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 23
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 45
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 65
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 94
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 123
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 186
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 25
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 286
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.h | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 111
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 28
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 110
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 138
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 174
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 77
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 106
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 348
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 41
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 107
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 1021
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 198
-rw-r--r--  drivers/gpu/drm/gma500/accel_2d.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 48
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 63
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 340
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 74
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 314
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 896
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 102
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 160
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 787
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 775
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 307
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 308
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 102
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 140
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 514
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 169
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 249
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 133
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 1131
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 4240
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 413
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 162
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 97
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 540
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 110
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 117
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 245
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 425
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 505
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 893
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 74
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 335
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 104
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 94
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 827
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 81
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 408
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 95
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 113
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 205
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 100
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 21
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_cursor.c | 22
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 41
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 16
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 9
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 18
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 33
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h | 206
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 18
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 18
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 58
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 43
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 211
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 92
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h | 44
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 270
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 216
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c (renamed from drivers/gpu/drm/msm/dsi/dsi_phy.c) | 413
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 89
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 150
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 166
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 42
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 9
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c | 31
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h | 22
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 17
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 79
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 32
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 28
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 16
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 101
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 1437
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 52
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 32
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c | 57
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 22
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 19
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 38
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 24
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 180
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 180
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 139
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 243
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 43
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 18
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 19
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 93
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 59
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 363
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 113
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 28
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_format.c | 46
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.c | 3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.h | 20
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 95
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 23
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 34
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 16
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 40
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 26
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 26
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 40
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 336
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 392
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 155
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 204
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 336
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 47
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 79
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 49
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/sti/Makefile | 7
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 141
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.h | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.c (renamed from drivers/gpu/drm/sti/sti_drm_crtc.c) | 211
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.h | 22
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.c | 243
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.h | 5
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_crtc.h | 22
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.c | 251
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.h | 18
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c (renamed from drivers/gpu/drm/sti/sti_drm_drv.c) | 147
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.h (renamed from drivers/gpu/drm/sti/sti_drm_drv.h) | 6
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 536
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.h | 7
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 27
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c | 482
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.h | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.c | 213
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.h | 131
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c | 72
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.h | 27
-rw-r--r--  drivers/gpu/drm/sti/sti_plane.c | 122
-rw-r--r--  drivers/gpu/drm/sti/sti_plane.h | 71
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 54
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.c | 72
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.h | 19
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 300
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 24
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 63
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 16
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 10
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c | 126
-rw-r--r--  drivers/gpu/drm/tegra/dsi.h | 4
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 35
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 78
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 20
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 49
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 1606
-rw-r--r--  drivers/gpu/drm/tegra/sor.h | 298
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 41
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fb.c | 32
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 2
-rw-r--r--  drivers/gpu/host1x/mipi.c | 253
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 3
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 95
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 142
371 files changed, 25660 insertions, 15711 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index beaedd78c4b9..86191586340f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -37,9 +37,29 @@ config DRM_KMS_FB_HELPER
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+ select FB_SYS_FOPS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
FBDEV helpers for KMS drivers.
+config DRM_FBDEV_EMULATION
+ bool "Enable legacy fbdev support for your modesetting driver"
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ default y
+ help
+ Choose this option if you have a need for the legacy fbdev
+ support. Note that this support also provides the linux console
+ support on top of your modesetting driver.
+
+ If in doubt, say "Y".
+
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
depends on DRM_KMS_HELPER
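The new DRM_FBDEV_EMULATION symbol lets KMS drivers compile out their legacy fbdev emulation paths in one place instead of sprinkling #ifdefs through driver code. A minimal sketch of the pattern this enables, using a hypothetical driver header (foo_fbdev_init is an illustrative name, not taken from this patch):

/* foo_fbdev.h -- illustrative sketch only */
struct drm_device;

#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
int foo_fbdev_init(struct drm_device *dev);
#else
/* fbdev emulation disabled: callers see a no-op stub */
static inline int foo_fbdev_init(struct drm_device *dev)
{
	return 0;
}
#endif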
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5713d0534504..8858510437ea 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_OF) += drm_of.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 616dfd4a1398..908360584e4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -71,6 +71,12 @@ amdgpu-y += \
amdgpu_vce.o \
vce_v3_0.o
+# add amdkfd interfaces
+amdgpu-y += \
+ amdgpu_amdkfd.o \
+ amdgpu_amdkfd_gfx_v7.o \
+ amdgpu_amdkfd_gfx_v8.o
+
amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..baefa635169a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
uint32_t me_feature_version;
uint32_t ce_feature_version;
uint32_t pfp_feature_version;
+ uint32_t rlc_feature_version;
+ uint32_t mec_feature_version;
+ uint32_t mec2_feature_version;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1614,6 +1617,9 @@ struct amdgpu_uvd {
#define AMDGPU_MAX_VCE_HANDLES 16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256
+#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+
struct amdgpu_vce {
struct amdgpu_bo *vcpu_bo;
uint64_t gpu_addr;
@@ -1626,6 +1632,7 @@ struct amdgpu_vce {
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
+ unsigned harvest_config;
};
/*
@@ -1635,6 +1642,7 @@ struct amdgpu_sdma {
/* SDMA firmware */
const struct firmware *fw;
uint32_t fw_version;
+ uint32_t feature_version;
struct amdgpu_ring ring;
};
@@ -1862,6 +1870,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+ bool hw;
+};
+
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -2004,13 +2018,16 @@ struct amdgpu_device {
const struct amdgpu_ip_block_version *ip_blocks;
int num_ip_blocks;
- bool *ip_block_enabled;
+ struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
/* tracking pinned memory */
u64 vram_pin_size;
u64 gart_pin_size;
+
+ /* amdkfd interface */
+ struct kfd_dev *kfd;
};
bool amdgpu_device_is_px(struct drm_device *dev);
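Replacing the old ip_block_enabled bool array with struct amdgpu_ip_block_status records, per IP block, whether the block is present and enabled (valid) and whether its software and hardware init steps actually completed (sw, hw), so teardown and suspend paths can skip blocks that never came up. An illustrative check of the kind this enables (a sketch, not verbatim from this patch):

	if (!adev->ip_block_status[i].hw)
		continue;	/* hw_init never ran for this block; nothing to tear down */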
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
new file mode 100644
index 000000000000..bc763e0c8f4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_family.h"
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include <linux/module.h>
+
+const struct kfd2kgd_calls *kfd2kgd;
+const struct kgd2kfd_calls *kgd2kfd;
+bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+bool amdgpu_amdkfd_init(void)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+ bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+ kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+ if (kgd2kfd_init_p == NULL)
+ return false;
+#endif
+ return true;
+}
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+ bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+#endif
+
+ switch (rdev->asic_type) {
+ case CHIP_KAVERI:
+ kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
+ break;
+ case CHIP_CARRIZO:
+ kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+ break;
+ default:
+ return false;
+ }
+
+#if defined(CONFIG_HSA_AMD_MODULE)
+ kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+ if (kgd2kfd_init_p == NULL) {
+ kfd2kgd = NULL;
+ return false;
+ }
+
+ if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+ symbol_put(kgd2kfd_init);
+ kfd2kgd = NULL;
+ kgd2kfd = NULL;
+
+ return false;
+ }
+
+ return true;
+#elif defined(CONFIG_HSA_AMD)
+ if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+ kfd2kgd = NULL;
+ kgd2kfd = NULL;
+ return false;
+ }
+
+ return true;
+#else
+ kfd2kgd = NULL;
+ return false;
+#endif
+}
+
+void amdgpu_amdkfd_fini(void)
+{
+ if (kgd2kfd) {
+ kgd2kfd->exit();
+ symbol_put(kgd2kfd_init);
+ }
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+{
+ if (kgd2kfd)
+ rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+ rdev->pdev, kfd2kgd);
+}
+
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+{
+ if (rdev->kfd) {
+ struct kgd2kfd_shared_resources gpu_resources = {
+ .compute_vmid_bitmap = 0xFF00,
+
+ .first_compute_pipe = 1,
+ .compute_pipe_count = 4 - 1,
+ };
+
+ amdgpu_doorbell_get_kfd_info(rdev,
+ &gpu_resources.doorbell_physical_address,
+ &gpu_resources.doorbell_aperture_size,
+ &gpu_resources.doorbell_start_offset);
+
+ kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+ }
+}
+
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+{
+ if (rdev->kfd) {
+ kgd2kfd->device_exit(rdev->kfd);
+ rdev->kfd = NULL;
+ }
+}
+
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+ const void *ih_ring_entry)
+{
+ if (rdev->kfd)
+ kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+{
+ if (rdev->kfd)
+ kgd2kfd->suspend(rdev->kfd);
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+{
+ int r = 0;
+
+ if (rdev->kfd)
+ r = kgd2kfd->resume(rdev->kfd);
+
+ return r;
+}
+
+u32 pool_to_domain(enum kgd_memory_pool p)
+{
+ switch (p) {
+ case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
+ default: return AMDGPU_GEM_DOMAIN_GTT;
+ }
+}
+
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr)
+{
+ struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+ struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+ int r;
+
+ BUG_ON(kgd == NULL);
+ BUG_ON(gpu_addr == NULL);
+ BUG_ON(cpu_ptr == NULL);
+
+ *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if ((*mem) == NULL)
+ return -ENOMEM;
+
+ r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+ if (r) {
+ dev_err(rdev->dev,
+ "failed to allocate BO for amdkfd (%d)\n", r);
+ return r;
+ }
+
+ /* map the buffer */
+ r = amdgpu_bo_reserve((*mem)->bo, true);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+ goto allocate_mem_reserve_bo_failed;
+ }
+
+ r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
+ &(*mem)->gpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+ goto allocate_mem_pin_bo_failed;
+ }
+ *gpu_addr = (*mem)->gpu_addr;
+
+ r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+ if (r) {
+ dev_err(rdev->dev,
+ "(%d) failed to map bo to kernel for amdkfd\n", r);
+ goto allocate_mem_kmap_bo_failed;
+ }
+ *cpu_ptr = (*mem)->cpu_ptr;
+
+ amdgpu_bo_unreserve((*mem)->bo);
+
+ return 0;
+
+allocate_mem_kmap_bo_failed:
+ amdgpu_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+ amdgpu_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+ amdgpu_bo_unref(&(*mem)->bo);
+
+ return r;
+}
+
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+{
+ struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
+
+ BUG_ON(mem == NULL);
+
+ amdgpu_bo_reserve(mem->bo, true);
+ amdgpu_bo_kunmap(mem->bo);
+ amdgpu_bo_unpin(mem->bo);
+ amdgpu_bo_unreserve(mem->bo);
+ amdgpu_bo_unref(&(mem->bo));
+ kfree(mem);
+}
+
+uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *rdev =
+ (struct amdgpu_device *)kgd;
+
+ BUG_ON(kgd == NULL);
+
+ return rdev->mc.real_vram_size;
+}
+
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+ if (rdev->asic_funcs->get_gpu_clock_counter)
+ return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+ return 0;
+}
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+ /* The sclk is in quantas of 10kHz */
+ return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
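As the comment in get_max_engine_clock_in_mhz() notes, the stored sclk is in units of 10 kHz, so dividing by 100 yields MHz; for example, a stored sclk of 80000 corresponds to 80000 / 100 = 800 MHz.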
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
new file mode 100644
index 000000000000..a8be765542e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
+
+#ifndef AMDGPU_AMDKFD_H_INCLUDED
+#define AMDGPU_AMDKFD_H_INCLUDED
+
+#include <linux/types.h>
+#include <kgd_kfd_interface.h>
+
+struct amdgpu_device;
+
+struct kgd_mem {
+ struct amdgpu_bo *bo;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
+};
+
+bool amdgpu_amdkfd_init(void);
+void amdgpu_amdkfd_fini(void);
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+ const void *ih_ring_entry);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+
+/* Shared API */
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr);
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint64_t get_vmem_size(struct kgd_dev *kgd);
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+#endif /* AMDGPU_AMDKFD_H_INCLUDED */
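Taken together, these declarations imply a simple lifecycle for the amdgpu/amdkfd coupling. A rough sketch of the call order (illustrative; the actual call sites live in the amdgpu core files touched elsewhere in this patch and are not shown in this excerpt):

/*
 * module load:     amdgpu_amdkfd_init()
 * device probe:    amdgpu_amdkfd_load_interface(rdev), then
 *                  amdgpu_amdkfd_device_probe(rdev)
 * device init:     amdgpu_amdkfd_device_init(rdev)
 * each IH entry:   amdgpu_amdkfd_interrupt(rdev, ih_ring_entry)
 * suspend/resume:  amdgpu_amdkfd_suspend(rdev) / amdgpu_amdkfd_resume(rdev)
 * device teardown: amdgpu_amdkfd_device_fini(rdev)
 * module unload:   amdgpu_amdkfd_fini()
 */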
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
new file mode 100644
index 000000000000..dd2037bc0b4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "cikd.h"
+#include "cik_sdma.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "cik_structs.h"
+
+#define CIK_PIPE_PER_MEC (4)
+
+enum {
+ MAX_TRAPID = 8, /* 3 bits in the bitfield. */
+ MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+ ADDRESS_WATCH_REG_ADDR_HI = 0,
+ ADDRESS_WATCH_REG_ADDR_LO,
+ ADDRESS_WATCH_REG_CNTL,
+ ADDRESS_WATCH_REG_MAX
+};
+
+/* not defined in the CI/KV reg file */
+enum {
+ ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+ ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+ /* extend the mask to 26 bits to match the low address field */
+ ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+ ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
+ mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
+ mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
+ mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
+ mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
+};
+
+union TCP_WATCH_CNTL_BITS {
+ struct {
+ uint32_t mask:24;
+ uint32_t vmid:4;
+ uint32_t atc:1;
+ uint32_t mode:2;
+ uint32_t valid:1;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+ uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+ .init_gtt_mem_allocation = alloc_gtt_mem,
+ .free_gtt_mem = free_gtt_mem,
+ .get_vmem_size = get_vmem_size,
+ .get_gpu_clock_counter = get_gpu_clock_counter,
+ .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_pipeline = kgd_init_pipeline,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
+ .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
+ .write_vmid_invalidate_request = write_vmid_invalidate_request,
+ .get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+{
+ return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+ return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+ mutex_lock(&adev->srbm_mutex);
+ WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ WREG32(mmSRBM_GFX_CNTL, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+ unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+ WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+ WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+ unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+ * a mapping is in progress or because a mapping finished and the
+ * SW cleared it. So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+ WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+ while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+ cpu_relax();
+ WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+ /* Mapping vmid to pasid also for IH block */
+ WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+ return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, 0, 0);
+ WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
+ WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
+ WREG32(mmCP_HPD_EOP_VMID, 0);
+ WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
+ unlock_srbm(kgd);
+
+ return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
+ pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, 0, 0);
+
+ WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+ unlock_srbm(kgd);
+
+ return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+ uint32_t retval;
+
+ retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+ m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+ pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+ return retval;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+ return (struct cik_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t wptr_shadow, is_wptr_shadow_valid;
+ struct cik_mqd *m;
+
+ m = get_mqd(mqd);
+
+ is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+ WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+ WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+
+ WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+ WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+ WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+ WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+ WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+ WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+ WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+ WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+ WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+ WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+ WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
+ WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
+ WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
+ WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
+
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ m->cp_hqd_pq_rptr_report_addr_hi);
+
+ WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
+
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+ WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+
+ WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+ WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+ WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+ WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+ if (is_wptr_shadow_valid)
+ WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
+
+ WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+ release_queue(kgd);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+ m->sdma_rlc_rb_base);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdma_rlc_rb_base_hi);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdma_rlc_rb_rptr_addr_lo);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdma_rlc_rb_rptr_addr_hi);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+ m->sdma_rlc_doorbell);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl);
+
+ return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ act = RREG32(mmCP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+ high == RREG32(mmCP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ release_queue(kgd);
+ return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t temp;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+ while (true) {
+ temp = RREG32(mmCP_HQD_ACTIVE);
+ if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ break;
+ if (timeout == 0) {
+ pr_err("kfd: cp queue preemption time out (%dms)\n",
+ temp);
+ release_queue(kgd);
+ return -ETIME;
+ }
+ msleep(20);
+ timeout -= 20;
+ }
+
+ release_queue(kgd);
+ return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t temp;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ break;
+ if (timeout == 0)
+ return -ETIME;
+ msleep(20);
+ timeout -= 20;
+ }
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+ return 0;
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ union TCP_WATCH_CNTL_BITS cntl;
+ unsigned int i;
+
+ cntl.u32All = 0;
+
+ cntl.bitfields.valid = 0;
+ cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+ cntl.bitfields.atc = 1;
+
+ /* Turning off this address until we set all the registers */
+ for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
+ WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+ return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ union TCP_WATCH_CNTL_BITS cntl;
+
+ cntl.u32All = cntl_val;
+
+ /* Turning off this watch point until we set all the registers */
+ cntl.bitfields.valid = 0;
+ WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+ WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
+
+ WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
+
+ /* Enable the watch point */
+ cntl.bitfields.valid = 1;
+
+ WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+ return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t data;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32(mmSQ_CMD, sq_cmd);
+
+ /* Restore the GRBM_GFX_INDEX register */
+
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
+ GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+ GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
+
+ WREG32(mmGRBM_GFX_INDEX, data);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset)
+{
+ return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+ uint8_t vmid)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+ uint8_t vmid)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ const union amdgpu_firmware_header *hdr;
+
+ BUG_ON(kgd == NULL);
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.pfp_fw->data;
+ break;
+
+ case KGD_ENGINE_ME:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.me_fw->data;
+ break;
+
+ case KGD_ENGINE_CE:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.ce_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC1:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.mec_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC2:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.mec2_fw->data;
+ break;
+
+ case KGD_ENGINE_RLC:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.rlc_fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA1:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->sdma[0].fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA2:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->sdma[1].fw->data;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (hdr == NULL)
+ return 0;
+
+ /* Only 12 bit in use*/
+ return hdr->common.ucode_version;
+}
+
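The address-watch helpers above index one flat register table: watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset], with ADDRESS_WATCH_REG_MAX = 3 registers per watch point. For example, the CNTL register of watch point 2 lives at watchRegs[2 * 3 + 2] = watchRegs[8] = mmTCP_WATCH2_CNTL.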
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
new file mode 100644
index 000000000000..dfd1d503bccf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "vi_structs.h"
+#include "vid.h"
+
+#define VI_PIPE_PER_MEC (4)
+
+struct cik_sdma_rlc_registers;
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+ uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+ uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+ .init_gtt_mem_allocation = alloc_gtt_mem,
+ .free_gtt_mem = free_gtt_mem,
+ .get_vmem_size = get_vmem_size,
+ .get_gpu_clock_counter = get_gpu_clock_counter,
+ .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_pipeline = kgd_init_pipeline,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_pasid =
+ get_atc_vmid_pasid_mapping_pasid,
+ .get_atc_vmid_pasid_mapping_valid =
+ get_atc_vmid_pasid_mapping_valid,
+ .write_vmid_invalidate_request = write_vmid_invalidate_request,
+ .get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
+{
+ return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+ return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+ mutex_lock(&adev->srbm_mutex);
+ WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ WREG32(mmSRBM_GFX_CNTL, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
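+/*
+ * Select the HQD registers of one (MEC, pipe, queue) slot. The flat KFD
+ * pipe_id is pre-incremented so that KFD pipe 0 lands on MEC1 pipe 1;
+ * hardware pipe 0 of MEC1 stays reserved for amdgpu's own compute rings.
+ */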
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+ unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+ WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+ WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+ unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+ * a mapping is in progress or because a mapping finished
+ * and the SW cleared it.
+ * So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+ WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+ while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+ cpu_relax();
+ WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+ /* Mapping vmid to pasid also for IH block */
+ WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+ return 0;
+}
+
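+/* no-op: on VI the compute pipe/HPD setup appears to be handled by amdgpu's gfx code */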
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+ return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+ pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, 0, 0);
+
+ WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+
+ unlock_srbm(kgd);
+
+ return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+ return 0;
+}
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+ return (struct vi_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ struct vi_mqd *m;
+ uint32_t shadow_wptr, valid_wptr;
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ m = get_mqd(mqd);
+
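+	/*
+	 * copy_from_user() returns the number of bytes left uncopied,
+	 * so 0 means the user-space write pointer was read successfully.
+	 */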
+ valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+ WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+ WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+
+ WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+ WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+ WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+ WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+ WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+ WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+ WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ m->cp_hqd_pq_rptr_report_addr_hi);
+
+	if (valid_wptr == 0)
+ WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
+
+ WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+ WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
+ WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
+ WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
+ WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+ WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+ WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
+
+ WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
+ WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
+ WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
+ WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
+ WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
+ WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
+ WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
+
+ WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
+ WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
+ WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+ WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
+
+ WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+
+ release_queue(kgd);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+ return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ act = RREG32(mmCP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+ high == RREG32(mmCP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ release_queue(kgd);
+ return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t temp;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
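+	/* poll until the CP reports the queue as no longer active */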
+ while (true) {
+ temp = RREG32(mmCP_HQD_ACTIVE);
+		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ if (timeout == 0) {
+			pr_err("kfd: cp queue preemption timed out\n");
+ release_queue(kgd);
+ return -ETIME;
+ }
+ msleep(20);
+ timeout -= 20;
+ }
+
+ release_queue(kgd);
+ return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t temp;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
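+	/* wait for the RLC queue to report idle before resetting its ring */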
+ while (true) {
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ break;
+ if (timeout == 0)
+ return -ETIME;
+ msleep(20);
+ timeout -= 20;
+ }
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+ return 0;
+}
+
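+/* read back the VMID<->PASID mapping programmed by kgd_set_pasid_vmid_mapping() */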
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+ uint8_t vmid)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+ uint8_t vmid)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
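+/* address watch (shader debug watchpoint) support is not implemented for VI here */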
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+ return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo)
+{
+ return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t data = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32(mmSQ_CMD, sq_cmd);
+
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SH_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SE_BROADCAST_WRITES, 1);
+
+ WREG32(mmGRBM_GFX_INDEX, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset)
+{
+ return 0;
+}
+
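+/* report the ucode version of the engine amdkfd asks about */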
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ const union amdgpu_firmware_header *hdr;
+
+ BUG_ON(kgd == NULL);
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.pfp_fw->data;
+ break;
+
+ case KGD_ENGINE_ME:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.me_fw->data;
+ break;
+
+ case KGD_ENGINE_CE:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.ce_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC1:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.mec_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC2:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.mec2_fw->data;
+ break;
+
+ case KGD_ENGINE_RLC:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.rlc_fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA1:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->sdma[0].fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA2:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->sdma[1].fw->data;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (hdr == NULL)
+ return 0;
+
+	/* Only 12 bits of the version are used */
+ return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d63135bf29c0..1f040d85ac47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
struct amdgpu_cs_parser *p)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_ib *ib;
int i, j, r;
@@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_fence *fence;
struct amdgpu_ring *ring;
+ struct amdgpu_ctx *ctx;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
if (r)
return r;
+ ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
r = amdgpu_fence_recreate(ring, p->filp,
deps[j].handle,
&fence);
- if (r)
+ if (r) {
+ amdgpu_ctx_put(ctx);
return r;
+ }
amdgpu_sync_fence(&ib->sync, fence);
amdgpu_fence_unref(&fence);
+ amdgpu_ctx_put(ctx);
}
}
@@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
wait->in.ring, &ring);
- if (r)
+ if (r) {
+ amdgpu_ctx_put(ctx);
return r;
+ }
r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
- if (r)
+ if (r) {
+ amdgpu_ctx_put(ctx);
return r;
+ }
r = fence_wait_timeout(&fence->base, true, timeout);
amdgpu_fence_unref(&fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ba46be361c9b..99f158e1baff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
- if (adev->ip_block_enabled == NULL)
+ adev->ip_block_status = kcalloc(adev->num_ip_blocks,
+ sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
+ if (adev->ip_block_status == NULL)
return -ENOMEM;
if (adev->ip_blocks == NULL) {
@@ -1203,14 +1204,19 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
- adev->ip_block_enabled[i] = false;
+ adev->ip_block_status[i].valid = false;
} else {
if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev);
- if (r)
+ if (r == -ENOENT)
+ adev->ip_block_status[i].valid = false;
+ else if (r)
return r;
+ else
+ adev->ip_block_status[i].valid = true;
+ } else {
+ adev->ip_block_status[i].valid = true;
}
- adev->ip_block_enabled[i] = true;
}
}
@@ -1222,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r)
return r;
+ adev->ip_block_status[i].sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
@@ -1238,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = amdgpu_wb_init(adev);
if (r)
return r;
+ adev->ip_block_status[i].hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].sw)
continue;
/* gmc hw init is done early */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1250,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r)
return r;
+ adev->ip_block_status[i].hw = true;
}
return 0;
@@ -1260,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
/* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1282,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].hw)
continue;
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
@@ -1295,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return r;
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */
+ adev->ip_block_status[i].hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].sw)
continue;
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */
- adev->ip_block_enabled[i] = false;
+ adev->ip_block_status[i].sw = false;
+ adev->ip_block_status[i].valid = false;
}
return 0;
@@ -1313,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1331,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
if (r)
@@ -1577,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- kfree(adev->ip_block_enabled);
- adev->ip_block_enabled = NULL;
+ kfree(adev->ip_block_status);
+ adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 56da962231fc..115906f5fda0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -44,6 +44,8 @@
#include "amdgpu.h"
#include "amdgpu_irq.h"
+#include "amdgpu_amdkfd.h"
+
/*
* KMS wrapper.
* - 3.0.0 - initial driver
@@ -527,12 +529,15 @@ static int __init amdgpu_init(void)
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
+ amdgpu_amdkfd_init();
+
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
static void __exit amdgpu_exit(void)
{
+ amdgpu_amdkfd_fini();
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index c1645d21f8e2..81b821247dde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -53,9 +53,9 @@ static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -179,7 +179,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *rbo = NULL;
- struct device *device = &adev->pdev->dev;
int ret;
unsigned long tmp;
@@ -201,9 +200,9 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
rbo = gem_to_amdgpu_bo(gobj);
/* okay we have an object now allocate the framebuffer */
- info = framebuffer_alloc(0, device);
- if (info == NULL) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unref;
}
@@ -212,14 +211,13 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
- goto out_unref;
+ goto out_destroy_fbi;
}
fb = &rfbdev->rfb.base;
/* setup helper */
rfbdev->helper.fb = fb;
- rfbdev->helper.fbdev = info;
memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
@@ -239,11 +237,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = adev->mc.aper_size;
@@ -251,13 +244,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
if (info->screen_base == NULL) {
ret = -ENOSPC;
- goto out_unref;
- }
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unref;
+ goto out_destroy_fbi;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
@@ -269,6 +256,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
return 0;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_unref:
if (rbo) {
@@ -290,17 +279,10 @@ void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
- struct fb_info *info;
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
- if (rfbdev->helper.fbdev) {
- info = rfbdev->helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&rfbdev->helper);
+ drm_fb_helper_release_fbi(&rfbdev->helper);
if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
@@ -395,7 +377,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
if (adev->mode_info.rfbdev)
- fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
+ state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 975edb1000a2..4afc507820c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
- timeout = ktime_sub_ns(ktime_get(), timeout_ns);
+ timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
if (ktime_to_ns(timeout) < 0)
return 0;
@@ -449,7 +449,7 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va)
+ struct amdgpu_bo_va *bo_va, uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_unlock;
- r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+
+ if (operation == AMDGPU_VA_OP_MAP)
+ r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
- amdgpu_gem_va_update_vm(adev, bo_va);
+ amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
drm_gem_object_unreference_unlocked(gobj);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 52dff75aac6f..bc0fac618a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
if (vm) {
/* do context switch */
amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
- }
- if (vm && ring->funcs->emit_gds_switch)
- amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
- ib->gds_base, ib->gds_size,
- ib->gws_base, ib->gws_size,
- ib->oa_base, ib->oa_size);
+ if (ring->funcs->emit_gds_switch)
+ amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+ ib->gds_base, ib->gds_size,
+ ib->gws_base, ib->gws_size,
+ ib->oa_base, ib->oa_size);
- if (ring->funcs->emit_hdp_flush)
- amdgpu_ring_emit_hdp_flush(ring);
+ if (ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+ }
old_ctx = ring->current_ctx;
for (i = 0; i < num_ibs; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index db5422e65ec5..fb44dd2231b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,6 +24,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
+#include "amdgpu_amdkfd.h"
/**
* amdgpu_ih_ring_alloc - allocate memory for the IH ring
@@ -199,6 +200,12 @@ restart_ih:
rmb();
while (adev->irq.ih.rptr != wptr) {
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+
+ /* Before dispatching irq to IP blocks, send it to amdkfd */
+ amdgpu_amdkfd_interrupt(adev,
+ (const void *) &adev->irq.ih.ring[ring_index]);
+
amdgpu_ih_decode_iv(adev, &entry);
adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8f..93000af92283 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -34,6 +34,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include "amdgpu_amdkfd.h"
#if defined(CONFIG_VGA_SWITCHEROO)
bool amdgpu_has_atpx(void);
@@ -61,6 +62,8 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
pm_runtime_get_sync(dev->dev);
+ amdgpu_amdkfd_device_fini(adev);
+
amdgpu_acpi_fini(adev);
amdgpu_device_fini(adev);
@@ -118,6 +121,10 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
+ amdgpu_amdkfd_load_interface(adev);
+ amdgpu_amdkfd_device_probe(adev);
+ amdgpu_amdkfd_device_init(adev);
+
if (amdgpu_device_is_px(dev)) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -235,7 +242,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].type == type &&
- adev->ip_block_enabled[i]) {
+ adev->ip_block_status[i].valid) {
ip.hw_ip_version_major = adev->ip_blocks[i].major;
ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
ip.capabilities_flags = 0;
@@ -274,7 +281,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].type == type &&
- adev->ip_block_enabled[i] &&
+ adev->ip_block_status[i].valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@@ -317,16 +324,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_INFO_FW_GFX_RLC:
fw_info.ver = adev->gfx.rlc_fw_version;
- fw_info.feature = 0;
+ fw_info.feature = adev->gfx.rlc_feature_version;
break;
case AMDGPU_INFO_FW_GFX_MEC:
- if (info->query_fw.index == 0)
+ if (info->query_fw.index == 0) {
fw_info.ver = adev->gfx.mec_fw_version;
- else if (info->query_fw.index == 1)
+ fw_info.feature = adev->gfx.mec_feature_version;
+ } else if (info->query_fw.index == 1) {
fw_info.ver = adev->gfx.mec2_fw_version;
- else
+ fw_info.feature = adev->gfx.mec2_feature_version;
+ } else
return -EINVAL;
- fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_SMC:
fw_info.ver = adev->pm.fw_version;
@@ -336,7 +344,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (info->query_fw.index >= 2)
return -EINVAL;
fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
- fw_info.feature = 0;
+ fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
break;
default:
return -EINVAL;
@@ -416,7 +424,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
- struct drm_amdgpu_info_device dev_info;
+ struct drm_amdgpu_info_device dev_info = {};
struct amdgpu_cu_info cu_info;
dev_info.device_id = dev->pdev->device;
@@ -459,6 +467,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width;
+ dev_info.vce_harvest_config = adev->vce.harvest_config;
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..f5c22556ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
unsigned height_in_mb = ALIGN(height / 16, 2);
unsigned fs_in_mb = width_in_mb * height_in_mb;
- unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+ unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
image_size = width * height;
image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
min_dpb_size = image_size * num_dpb_buffer;
+ min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+ * 16 * num_dpb_buffer + 52 * 1024;
break;
default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
+ buf_sizes[0x4] = min_ctx_size;
return 0;
}
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
return -EINVAL;
}
+ } else if (cmd == 0x206) {
+ if ((end - start) < ctx->buf_sizes[4]) {
+			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
+ (unsigned)(end - start),
+ ctx->buf_sizes[4]);
+ return -EINVAL;
+ }
} else if ((cmd != 0x100) && (cmd != 0x204)) {
DRM_ERROR("invalid UVD command %X!\n", cmd);
return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_uvd_cs_ctx ctx = {};
unsigned buf_sizes[] = {
[0x00000000] = 2048,
- [0x00000001] = 32 * 1024 * 1024,
- [0x00000002] = 2048 * 1152 * 3,
+ [0x00000001] = 0xFFFFFFFF,
+ [0x00000002] = 0xFFFFFFFF,
[0x00000003] = 2048,
+ [0x00000004] = 0xFFFFFFFF,
};
struct amdgpu_ib *ib = &parser->ibs[ib_idx];
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 341c56681841..b3b66a0d5ff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -64,6 +64,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_amdkfd.h"
+
/*
* Indirect registers accessor
*/
@@ -2448,14 +2450,21 @@ static int cik_common_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_amdkfd_suspend(adev);
+
return cik_common_hw_fini(adev);
}
static int cik_common_resume(void *handle)
{
+ int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return cik_common_hw_init(adev);
+ r = cik_common_hw_init(adev);
+ if (r)
+ return r;
+
+ return amdgpu_amdkfd_resume(adev);
}
static bool cik_common_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ab83cc1ca4cc..15df46c93f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
fw_data = (const __le32 *)
(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index d19085a97064..a3e3dfaa01a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -552,6 +552,12 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
+/* if PTR32, these are the bases for scratch and lds */
+#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
+#define SHARED_BASE(x) ((x) << 16) /* LDS */
+
+#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
+
/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
enum {
MTYPE_CACHED = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f75a31df30bd..ace870afc7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m)
{
+ struct cz_power_info *pi = cz_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 current_index =
- (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- u32 sclk, tmp;
- u16 vddc;
-
- if (current_index >= NUM_SCLK_LEVELS) {
- seq_printf(m, "invalid dpm profile %d\n", current_index);
+ struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+ &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+ u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+ u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+ u32 sclk, vclk, dclk, ecclk, tmp;
+ u16 vddnb, vddgfx;
+
+ if (sclk_index >= NUM_SCLK_LEVELS) {
+ seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
} else {
- sclk = table->entries[current_index].clk;
- tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
- vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
- seq_printf(m, "power level %d sclk: %u vddc: %u\n",
- current_index, sclk, vddc);
+ sclk = table->entries[sclk_index].clk;
+ seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
+ }
+
+ tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+ vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+ vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
+
+ seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+ if (!pi->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
+ } else {
+ vclk = uvd_table->entries[uvd_index].vclk;
+ dclk = uvd_table->entries[uvd_index].dclk;
+ seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
+ }
+ }
+
+ seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
+ if (!pi->vce_power_gated) {
+ if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ seq_printf(m, "invalid vce dpm level %d\n", vce_index);
+ } else {
+ ecclk = vce_table->entries[vce_index].ecclk;
+ seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
+ }
}
}
@@ -1679,25 +1717,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
if (ret)
return ret;
- DRM_INFO("DPM unforce state min=%d, max=%d.\n",
- pi->sclk_dpm.soft_min_clk,
- pi->sclk_dpm.soft_max_clk);
+ DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
+ pi->sclk_dpm.soft_min_clk,
+ pi->sclk_dpm.soft_max_clk);
return 0;
}
static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
- enum amdgpu_dpm_forced_level level)
+ enum amdgpu_dpm_forced_level level)
{
int ret = 0;
switch (level) {
case AMDGPU_DPM_FORCED_LEVEL_HIGH:
+ ret = cz_dpm_unforce_dpm_levels(adev);
+ if (ret)
+ return ret;
ret = cz_dpm_force_highest(adev);
if (ret)
return ret;
break;
case AMDGPU_DPM_FORCED_LEVEL_LOW:
+ ret = cz_dpm_unforce_dpm_levels(adev);
+ if (ret)
+ return ret;
ret = cz_dpm_force_lowest(adev);
if (ret)
return ret;
@@ -1711,6 +1755,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
break;
}
+ adev->pm.dpm.forced_level = level;
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5cde635978f9..e70a26f587a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v10_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v10_0_crtc_load_lut(crtc);
break;
@@ -3403,19 +3407,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
dce_v10_0_crtc_vblank_int_ack(adev, crtc);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
dce_v10_0_crtc_vline_int_ack(adev, crtc);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 95efd98b202d..dcb402ee048a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v11_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v11_0_crtc_load_lut(crtc);
break;
@@ -3402,19 +3406,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
dce_v11_0_crtc_vline_int_ack(adev, crtc);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index aaca8d663f2c..cc050a329c49 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v8_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v8_0_crtc_load_lut(crtc);
break;
@@ -3237,19 +3241,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..0d8bf2cb1956 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
* sheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU.
*/
-static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
- if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
- (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- !need_ctx_switch)
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
- control |= INDIRECT_BUFFER_VALID;
-
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+ if (need_ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+ if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
+static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 header, control = 0;
+ u32 next_rptr = ring->wptr + 5;
+
+ control |= INDIRECT_BUFFER_VALID;
+ next_rptr += 4;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, next_rptr);
+
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw |
+ (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
/**
* gfx_v7_0_ring_test_ib - basic ring IB test
*
@@ -3056,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(
+ mec_hdr->ucode_feature_version);
gfx_v7_0_cp_compute_enable(adev, false);
@@ -3078,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version = le32_to_cpu(
+ mec2_hdr->ucode_feature_version);
/* MEC2 */
fw_data = (const __le32 *)
@@ -4042,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(
+ hdr->ucode_feature_version);
gfx_v7_0_rlc_stop(adev);
@@ -5098,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
RREG32(mmCP_HPD_EOP_CONTROL));
- for (queue = 0; queue < 8; i++) {
+ for (queue = 0; queue < 8; queue++) {
cik_srbm_select(adev, me, pipe, queue, 0);
dev_info(adev->dev, " queue: %d\n", queue);
dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
@@ -5555,7 +5585,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL,
- .emit_ib = gfx_v7_0_ring_emit_ib,
+ .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5601,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL,
- .emit_ib = gfx_v7_0_ring_emit_ib,
+ .emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 7b683fb2173c..20e2cfd521d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
DRM_DEBUG("\n");
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version = le32_to_cpu(
+ cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version = le32_to_cpu(
+ cp_hdr->ucode_feature_version);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
@@ -1813,10 +1835,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
u32 data, mask;
data = RREG32(mmCC_RB_BACKEND_DISABLE);
- if (data & 1)
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- else
- data = 0;
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
@@ -1986,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 2;
switch (adev->pdev->revision) {
case 0xc4:
@@ -1994,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
case 0xcc:
/* B10 */
adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc5:
case 0x81:
@@ -2003,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
case 0xcd:
/* B8 */
adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc6:
case 0xca:
case 0xce:
/* B6 */
adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc7:
case 0x87:
@@ -2018,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
default:
/* B4 */
adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
break;
}
@@ -2278,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
- adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2364,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
- adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
- adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
- adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
- adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
gfx_v8_0_cp_gfx_enable(adev, false);
@@ -2625,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
- adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
@@ -2644,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
- adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
fw_data = (const __le32 *)
(adev->gfx.mec2_fw->data +
@@ -3128,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
- 0x7FFFF << 2);
+ AMDGPU_DOORBELL_MEC_RING7 << 2);
}
tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3756,7 +3763,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
-static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3764,15 +3771,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
- if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
- (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- !need_ctx_switch)
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
- control |= INDIRECT_BUFFER_VALID;
-
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+ if (need_ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -3783,7 +3785,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+ if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -3806,6 +3808,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
+static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 header, control = 0;
+ u32 next_rptr = ring->wptr + 5;
+
+ control |= INDIRECT_BUFFER_VALID;
+
+ next_rptr += 4;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, next_rptr);
+
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw |
+ (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
@@ -4227,7 +4259,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL,
- .emit_ib = gfx_v8_0_ring_emit_ib,
+ .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4243,7 +4275,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL,
- .emit_ib = gfx_v8_0_ring_emit_ib,
+ .emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d7895885fe0c..a988dfb1d394 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
int err, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->sdma[i].fw);
if (err)
goto out;
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
fw_data = (const __le32 *)
(adev->sdma[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7bb37b93993f..2b86569b18d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
int err, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->sdma[i].fw);
if (err)
goto out;
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
fw_data = (const __le32 *)
(adev->sdma[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39c..d1064ca3670e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -35,6 +35,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
+
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
if(idx == 0)
WREG32_P(mmGRBM_GFX_INDEX, 0,
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
return 0;
}
+#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
+#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
+#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
+
+static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ unsigned ret;
+
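+ /* APUs report VCE harvesting through a fuse macro; dGPUs expose it in CC_HARVEST_FUSES */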
+ if (adev->flags & AMDGPU_IS_APU)
+ tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+ VCE_HARVEST_FUSE_MACRO__MASK) >>
+ VCE_HARVEST_FUSE_MACRO__SHIFT;
+ else
+ tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
+ CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
+ CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
+
+ switch (tmp) {
+ case 1:
+ ret = AMDGPU_VCE_HARVEST_VCE0;
+ break;
+ case 2:
+ ret = AMDGPU_VCE_HARVEST_VCE1;
+ break;
+ case 3:
+ ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int vce_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
+
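+ /* nothing to bring up if both VCE instances are harvested */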
+ if ((adev->vce.harvest_config &
+ (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
+ (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
+ return -ENOENT;
+
vce_v3_0_set_ring_funcs(adev);
vce_v3_0_set_irq_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index fa5a4448531d..68552da40287 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
+/* smu_8_0_d.h */
+#define mmMP0PUB_IND_INDEX 0x180
+#define mmMP0PUB_IND_DATA 0x181
+
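+/* Carrizo (APU) reaches the SMC registers through the MP0PUB indirect index/data pair */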
+static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmMP0PUB_IND_INDEX, (reg));
+ r = RREG32(mmMP0PUB_IND_DATA);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return r;
+}
+
+static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmMP0PUB_IND_INDEX, (reg));
+ WREG32(mmMP0PUB_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
@@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle)
bool smc_enabled = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->smc_rreg = &vi_smc_rreg;
- adev->smc_wreg = &vi_smc_wreg;
+ if (adev->flags & AMDGPU_IS_APU) {
+ adev->smc_rreg = &cz_smc_rreg;
+ adev->smc_wreg = &cz_smc_wreg;
+ } else {
+ adev->smc_rreg = &vi_smc_rreg;
+ adev->smc_wreg = &vi_smc_wreg;
+ }
adev->pcie_rreg = &vi_pcie_rreg;
adev->pcie_wreg = &vi_pcie_wreg;
adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 31bb89452e12..d98aa9d82fa1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -66,6 +66,11 @@
#define AMDGPU_NUM_OF_VMIDS 8
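+/* helpers for building an ME/pipe/queue/VMID selection value (e.g. for SRBM_GFX_CNTL) */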
+#define PIPEID(x) ((x) << 0)
+#define MEID(x) ((x) << 2)
+#define VMID(x) ((x) << 4)
+#define QUEUEID(x) ((x) << 8)
+
#define RB_BITMAP_WIDTH_PER_SH 2
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 8dfac37ff327..e13c67c8d2c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,6 @@
config HSA_AMD
tristate "HSA kernel driver for AMD GPU devices"
- depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+ depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 28551153ec6d..7fc9b0f444cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -2,7 +2,8 @@
# Makefile for Heterogeneous System Architecture support for AMD GPU devices
#
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/ \
+ -Idrivers/gpu/drm/amd/include/asic_reg
amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 183be5b8414f..48769d12dd7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -65,17 +65,6 @@
#define AQL_ENABLE 1
-#define SDMA_RB_VMID(x) (x << 24)
-#define SDMA_RB_ENABLE (1 << 0)
-#define SDMA_RB_SIZE(x) ((x) << 1) /* log2 */
-#define SDMA_RPTR_WRITEBACK_ENABLE (1 << 12)
-#define SDMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
-#define SDMA_OFFSET(x) (x << 0)
-#define SDMA_DB_ENABLE (1 << 28)
-#define SDMA_ATC (1 << 0)
-#define SDMA_VA_PTR32 (1 << 4)
-#define SDMA_VA_SHARED_BASE(x) (x << 8)
-
#define GRBM_GFX_INDEX 0x30800
#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 75312c82969f..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -80,7 +80,12 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x1318, &kaveri_device_info }, /* Kaveri */
{ 0x131B, &kaveri_device_info }, /* Kaveri */
{ 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info } /* Kaveri */
+ { 0x131D, &kaveri_device_info }, /* Kaveri */
+ { 0x9870, &carrizo_device_info }, /* Carrizo */
+ { 0x9874, &carrizo_device_info }, /* Carrizo */
+ { 0x9875, &carrizo_device_info }, /* Carrizo */
+ { 0x9876, &carrizo_device_info }, /* Carrizo */
+ { 0x9877, &carrizo_device_info } /* Carrizo */
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 9ce8a20a7aff..c6f435aa803f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -23,6 +23,7 @@
#include "kfd_device_queue_manager.h"
#include "cik_regs.h"
+#include "oss/oss_2_4_sh_mask.h"
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -135,13 +136,16 @@ static int register_process_cik(struct device_queue_manager *dqm,
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
- uint32_t value = SDMA_ATC;
+ uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
if (q->process->is_32bit_user_mode)
- value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+ value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+ get_sh_mem_bases_32(qpd_to_pdd(qpd));
else
- value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
- qpd_to_pdd(qpd)));
+ value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
q->properties.sdma_vm_addr = value;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 4c15212a3899..7e9cae9d349b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -22,6 +22,10 @@
*/
#include "kfd_device_queue_manager.h"
+#include "gca/gfx_8_0_enum.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -37,14 +41,40 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
{
- pr_warn("amdkfd: VI DQM is not currently supported\n");
-
ops->set_cache_memory_policy = set_cache_memory_policy_vi;
ops->register_process = register_process_vi;
ops->initialize = initialize_cpsch_vi;
ops->init_sdma_vm = init_sdma_vm;
}
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+ /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+ * scratch and GPUVM apertures.
+ * The hardware fills in the remaining 59 bits according to the
+ * following pattern:
+ * LDS: X0000000'00000000 - X0000001'00000000 (4GB)
+ * Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
+ * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
+ *
+ * (where X/Y is the configurable nybble with the low-bit 0)
+ *
+ * LDS and scratch will have the same top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+ * GPUVM can have a different top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+ * We don't bother to support different top nybbles
+ * for LDS/Scratch and GPUVM.
+ */
+
+ BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ top_address_nybble == 0);
+
+ return top_address_nybble << 12 |
+ (top_address_nybble << 12) <<
+ SH_MEM_BASES__SHARED_BASE__SHIFT;
+}
+
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
@@ -52,18 +82,83 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
- return false;
+ uint32_t default_mtype;
+ uint32_t ape1_mtype;
+
+ default_mtype = (default_policy == cache_policy_coherent) ?
+ MTYPE_CC :
+ MTYPE_NC;
+
+ ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+ MTYPE_CC :
+ MTYPE_NC;
+
+ qpd->sh_mem_config = (qpd->sh_mem_config &
+ SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+ default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+ ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+ SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+ return true;
}
static int register_process_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- return -1;
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+ BUG_ON(!dqm || !qpd);
+
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+ if (qpd->sh_mem_config == 0) {
+ qpd->sh_mem_config =
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+ MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+ MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+ SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ }
+
+ if (qpd->pqm->process->is_32bit_user_mode) {
+ temp = get_sh_mem_bases_32(pdd);
+ qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
+ qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
+ SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+ } else {
+ temp = get_sh_mem_bases_nybble_64(pdd);
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
+ SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+ }
+
+ pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+
+ return 0;
}
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
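+ /* same SDMA aperture setup as the CIK variant: enable ATC, then program 32- or 64-bit bases */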
+ uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
+
+ if (q->process->is_32bit_user_mode)
+ value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+ get_sh_mem_bases_32(qpd_to_pdd(qpd));
+ else
+ value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
+ q->properties.sdma_vm_addr = value;
}
static int initialize_cpsch_vi(struct device_queue_manager *dqm)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 434979428fc0..d83de985e88c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -27,6 +27,7 @@
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "cik_structs.h"
+#include "oss/oss_2_4_sh_mask.h"
static inline struct cik_mqd *get_mqd(void *mqd)
{
@@ -214,17 +215,20 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
BUG_ON(!mm || !mqd || !q);
m = get_sdma_mqd(mqd);
- m->sdma_rlc_rb_cntl =
- SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
- SDMA_RB_VMID(q->vmid) |
- SDMA_RPTR_WRITEBACK_ENABLE |
- SDMA_RPTR_WRITEBACK_TIMER(6);
+ m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
+ SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+ 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+ 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
+ m->sdma_rlc_doorbell = q->doorbell_off <<
+ SDMA0_RLC0_DOORBELL__OFFSET__SHIFT |
+ 1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT;
+
m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
m->sdma_engine_id = q->sdma_engine_id;
@@ -234,7 +238,9 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
- m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
+ m->sdma_rlc_rb_cntl |=
+ 1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT;
+
q->is_active = true;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index b3a7e3ba1e38..fa32c32fa1c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -22,12 +22,255 @@
*/
#include <linux/printk.h>
+#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
+#include "vi_structs.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+ return (struct vi_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ int retval;
+ uint64_t addr;
+ struct vi_mqd *m;
+
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd),
+ mqd_mem_obj);
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct vi_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+
+ memset(m, 0, sizeof(struct vi_mqd));
+
+ m->header = 0xC0310800;
+ m->compute_pipelinestat_enable = 1;
+ m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+ m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+
+ m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT |
+ MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT;
+
+ m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+ m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+ m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
+ 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+ 10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+ m->cp_hqd_pipe_priority = 1;
+ m->cp_hqd_queue_priority = 15;
+
+ m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
+ m->cp_hqd_iq_rptr = 1;
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr)
+{
+ return mm->dev->kfd2kgd->hqd_load
+ (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+
+static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q, unsigned int mtype,
+ unsigned int atc_bit)
+{
+ struct vi_mqd *m;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ m = get_mqd(mqd);
+
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
+ atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
+ mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
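+ /* the low bits of CP_HQD_PQ_CONTROL encode the ring size as log2(size in dwords) - 1 */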
+ m->cp_hqd_pq_control |=
+ ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+
+ m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+
+ m->cp_hqd_pq_doorbell_control =
+ 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
+ q->doorbell_off <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+ pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+ m->cp_hqd_pq_doorbell_control);
+
+ m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
+ mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT;
+
+ m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT |
+ 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+ mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
+
+ m->cp_hqd_eop_control |=
+ ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_eop_base_addr_lo =
+ lower_32_bits(q->eop_ring_buffer_address >> 8);
+ m->cp_hqd_eop_base_addr_hi =
+ upper_32_bits(q->eop_ring_buffer_address >> 8);
+
+ m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT |
+ mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT;
+
+ m->cp_hqd_vmid = q->vmid;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+ 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
+ }
+
+ m->cp_hqd_active = 0;
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->cp_hqd_active = 1;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ return __update_mqd(mm, mqd, q, MTYPE_CC, 1);
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return mm->dev->kfd2kgd->hqd_destroy
+ (mm->dev->kgd, type, timeout,
+ pipe_id, queue_id);
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return mm->dev->kfd2kgd->hqd_is_occupied(
+ mm->dev->kgd, queue_address,
+ pipe_id, queue_id);
+}
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ struct vi_mqd *m;
+ int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+
+ if (retval != 0)
+ return retval;
+
+ m = get_mqd(*mqd);
+
+ m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+
+ return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct vi_mqd *m;
+ int retval = __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+
+ if (retval != 0)
+ return retval;
+
+ m = get_mqd(mqd);
+ m->cp_hqd_vmid = q->vmid;
+ return retval;
+}
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_dev *dev)
{
- pr_warn("amdkfd: VI MQD is not currently supported\n");
- return NULL;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dev);
+ BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ if (!mqd)
+ return NULL;
+
+ mqd->dev = dev;
+
+ switch (type) {
+ case KFD_MQD_TYPE_CP:
+ case KFD_MQD_TYPE_COMPUTE:
+ mqd->init_mqd = init_mqd;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ case KFD_MQD_TYPE_HIQ:
+ mqd->init_mqd = init_mqd_hiq;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ case KFD_MQD_TYPE_SDMA:
+ break;
+ default:
+ kfree(mqd);
+ return NULL;
+ }
+
+ return mqd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 99b6d28a11c3..90f391434fa3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -27,6 +27,7 @@
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
@@ -55,6 +56,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
bool *over_subscription)
{
unsigned int process_count, queue_count;
+ unsigned int map_queue_size;
BUG_ON(!pm || !rlib_size || !over_subscription);
@@ -69,9 +71,13 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
pr_debug("kfd: over subscribed runlist\n");
}
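+ /* the VI MAP_QUEUES packet differs in size from the CIK one, so size the runlist per ASIC */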
+ map_queue_size =
+ (pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
+ sizeof(struct pm4_mes_map_queues) :
+ sizeof(struct pm4_map_queues);
/* calculate run list ib allocation size */
*rlib_size = process_count * sizeof(struct pm4_map_process) +
- queue_count * sizeof(struct pm4_map_queues);
+ queue_count * map_queue_size;
/*
* Increase the allocation size in case we need a chained run list
@@ -176,6 +182,71 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
+static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q, bool is_static)
+{
+ struct pm4_mes_map_queues *packet;
+ bool use_static = is_static;
+
+ BUG_ON(!pm || !buffer || !q);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ packet = (struct pm4_mes_map_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+ packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+ sizeof(struct pm4_mes_map_queues));
+ packet->bitfields2.alloc_format =
+ alloc_format__mes_map_queues__one_per_pipe_vi;
+ packet->bitfields2.num_queues = 1;
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_map_queues__compute_vi;
+ packet->bitfields2.queue_type =
+ queue_type__mes_map_queues__normal_compute_vi;
+
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ if (use_static)
+ packet->bitfields2.queue_type =
+ queue_type__mes_map_queues__normal_latency_static_queue_vi;
+ break;
+ case KFD_QUEUE_TYPE_DIQ:
+ packet->bitfields2.queue_type =
+ queue_type__mes_map_queues__debug_interface_queue_vi;
+ break;
+ case KFD_QUEUE_TYPE_SDMA:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_map_queues__sdma0_vi;
+ use_static = false; /* no static queues under SDMA */
+ break;
+ default:
+ pr_err("kfd: in %s queue type %d\n", __func__,
+ q->properties.type);
+ BUG();
+ break;
+ }
+ packet->bitfields3.doorbell_offset =
+ q->properties.doorbell_off;
+
+ packet->mqd_addr_lo =
+ lower_32_bits(q->gart_mqd_addr);
+
+ packet->mqd_addr_hi =
+ upper_32_bits(q->gart_mqd_addr);
+
+ packet->wptr_addr_lo =
+ lower_32_bits((uint64_t)q->properties.write_ptr);
+
+ packet->wptr_addr_hi =
+ upper_32_bits((uint64_t)q->properties.write_ptr);
+
+ return 0;
+}
+
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
@@ -292,8 +363,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
kq->queue->queue, qpd->is_debug);
- retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
- kq->queue, qpd->is_debug);
+ if (pm->dqm->dev->device_info->asic_family ==
+ CHIP_CARRIZO)
+ retval = pm_create_map_queue_vi(pm,
+ &rl_buffer[rl_wptr],
+ kq->queue,
+ qpd->is_debug);
+ else
+ retval = pm_create_map_queue(pm,
+ &rl_buffer[rl_wptr],
+ kq->queue,
+ qpd->is_debug);
if (retval != 0)
return retval;
@@ -309,8 +389,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
q->queue, qpd->is_debug);
- retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
- q, qpd->is_debug);
+ if (pm->dqm->dev->device_info->asic_family ==
+ CHIP_CARRIZO)
+ retval = pm_create_map_queue_vi(pm,
+ &rl_buffer[rl_wptr],
+ q,
+ qpd->is_debug);
+ else
+ retval = pm_create_map_queue(pm,
+ &rl_buffer[rl_wptr],
+ q,
+ qpd->is_debug);
if (retval != 0)
return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
new file mode 100644
index 000000000000..08c721922812
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef F32_MES_PM4_PACKETS_H
+#define F32_MES_PM4_PACKETS_H
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+ struct {
+ uint32_t reserved1 : 8; /* < reserved */
+ uint32_t opcode : 8; /* < IT opcode */
+ uint32_t count : 14;/* < number of DWORDs - 1 in the
+ information body. */
+ uint32_t type : 2; /* < packet identifier.
+ It should be 3 for type 3 packets */
+ };
+ uint32_t u32All;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
+
+/*--------------------MES_SET_RESOURCES--------------------*/
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum mes_set_resources_queue_type_enum {
+ queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+ queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+ queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+
+struct pm4_mes_set_resources {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t vmid_mask:16;
+ uint32_t unmap_latency:8;
+ uint32_t reserved1:5;
+ enum mes_set_resources_queue_type_enum queue_type:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ uint32_t queue_mask_lo;
+ uint32_t queue_mask_hi;
+ uint32_t gws_mask_lo;
+ uint32_t gws_mask_hi;
+
+ union {
+ struct {
+ uint32_t oac_mask:16;
+ uint32_t reserved2:16;
+ } bitfields7;
+ uint32_t ordinal7;
+ };
+
+ union {
+ struct {
+ uint32_t gds_heap_base:6;
+ uint32_t reserved3:5;
+ uint32_t gds_heap_size:6;
+ uint32_t reserved4:15;
+ } bitfields8;
+ uint32_t ordinal8;
+ };
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST--------------------*/
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_mes_runlist {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t reserved1:2;
+ uint32_t ib_base_lo:30;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t ib_base_hi:16;
+ uint32_t reserved2:16;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ union {
+ struct {
+ uint32_t ib_size:20;
+ uint32_t chain:1;
+ uint32_t offload_polling:1;
+ uint32_t reserved3:1;
+ uint32_t valid:1;
+ uint32_t reserved4:8;
+ } bitfields4;
+ uint32_t ordinal4;
+ };
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS--------------------*/
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_mes_map_process {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved1:8;
+ uint32_t diq_enable:1;
+ uint32_t process_quantum:7;
+ } bitfields2;
+ uint32_t ordinal2;
+};
+
+ union {
+ struct {
+ uint32_t page_table_base:28;
+ uint32_t reserved2:4;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t sh_mem_config;
+ uint32_t gds_addr_lo;
+ uint32_t gds_addr_hi;
+
+ union {
+ struct {
+ uint32_t num_gws:6;
+ uint32_t reserved3:2;
+ uint32_t num_oac:4;
+ uint32_t reserved4:4;
+ uint32_t gds_size:6;
+ uint32_t num_queues:10;
+ } bitfields10;
+ uint32_t ordinal10;
+ };
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED
+#define PM4_MES_MAP_QUEUES_VI_DEFINED
+enum mes_map_queues_queue_sel_vi_enum {
+ queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0,
+queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1
+};
+
+enum mes_map_queues_queue_type_vi_enum {
+ queue_type__mes_map_queues__normal_compute_vi = 0,
+ queue_type__mes_map_queues__debug_interface_queue_vi = 1,
+ queue_type__mes_map_queues__normal_latency_static_queue_vi = 2,
+queue_type__mes_map_queues__low_latency_static_queue_vi = 3
+};
+
+enum mes_map_queues_alloc_format_vi_enum {
+ alloc_format__mes_map_queues__one_per_pipe_vi = 0,
+alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
+};
+
+enum mes_map_queues_engine_sel_vi_enum {
+ engine_sel__mes_map_queues__compute_vi = 0,
+ engine_sel__mes_map_queues__sdma0_vi = 2,
+ engine_sel__mes_map_queues__sdma1_vi = 3
+};
+
+
+struct pm4_mes_map_queues {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t reserved1:4;
+ enum mes_map_queues_queue_sel_vi_enum queue_sel:2;
+ uint32_t reserved2:15;
+ enum mes_map_queues_queue_type_vi_enum queue_type:3;
+ enum mes_map_queues_alloc_format_vi_enum alloc_format:2;
+ enum mes_map_queues_engine_sel_vi_enum engine_sel:3;
+ uint32_t num_queues:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t reserved3:1;
+ uint32_t check_disable:1;
+ uint32_t doorbell_offset:21;
+ uint32_t reserved4:3;
+ uint32_t queue:6;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ uint32_t mqd_addr_lo;
+ uint32_t mqd_addr_hi;
+ uint32_t wptr_addr_lo;
+ uint32_t wptr_addr_hi;
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum mes_query_status_interrupt_sel_enum {
+ interrupt_sel__mes_query_status__completion_status = 0,
+ interrupt_sel__mes_query_status__process_status = 1,
+ interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum mes_query_status_command_enum {
+ command__mes_query_status__interrupt_only = 0,
+ command__mes_query_status__fence_only_immediate = 1,
+ command__mes_query_status__fence_only_after_write_ack = 2,
+ command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum mes_query_status_engine_sel_enum {
+ engine_sel__mes_query_status__compute = 0,
+ engine_sel__mes_query_status__sdma0_queue = 2,
+ engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_mes_query_status {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t context_id:28;
+ enum mes_query_status_interrupt_sel_enum
+ interrupt_sel:2;
+ enum mes_query_status_command_enum command:2;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved1:16;
+ } bitfields3a;
+ struct {
+ uint32_t reserved2:2;
+ uint32_t doorbell_offset:21;
+ uint32_t reserved3:2;
+ enum mes_query_status_engine_sel_enum engine_sel:3;
+ uint32_t reserved4:4;
+ } bitfields3b;
+ uint32_t ordinal3;
+ };
+
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t data_lo;
+ uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum mes_unmap_queues_action_enum {
+ action__mes_unmap_queues__preempt_queues = 0,
+ action__mes_unmap_queues__reset_queues = 1,
+ action__mes_unmap_queues__disable_process_queues = 2,
+ action__mes_unmap_queues__reserved = 3
+};
+
+enum mes_unmap_queues_queue_sel_enum {
+ queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+ queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+ queue_sel__mes_unmap_queues__unmap_all_queues = 2,
+ queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3
+};
+
+enum mes_unmap_queues_engine_sel_enum {
+ engine_sel__mes_unmap_queues__compute = 0,
+ engine_sel__mes_unmap_queues__sdma0 = 2,
+ engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct PM4_MES_UNMAP_QUEUES {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ enum mes_unmap_queues_action_enum action:2;
+ uint32_t reserved1:2;
+ enum mes_unmap_queues_queue_sel_enum queue_sel:2;
+ uint32_t reserved2:20;
+ enum mes_unmap_queues_engine_sel_enum engine_sel:3;
+ uint32_t num_queues:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved3:16;
+ } bitfields3a;
+ struct {
+ uint32_t reserved4:2;
+ uint32_t doorbell_offset0:21;
+ uint32_t reserved5:9;
+ } bitfields3b;
+ uint32_t ordinal3;
+ };
+
+ union {
+ struct {
+ uint32_t reserved6:2;
+ uint32_t doorbell_offset1:21;
+ uint32_t reserved7:9;
+ } bitfields4;
+ uint32_t ordinal4;
+ };
+
+ union {
+ struct {
+ uint32_t reserved8:2;
+ uint32_t doorbell_offset2:21;
+ uint32_t reserved9:9;
+ } bitfields5;
+ uint32_t ordinal5;
+ };
+
+ union {
+ struct {
+ uint32_t reserved10:2;
+ uint32_t doorbell_offset3:21;
+ uint32_t reserved11:9;
+ } bitfields6;
+ uint32_t ordinal6;
+ };
+};
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8a1f999daa24..9be007081b72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pqm_uninit(&p->pqm);
pdd = kfd_get_process_device_data(dev, p);
+
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
+
if (pdd->reset_wavefronts) {
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
* We don't call amd_iommu_unbind_pasid() here
* because the IOMMU called us.
*/
- if (pdd)
- pdd->bound = false;
+ pdd->bound = false;
mutex_unlock(&p->mutex);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c25728bc388a..74909e72a009 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1186,6 +1186,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* TODO: Retrieve max engine clock values from KGD
*/
+ if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+ dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
+ pr_info("amdkfd: adding doorbell packet type capability\n");
+ }
+
res = 0;
err:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 989624b3cd14..c3ddb9b95ff8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -40,6 +40,7 @@
#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
#define HSA_CAP_RESERVED 0xfffff000
+#define HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000
struct kfd_node_properties {
uint32_t cpu_cores_count;
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9080daa116b6..888250b33ea8 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -52,7 +52,8 @@ enum kgd_engine_type {
KGD_ENGINE_MEC1,
KGD_ENGINE_MEC2,
KGD_ENGINE_RLC,
- KGD_ENGINE_SDMA,
+ KGD_ENGINE_SDMA1,
+ KGD_ENGINE_SDMA2,
KGD_ENGINE_MAX
};
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
new file mode 100644
index 000000000000..65cfacd7a66c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VI_STRUCTS_H_
+#define VI_STRUCTS_H_
+
+struct vi_sdma_mqd {
+ uint32_t sdmax_rlcx_rb_cntl;
+ uint32_t sdmax_rlcx_rb_base;
+ uint32_t sdmax_rlcx_rb_base_hi;
+ uint32_t sdmax_rlcx_rb_rptr;
+ uint32_t sdmax_rlcx_rb_wptr;
+ uint32_t sdmax_rlcx_rb_wptr_poll_cntl;
+ uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi;
+ uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo;
+ uint32_t sdmax_rlcx_rb_rptr_addr_hi;
+ uint32_t sdmax_rlcx_rb_rptr_addr_lo;
+ uint32_t sdmax_rlcx_ib_cntl;
+ uint32_t sdmax_rlcx_ib_rptr;
+ uint32_t sdmax_rlcx_ib_offset;
+ uint32_t sdmax_rlcx_ib_base_lo;
+ uint32_t sdmax_rlcx_ib_base_hi;
+ uint32_t sdmax_rlcx_ib_size;
+ uint32_t sdmax_rlcx_skip_cntl;
+ uint32_t sdmax_rlcx_context_status;
+ uint32_t sdmax_rlcx_doorbell;
+ uint32_t sdmax_rlcx_virtual_addr;
+ uint32_t sdmax_rlcx_ape1_cntl;
+ uint32_t sdmax_rlcx_doorbell_log;
+ uint32_t reserved_22;
+ uint32_t reserved_23;
+ uint32_t reserved_24;
+ uint32_t reserved_25;
+ uint32_t reserved_26;
+ uint32_t reserved_27;
+ uint32_t reserved_28;
+ uint32_t reserved_29;
+ uint32_t reserved_30;
+ uint32_t reserved_31;
+ uint32_t reserved_32;
+ uint32_t reserved_33;
+ uint32_t reserved_34;
+ uint32_t reserved_35;
+ uint32_t reserved_36;
+ uint32_t reserved_37;
+ uint32_t reserved_38;
+ uint32_t reserved_39;
+ uint32_t reserved_40;
+ uint32_t reserved_41;
+ uint32_t reserved_42;
+ uint32_t reserved_43;
+ uint32_t reserved_44;
+ uint32_t reserved_45;
+ uint32_t reserved_46;
+ uint32_t reserved_47;
+ uint32_t reserved_48;
+ uint32_t reserved_49;
+ uint32_t reserved_50;
+ uint32_t reserved_51;
+ uint32_t reserved_52;
+ uint32_t reserved_53;
+ uint32_t reserved_54;
+ uint32_t reserved_55;
+ uint32_t reserved_56;
+ uint32_t reserved_57;
+ uint32_t reserved_58;
+ uint32_t reserved_59;
+ uint32_t reserved_60;
+ uint32_t reserved_61;
+ uint32_t reserved_62;
+ uint32_t reserved_63;
+ uint32_t reserved_64;
+ uint32_t reserved_65;
+ uint32_t reserved_66;
+ uint32_t reserved_67;
+ uint32_t reserved_68;
+ uint32_t reserved_69;
+ uint32_t reserved_70;
+ uint32_t reserved_71;
+ uint32_t reserved_72;
+ uint32_t reserved_73;
+ uint32_t reserved_74;
+ uint32_t reserved_75;
+ uint32_t reserved_76;
+ uint32_t reserved_77;
+ uint32_t reserved_78;
+ uint32_t reserved_79;
+ uint32_t reserved_80;
+ uint32_t reserved_81;
+ uint32_t reserved_82;
+ uint32_t reserved_83;
+ uint32_t reserved_84;
+ uint32_t reserved_85;
+ uint32_t reserved_86;
+ uint32_t reserved_87;
+ uint32_t reserved_88;
+ uint32_t reserved_89;
+ uint32_t reserved_90;
+ uint32_t reserved_91;
+ uint32_t reserved_92;
+ uint32_t reserved_93;
+ uint32_t reserved_94;
+ uint32_t reserved_95;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t reserved_98;
+ uint32_t reserved_99;
+ uint32_t reserved_100;
+ uint32_t reserved_101;
+ uint32_t reserved_102;
+ uint32_t reserved_103;
+ uint32_t reserved_104;
+ uint32_t reserved_105;
+ uint32_t reserved_106;
+ uint32_t reserved_107;
+ uint32_t reserved_108;
+ uint32_t reserved_109;
+ uint32_t reserved_110;
+ uint32_t reserved_111;
+ uint32_t reserved_112;
+ uint32_t reserved_113;
+ uint32_t reserved_114;
+ uint32_t reserved_115;
+ uint32_t reserved_116;
+ uint32_t reserved_117;
+ uint32_t reserved_118;
+ uint32_t reserved_119;
+ uint32_t reserved_120;
+ uint32_t reserved_121;
+ uint32_t reserved_122;
+ uint32_t reserved_123;
+ uint32_t reserved_124;
+ uint32_t reserved_125;
+ uint32_t reserved_126;
+ uint32_t reserved_127;
+};
+
+struct vi_mqd {
+ uint32_t header;
+ uint32_t compute_dispatch_initiator;
+ uint32_t compute_dim_x;
+ uint32_t compute_dim_y;
+ uint32_t compute_dim_z;
+ uint32_t compute_start_x;
+ uint32_t compute_start_y;
+ uint32_t compute_start_z;
+ uint32_t compute_num_thread_x;
+ uint32_t compute_num_thread_y;
+ uint32_t compute_num_thread_z;
+ uint32_t compute_pipelinestat_enable;
+ uint32_t compute_perfcount_enable;
+ uint32_t compute_pgm_lo;
+ uint32_t compute_pgm_hi;
+ uint32_t compute_tba_lo;
+ uint32_t compute_tba_hi;
+ uint32_t compute_tma_lo;
+ uint32_t compute_tma_hi;
+ uint32_t compute_pgm_rsrc1;
+ uint32_t compute_pgm_rsrc2;
+ uint32_t compute_vmid;
+ uint32_t compute_resource_limits;
+ uint32_t compute_static_thread_mgmt_se0;
+ uint32_t compute_static_thread_mgmt_se1;
+ uint32_t compute_tmpring_size;
+ uint32_t compute_static_thread_mgmt_se2;
+ uint32_t compute_static_thread_mgmt_se3;
+ uint32_t compute_restart_x;
+ uint32_t compute_restart_y;
+ uint32_t compute_restart_z;
+ uint32_t compute_thread_trace_enable;
+ uint32_t compute_misc_reserved;
+ uint32_t compute_dispatch_id;
+ uint32_t compute_threadgroup_id;
+ uint32_t compute_relaunch;
+ uint32_t compute_wave_restore_addr_lo;
+ uint32_t compute_wave_restore_addr_hi;
+ uint32_t compute_wave_restore_control;
+ uint32_t reserved_39;
+ uint32_t reserved_40;
+ uint32_t reserved_41;
+ uint32_t reserved_42;
+ uint32_t reserved_43;
+ uint32_t reserved_44;
+ uint32_t reserved_45;
+ uint32_t reserved_46;
+ uint32_t reserved_47;
+ uint32_t reserved_48;
+ uint32_t reserved_49;
+ uint32_t reserved_50;
+ uint32_t reserved_51;
+ uint32_t reserved_52;
+ uint32_t reserved_53;
+ uint32_t reserved_54;
+ uint32_t reserved_55;
+ uint32_t reserved_56;
+ uint32_t reserved_57;
+ uint32_t reserved_58;
+ uint32_t reserved_59;
+ uint32_t reserved_60;
+ uint32_t reserved_61;
+ uint32_t reserved_62;
+ uint32_t reserved_63;
+ uint32_t reserved_64;
+ uint32_t compute_user_data_0;
+ uint32_t compute_user_data_1;
+ uint32_t compute_user_data_2;
+ uint32_t compute_user_data_3;
+ uint32_t compute_user_data_4;
+ uint32_t compute_user_data_5;
+ uint32_t compute_user_data_6;
+ uint32_t compute_user_data_7;
+ uint32_t compute_user_data_8;
+ uint32_t compute_user_data_9;
+ uint32_t compute_user_data_10;
+ uint32_t compute_user_data_11;
+ uint32_t compute_user_data_12;
+ uint32_t compute_user_data_13;
+ uint32_t compute_user_data_14;
+ uint32_t compute_user_data_15;
+ uint32_t cp_compute_csinvoc_count_lo;
+ uint32_t cp_compute_csinvoc_count_hi;
+ uint32_t reserved_83;
+ uint32_t reserved_84;
+ uint32_t reserved_85;
+ uint32_t cp_mqd_query_time_lo;
+ uint32_t cp_mqd_query_time_hi;
+ uint32_t cp_mqd_connect_start_time_lo;
+ uint32_t cp_mqd_connect_start_time_hi;
+ uint32_t cp_mqd_connect_end_time_lo;
+ uint32_t cp_mqd_connect_end_time_hi;
+ uint32_t cp_mqd_connect_end_wf_count;
+ uint32_t cp_mqd_connect_end_pq_rptr;
+ uint32_t cp_mqd_connect_end_pq_wptr;
+ uint32_t cp_mqd_connect_end_ib_rptr;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t cp_mqd_save_start_time_lo;
+ uint32_t cp_mqd_save_start_time_hi;
+ uint32_t cp_mqd_save_end_time_lo;
+ uint32_t cp_mqd_save_end_time_hi;
+ uint32_t cp_mqd_restore_start_time_lo;
+ uint32_t cp_mqd_restore_start_time_hi;
+ uint32_t cp_mqd_restore_end_time_lo;
+ uint32_t cp_mqd_restore_end_time_hi;
+ uint32_t reserved_106;
+ uint32_t reserved_107;
+ uint32_t gds_cs_ctxsw_cnt0;
+ uint32_t gds_cs_ctxsw_cnt1;
+ uint32_t gds_cs_ctxsw_cnt2;
+ uint32_t gds_cs_ctxsw_cnt3;
+ uint32_t reserved_112;
+ uint32_t reserved_113;
+ uint32_t cp_pq_exe_status_lo;
+ uint32_t cp_pq_exe_status_hi;
+ uint32_t cp_packet_id_lo;
+ uint32_t cp_packet_id_hi;
+ uint32_t cp_packet_exe_status_lo;
+ uint32_t cp_packet_exe_status_hi;
+ uint32_t gds_save_base_addr_lo;
+ uint32_t gds_save_base_addr_hi;
+ uint32_t gds_save_mask_lo;
+ uint32_t gds_save_mask_hi;
+ uint32_t ctx_save_base_addr_lo;
+ uint32_t ctx_save_base_addr_hi;
+ uint32_t reserved_126;
+ uint32_t reserved_127;
+ uint32_t cp_mqd_base_addr_lo;
+ uint32_t cp_mqd_base_addr_hi;
+ uint32_t cp_hqd_active;
+ uint32_t cp_hqd_vmid;
+ uint32_t cp_hqd_persistent_state;
+ uint32_t cp_hqd_pipe_priority;
+ uint32_t cp_hqd_queue_priority;
+ uint32_t cp_hqd_quantum;
+ uint32_t cp_hqd_pq_base_lo;
+ uint32_t cp_hqd_pq_base_hi;
+ uint32_t cp_hqd_pq_rptr;
+ uint32_t cp_hqd_pq_rptr_report_addr_lo;
+ uint32_t cp_hqd_pq_rptr_report_addr_hi;
+ uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+ uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+ uint32_t cp_hqd_pq_doorbell_control;
+ uint32_t cp_hqd_pq_wptr;
+ uint32_t cp_hqd_pq_control;
+ uint32_t cp_hqd_ib_base_addr_lo;
+ uint32_t cp_hqd_ib_base_addr_hi;
+ uint32_t cp_hqd_ib_rptr;
+ uint32_t cp_hqd_ib_control;
+ uint32_t cp_hqd_iq_timer;
+ uint32_t cp_hqd_iq_rptr;
+ uint32_t cp_hqd_dequeue_request;
+ uint32_t cp_hqd_dma_offload;
+ uint32_t cp_hqd_sema_cmd;
+ uint32_t cp_hqd_msg_type;
+ uint32_t cp_hqd_atomic0_preop_lo;
+ uint32_t cp_hqd_atomic0_preop_hi;
+ uint32_t cp_hqd_atomic1_preop_lo;
+ uint32_t cp_hqd_atomic1_preop_hi;
+ uint32_t cp_hqd_hq_status0;
+ uint32_t cp_hqd_hq_control0;
+ uint32_t cp_mqd_control;
+ uint32_t cp_hqd_hq_status1;
+ uint32_t cp_hqd_hq_control1;
+ uint32_t cp_hqd_eop_base_addr_lo;
+ uint32_t cp_hqd_eop_base_addr_hi;
+ uint32_t cp_hqd_eop_control;
+ uint32_t cp_hqd_eop_rptr;
+ uint32_t cp_hqd_eop_wptr;
+ uint32_t cp_hqd_eop_done_events;
+ uint32_t cp_hqd_ctx_save_base_addr_lo;
+ uint32_t cp_hqd_ctx_save_base_addr_hi;
+ uint32_t cp_hqd_ctx_save_control;
+ uint32_t cp_hqd_cntl_stack_offset;
+ uint32_t cp_hqd_cntl_stack_size;
+ uint32_t cp_hqd_wg_state_offset;
+ uint32_t cp_hqd_ctx_save_size;
+ uint32_t cp_hqd_gds_resource_state;
+ uint32_t cp_hqd_error;
+ uint32_t cp_hqd_eop_wptr_mem;
+ uint32_t cp_hqd_eop_dones;
+ uint32_t reserved_182;
+ uint32_t reserved_183;
+ uint32_t reserved_184;
+ uint32_t reserved_185;
+ uint32_t reserved_186;
+ uint32_t reserved_187;
+ uint32_t reserved_188;
+ uint32_t reserved_189;
+ uint32_t reserved_190;
+ uint32_t reserved_191;
+ uint32_t iqtimer_pkt_header;
+ uint32_t iqtimer_pkt_dw0;
+ uint32_t iqtimer_pkt_dw1;
+ uint32_t iqtimer_pkt_dw2;
+ uint32_t iqtimer_pkt_dw3;
+ uint32_t iqtimer_pkt_dw4;
+ uint32_t iqtimer_pkt_dw5;
+ uint32_t iqtimer_pkt_dw6;
+ uint32_t iqtimer_pkt_dw7;
+ uint32_t iqtimer_pkt_dw8;
+ uint32_t iqtimer_pkt_dw9;
+ uint32_t iqtimer_pkt_dw10;
+ uint32_t iqtimer_pkt_dw11;
+ uint32_t iqtimer_pkt_dw12;
+ uint32_t iqtimer_pkt_dw13;
+ uint32_t iqtimer_pkt_dw14;
+ uint32_t iqtimer_pkt_dw15;
+ uint32_t iqtimer_pkt_dw16;
+ uint32_t iqtimer_pkt_dw17;
+ uint32_t iqtimer_pkt_dw18;
+ uint32_t iqtimer_pkt_dw19;
+ uint32_t iqtimer_pkt_dw20;
+ uint32_t iqtimer_pkt_dw21;
+ uint32_t iqtimer_pkt_dw22;
+ uint32_t iqtimer_pkt_dw23;
+ uint32_t iqtimer_pkt_dw24;
+ uint32_t iqtimer_pkt_dw25;
+ uint32_t iqtimer_pkt_dw26;
+ uint32_t iqtimer_pkt_dw27;
+ uint32_t iqtimer_pkt_dw28;
+ uint32_t iqtimer_pkt_dw29;
+ uint32_t iqtimer_pkt_dw30;
+ uint32_t iqtimer_pkt_dw31;
+ uint32_t reserved_225;
+ uint32_t reserved_226;
+ uint32_t reserved_227;
+ uint32_t set_resources_header;
+ uint32_t set_resources_dw1;
+ uint32_t set_resources_dw2;
+ uint32_t set_resources_dw3;
+ uint32_t set_resources_dw4;
+ uint32_t set_resources_dw5;
+ uint32_t set_resources_dw6;
+ uint32_t set_resources_dw7;
+ uint32_t reserved_236;
+ uint32_t reserved_237;
+ uint32_t reserved_238;
+ uint32_t reserved_239;
+ uint32_t queue_doorbell_id0;
+ uint32_t queue_doorbell_id1;
+ uint32_t queue_doorbell_id2;
+ uint32_t queue_doorbell_id3;
+ uint32_t queue_doorbell_id4;
+ uint32_t queue_doorbell_id5;
+ uint32_t queue_doorbell_id6;
+ uint32_t queue_doorbell_id7;
+ uint32_t queue_doorbell_id8;
+ uint32_t queue_doorbell_id9;
+ uint32_t queue_doorbell_id10;
+ uint32_t queue_doorbell_id11;
+ uint32_t queue_doorbell_id12;
+ uint32_t queue_doorbell_id13;
+ uint32_t queue_doorbell_id14;
+ uint32_t queue_doorbell_id15;
+};
+
+#endif /* VI_STRUCTS_H_ */
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 42d2ffa08716..01ffe9bffe38 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- crtc->mode = *adj;
-
val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
if (val != dcrtc->dumb_ctrl) {
dcrtc->dumb_ctrl = val;
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 7838e731b0de..7d03c51abcb9 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -22,9 +22,9 @@ static /*const*/ struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -80,18 +80,12 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
if (IS_ERR(dfb))
return PTR_ERR(dfb);
- info = framebuffer_alloc(0, dev->dev);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(fbh);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto err_fballoc;
}
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto err_fbcmap;
- }
-
strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
info->par = fbh;
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
@@ -101,7 +95,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
info->screen_size = obj->obj.size;
info->screen_base = ptr;
fbh->fb = &dfb->fb;
- fbh->fbdev = info;
+
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
@@ -111,8 +105,6 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
return 0;
- err_fbcmap:
- framebuffer_release(info);
err_fballoc:
dfb->fb.funcs->destroy(&dfb->fb);
return ret;
@@ -171,6 +163,7 @@ int armada_fbdev_init(struct drm_device *dev)
return 0;
err_fb_setup:
+ drm_fb_helper_release_fbi(fbh);
drm_fb_helper_fini(fbh);
err_fb_helper:
priv->fbdev = NULL;
@@ -191,14 +184,8 @@ void armada_fbdev_fini(struct drm_device *dev)
struct drm_fb_helper *fbh = priv->fbdev;
if (fbh) {
- struct fb_info *info = fbh->fbdev;
-
- if (info) {
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(fbh);
+ drm_fb_helper_release_fbi(fbh);
drm_fb_helper_fini(fbh);
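
The armada fbdev conversion above is the same pattern repeated for ast, bochs and cirrus further down: fb_info allocation, cmap and aperture setup move into drm_fb_helper_alloc_fbi(), and teardown shrinks to two helper calls. A minimal sketch of the resulting driver-side shape, using hypothetical foo_* names for the driver-specific pieces:

#include <linux/err.h>
#include <drm/drm_fb_helper.h>

/* foo_framebuffer_init() stands in for the driver-specific setup
 * (filling info->fix/var, mapping VRAM, etc.); it is not a real API. */
int foo_framebuffer_init(struct drm_fb_helper *helper, struct fb_info *info,
			 struct drm_fb_helper_surface_size *sizes);

static int foo_fbdev_create(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;
	int ret;

	/* Allocates the fb_info, its cmap and apertures, and hooks it up to
	 * helper->fbdev; replaces framebuffer_alloc() + fb_alloc_cmap(). */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	ret = foo_framebuffer_init(helper, info, sizes);
	if (ret) {
		drm_fb_helper_release_fbi(helper);
		return ret;
	}

	return 0;
}

static void foo_fbdev_destroy(struct drm_fb_helper *helper)
{
	/* Replaces the open-coded unregister_framebuffer() /
	 * fb_dealloc_cmap() / framebuffer_release() sequence. */
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
	drm_fb_helper_fini(helper);
}
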
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 580e10acaa3a..60a688ef81c7 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj)
if (dobj->obj.import_attach) {
/* We only ever display imported data */
- dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
- DMA_TO_DEVICE);
+ if (dobj->sgt)
+ dma_buf_unmap_attachment(dobj->obj.import_attach,
+ dobj->sgt, DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c5b06fdb459c..e939faba7fcc 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,6 +7,7 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
@@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
if (fb)
armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
-}
-static unsigned armada_limit(int start, unsigned size, unsigned max)
-{
- int end = start + size;
- if (end < 0)
- return 0;
- if (start < 0)
- start = 0;
- return (unsigned)end > max ? max - start : end - start;
+ wake_up(&dplane->vbl.wait);
}
static int
@@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
{
struct armada_plane *dplane = drm_to_armada_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_rect src = {
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ struct drm_rect dest = {
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
uint32_t val, ctrl0;
unsigned idx = 0;
+ bool visible;
int ret;
- crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
- crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+ ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+ 0, INT_MAX, true, false, &visible);
+ if (ret)
+ return ret;
+
ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
/* Does the position/size result in nothing to display? */
- if (crtc_w == 0 || crtc_h == 0) {
+ if (!visible)
ctrl0 &= ~CFG_DMA_ENA;
- }
-
- /*
- * FIXME: if the starting point is off screen, we need to
- * adjust src_x, src_y, src_w, src_h appropriately, and
- * according to the scale.
- */
if (!dcrtc->plane) {
dcrtc->plane = plane;
@@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
- val = (src_h & 0xffff0000) | src_w >> 16;
+ val = (drm_rect_height(&src) & 0xffff0000) |
+ drm_rect_width(&src) >> 16;
dplane->src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
- val = crtc_h << 16 | crtc_w;
+
+ val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
dplane->dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
- val = crtc_y << 16 | crtc_x;
+
+ val = dest.y1 << 16 | dest.x1;
dplane->dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+
return 0;
} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
@@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
dcrtc->base + LCD_SPU_SRAM_PARA1);
}
- ret = wait_event_timeout(dplane->vbl.wait,
- list_empty(&dplane->vbl.update.node),
- HZ/25);
- if (ret < 0)
- return ret;
+ wait_event_timeout(dplane->vbl.wait,
+ list_empty(&dplane->vbl.update.node),
+ HZ/25);
if (plane->fb != fb) {
struct armada_gem_object *obj = drm_fb_obj(fb);
- uint32_t sy, su, sv;
+ uint32_t addr[3], pixel_format;
+ int i, num_planes, hsub;
/*
* Take a reference on the new framebuffer - we want to
@@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
older_fb);
}
- src_y >>= 16;
- src_x >>= 16;
- sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
- src_x * fb->bits_per_pixel / 8;
- su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
- src_x;
- sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
- src_x;
+ src_y = src.y1 >> 16;
+ src_x = src.x1 >> 16;
- armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ pixel_format = fb->pixel_format;
+ hsub = drm_format_horz_chroma_subsampling(pixel_format);
+ num_planes = drm_format_num_planes(pixel_format);
+
+ /*
+ * Annoyingly, shifting a YUYV-format image by one pixel
+ * causes the U/V planes to toggle. Toggle the UV swap.
+ * (Unfortunately, this causes momentary colour flickering.)
+ */
+ if (src_x & (hsub - 1) && num_planes == 1)
+ ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+ for (i = 0; i < num_planes; i++)
+ addr[i] = obj->dev_addr + fb->offsets[i] +
+ src_y * fb->pitches[i] +
+ src_x * drm_format_plane_cpp(pixel_format, i);
+ for (; i < ARRAY_SIZE(addr); i++)
+ addr[i] = 0;
+
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
LCD_SPU_DMA_PITCH_UV);
}
- val = (src_h & 0xffff0000) | src_w >> 16;
+ val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
if (dplane->src_hw != val) {
dplane->src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
- val = crtc_h << 16 | crtc_w;
+
+ val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
if (dplane->dst_hw != val) {
dplane->dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
- val = crtc_y << 16 | crtc_x;
+
+ val = dest.y1 << 16 | dest.x1;
if (dplane->dst_yx != val) {
dplane->dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
+
if (dplane->ctrl0 != ctrl0) {
dplane->ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
@@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane)
static void armada_plane_destroy(struct drm_plane *plane)
{
- kfree(plane);
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(dplane);
}
static int armada_plane_set_property(struct drm_plane *plane,
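
The overlay update now leans on drm_plane_helper_check_update() for clipping and on the drm_format_* helpers for per-plane addressing. The address computation, isolated as a sketch (foo_compute_plane_addrs and obj_dev_addr are illustrative names, not part of the driver):

#include <drm/drm_crtc.h>

/* Sketch only: obj_dev_addr is the buffer's DMA address, src_x/src_y are
 * already converted from 16.16 fixed point to whole pixels. */
static void foo_compute_plane_addrs(struct drm_framebuffer *fb,
				    u32 obj_dev_addr,
				    unsigned int src_x, unsigned int src_y,
				    u32 addr[3])
{
	int i;
	int num_planes = drm_format_num_planes(fb->pixel_format);

	/* One scan-out address per colour plane of the format (1 for RGB
	 * and packed YUV, up to 3 for planar YUV). */
	for (i = 0; i < num_planes; i++)
		addr[i] = obj_dev_addr + fb->offsets[i] +
			  src_y * fb->pitches[i] +
			  src_x * drm_format_plane_cpp(fb->pixel_format, i);

	/* Unused planes are programmed with 0. */
	for (; i < 3; i++)
		addr[i] = 0;
}
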
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index ff68eefae273..f31db28a684b 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -125,7 +125,7 @@ static void ast_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct ast_fbdev *afbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
@@ -134,7 +134,7 @@ static void ast_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct ast_fbdev *afbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
ast_dirty_update(afbdev, area->dx, area->dy, area->width,
area->height);
}
@@ -143,7 +143,7 @@ static void ast_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct ast_fbdev *afbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
ast_dirty_update(afbdev, image->dx, image->dy, image->width,
image->height);
}
@@ -193,7 +193,6 @@ static int astfb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct fb_info *info;
int size, ret;
- struct device *device = &dev->pdev->dev;
void *sysram;
struct drm_gem_object *gobj = NULL;
struct ast_bo *bo = NULL;
@@ -217,40 +216,28 @@ static int astfb_create(struct drm_fb_helper *helper,
if (!sysram)
return -ENOMEM;
- info = framebuffer_alloc(0, device);
- if (!info) {
- ret = -ENOMEM;
- goto out;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_free_vram;
}
info->par = afbdev;
ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
if (ret)
- goto out;
+ goto err_release_fbi;
afbdev->sysram = sysram;
afbdev->size = size;
fb = &afbdev->afb.base;
afbdev->helper.fb = fb;
- afbdev->helper.fbdev = info;
strcpy(info->fix.id, "astdrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &astfb_ops;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out;
- }
-
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out;
- }
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -266,7 +253,11 @@ static int astfb_create(struct drm_fb_helper *helper,
fb->width, fb->height);
return 0;
-out:
+
+err_release_fbi:
+ drm_fb_helper_release_fbi(helper);
+err_free_vram:
+ vfree(afbdev->sysram);
return ret;
}
@@ -297,15 +288,10 @@ static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
static void ast_fbdev_destroy(struct drm_device *dev,
struct ast_fbdev *afbdev)
{
- struct fb_info *info;
struct ast_framebuffer *afb = &afbdev->afb;
- if (afbdev->helper.fbdev) {
- info = afbdev->helper.fbdev;
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+
+ drm_fb_helper_unregister_fbi(&afbdev->helper);
+ drm_fb_helper_release_fbi(&afbdev->helper);
if (afb->obj) {
drm_gem_object_unreference_unlocked(afb->obj);
@@ -377,5 +363,5 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
if (!ast->fbdev)
return;
- fb_set_suspend(ast->fbdev->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&ast->fbdev->helper, state);
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 035dacc93382..838217f8ce7d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -571,24 +571,18 @@ ast_dumb_mmap_offset(struct drm_file *file,
uint64_t *offset)
{
struct drm_gem_object *obj;
- int ret;
struct ast_bo *bo;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
bo = gem_to_ast_bo(obj);
*offset = ast_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
- ret = 0;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
}
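
The same lock drop reappears in the bochs and cirrus mmap-offset hunks below: drm_gem_object_lookup() takes its own reference and the unlocked unreference variant does not need dev->struct_mutex, so the mutex can go away entirely. A self-contained sketch, with foo_bo_mmap_offset() as a hypothetical stand-in for the driver's TTM offset helper:

#include <drm/drmP.h>

/* Illustrative stand-in for the driver's offset helper. */
u64 foo_bo_mmap_offset(struct drm_gem_object *obj);

static int foo_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
				u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;

	/* The lookup takes its own reference; no struct_mutex needed. */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	*offset = foo_bo_mmap_offset(obj);

	/* Drop the reference without holding dev->struct_mutex. */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}
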
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f69b92535505..9f6e234e7029 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -239,7 +239,8 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
return atmel_hlcdc_plane_prepare_disc_area(s);
}
-static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
+ struct drm_crtc_state *old_s)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -253,7 +254,8 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
}
}
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_s)
{
/* TODO: write common plane control register if available */
}
@@ -355,6 +357,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
+ drm_crtc_vblank_reset(&crtc->base);
dc->crtc = &crtc->base;
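
This hunk tracks a core interface change: the CRTC helpers' atomic_begin/atomic_flush callbacks now receive the old CRTC state, and the related atmel_hlcdc_dc.c hunk below moves drm_vblank_init() ahead of the modeset init that now calls drm_crtc_vblank_reset(). A sketch of the callback shape after the change (foo_* names are illustrative):

#include <drm/drm_crtc_helper.h>

static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	/* e.g. latch double-buffered registers before plane updates;
	 * drivers that do not need the old state can ignore the argument. */
}

static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	/* e.g. trigger the hardware update once all planes are written */
}

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.atomic_begin = foo_crtc_atomic_begin,
	.atomic_flush = foo_crtc_atomic_flush,
};
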
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5..6fad1f9648f3 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- ret = atmel_hlcdc_dc_modeset_init(dev);
+ ret = drm_vblank_init(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
+ dev_err(dev->dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}
- drm_mode_config_reset(dev);
-
- ret = drm_vblank_init(dev, 1);
+ ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ dev_err(dev->dev, "failed to initialize mode setting\n");
goto err_periph_clk_disable;
}
+ drm_mode_config_reset(dev);
+
pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 98837bde2d25..7f1a3604b19f 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -109,7 +109,7 @@ static int bochs_pm_suspend(struct device *dev)
if (bochs->fb.initialized) {
console_lock();
- fb_set_suspend(bochs->fb.helper.fbdev, 1);
+ drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
console_unlock();
}
@@ -126,7 +126,7 @@ static int bochs_pm_resume(struct device *dev)
if (bochs->fb.initialized) {
console_lock();
- fb_set_suspend(bochs->fb.helper.fbdev, 0);
+ drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
console_unlock();
}
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 976d9798dc99..09a0637aab3e 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -24,9 +24,9 @@ static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -56,11 +56,9 @@ static int bochsfb_create(struct drm_fb_helper *helper,
{
struct bochs_device *bochs =
container_of(helper, struct bochs_device, fb.helper);
- struct drm_device *dev = bochs->dev;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
- struct device *device = &dev->pdev->dev;
struct drm_gem_object *gobj = NULL;
struct bochs_bo *bo = NULL;
int size, ret;
@@ -106,22 +104,23 @@ static int bochsfb_create(struct drm_fb_helper *helper,
ttm_bo_unreserve(&bo->bo);
/* init fb device */
- info = framebuffer_alloc(0, device);
- if (info == NULL)
- return -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
info->par = &bochs->fb.helper;
ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
- if (ret)
+ if (ret) {
+ drm_fb_helper_release_fbi(helper);
return ret;
+ }
bochs->fb.size = size;
/* setup helper */
fb = &bochs->fb.gfb.base;
bochs->fb.helper.fb = fb;
- bochs->fb.helper.fbdev = info;
strcpy(info->fix.id, "bochsdrmfb");
@@ -139,30 +138,17 @@ static int bochsfb_create(struct drm_fb_helper *helper,
info->fix.smem_start = 0;
info->fix.smem_len = size;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
- return -ENOMEM;
- }
-
return 0;
}
static int bochs_fbdev_destroy(struct bochs_device *bochs)
{
struct bochs_framebuffer *gfb = &bochs->fb.gfb;
- struct fb_info *info;
DRM_DEBUG_DRIVER("\n");
- if (bochs->fb.helper.fbdev) {
- info = bochs->fb.helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&bochs->fb.helper);
+ drm_fb_helper_release_fbi(&bochs->fb.helper);
if (gfb->obj) {
drm_gem_object_unreference_unlocked(gfb->obj);
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 66286ff518d4..f69e6bf9bb0e 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -454,25 +454,17 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct drm_gem_object *obj;
- int ret;
struct bochs_bo *bo;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
bo = gem_to_bochs_bo(obj);
*offset = bochs_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
- ret = 0;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
}
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b9140032962d..b1619e29a564 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -92,7 +92,7 @@ static int cirrus_pm_suspend(struct device *dev)
if (cdev->mode_info.gfbdev) {
console_lock();
- fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
+ drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
console_unlock();
}
@@ -109,7 +109,7 @@ static int cirrus_pm_resume(struct device *dev)
if (cdev->mode_info.gfbdev) {
console_lock();
- fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
+ drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
console_unlock();
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 13ddf1c4bb8e..589103bcc06c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -98,7 +98,7 @@ static void cirrus_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct cirrus_fbdev *afbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
@@ -107,7 +107,7 @@ static void cirrus_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct cirrus_fbdev *afbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
area->height);
}
@@ -116,7 +116,7 @@ static void cirrus_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct cirrus_fbdev *afbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
image->height);
}
@@ -165,12 +165,10 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
{
struct cirrus_fbdev *gfbdev =
container_of(helper, struct cirrus_fbdev, helper);
- struct drm_device *dev = gfbdev->helper.dev;
struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
- struct device *device = &dev->pdev->dev;
void *sysram;
struct drm_gem_object *gobj = NULL;
struct cirrus_bo *bo = NULL;
@@ -195,9 +193,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
if (!sysram)
return -ENOMEM;
- info = framebuffer_alloc(0, device);
- if (info == NULL)
- return -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
info->par = gfbdev;
@@ -216,11 +214,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
/* setup helper */
gfbdev->helper.fb = fb;
- gfbdev->helper.fbdev = info;
strcpy(info->fix.id, "cirrusdrmfb");
-
info->flags = FBINFO_DEFAULT;
info->fbops = &cirrusfb_ops;
@@ -229,11 +225,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_iounmap;
- }
info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = cdev->mc.vram_size;
@@ -246,13 +237,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
- ret = -ENOMEM;
- goto out_iounmap;
- }
-
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
@@ -260,24 +244,15 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
return 0;
-out_iounmap:
- return ret;
}
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
- struct fb_info *info;
struct cirrus_framebuffer *gfb = &gfbdev->gfb;
- if (gfbdev->helper.fbdev) {
- info = gfbdev->helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&gfbdev->helper);
+ drm_fb_helper_release_fbi(&gfbdev->helper);
if (gfb->obj) {
drm_gem_object_unreference_unlocked(gfb->obj);
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e4b976658087..055fd86ba717 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -293,25 +293,18 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
uint64_t *offset)
{
struct drm_gem_object *obj;
- int ret;
struct cirrus_bo *bo;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
bo = gem_to_cirrus_bo(obj);
*offset = cirrus_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
- ret = 0;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
}
bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f6f2fb58eb37..1066e4b658cf 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -153,9 +153,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
if (!connector)
continue;
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
- connector->funcs->atomic_destroy_state(connector,
+ /*
+ * FIXME: Async commits can race with connector unplugging and
+ * there's currently nothing that prevents cleaning up state for
+ * deleted connectors. As long as the callback doesn't look at
+ * the connector we'll be fine though, so make sure that's the
+ * case by setting all connector pointers to NULL.
+ */
+ state->connector_states[i]->connector = NULL;
+ connector->funcs->atomic_destroy_state(NULL,
state->connector_states[i]);
state->connectors[i] = NULL;
state->connector_states[i] = NULL;
@@ -1063,7 +1069,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
* Changed connectors are already in @state, so only need to look at the
* current configuration.
*/
- list_for_each_entry(connector, &config->connector_list, head) {
+ drm_for_each_connector(connector, state->dev) {
if (connector->state->crtc != crtc)
continue;
@@ -1224,6 +1230,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
+ if (ret == 0)
+ ww_acquire_done(&state->acquire_ctx->ww_ctx);
+
return ret;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -1463,24 +1472,18 @@ retry:
if (get_user(obj_id, objs_ptr + copied_objs)) {
ret = -EFAULT;
- goto fail;
+ goto out;
}
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
if (!obj || !obj->properties) {
ret = -ENOENT;
- goto fail;
- }
-
- if (obj->type == DRM_MODE_OBJECT_PLANE) {
- plane = obj_to_plane(obj);
- plane_mask |= (1 << drm_plane_index(plane));
- plane->old_fb = plane->fb;
+ goto out;
}
if (get_user(count_props, count_props_ptr + copied_objs)) {
ret = -EFAULT;
- goto fail;
+ goto out;
}
copied_objs++;
@@ -1492,28 +1495,34 @@ retry:
if (get_user(prop_id, props_ptr + copied_props)) {
ret = -EFAULT;
- goto fail;
+ goto out;
}
prop = drm_property_find(dev, prop_id);
if (!prop) {
ret = -ENOENT;
- goto fail;
+ goto out;
}
if (copy_from_user(&prop_value,
prop_values_ptr + copied_props,
sizeof(prop_value))) {
ret = -EFAULT;
- goto fail;
+ goto out;
}
ret = atomic_set_prop(state, obj, prop, prop_value);
if (ret)
- goto fail;
+ goto out;
copied_props++;
}
+
+ if (obj->type == DRM_MODE_OBJECT_PLANE && count_props) {
+ plane = obj_to_plane(obj);
+ plane_mask |= (1 << drm_plane_index(plane));
+ plane->old_fb = plane->fb;
+ }
}
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1523,7 +1532,7 @@ retry:
e = create_vblank_event(dev, file_priv, arg->user_data);
if (!e) {
ret = -ENOMEM;
- goto fail;
+ goto out;
}
crtc_state->event = e;
@@ -1533,13 +1542,15 @@ retry:
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
ret = drm_atomic_check_only(state);
/* _check_only() does not free state, unlike _commit() */
- drm_atomic_state_free(state);
+ if (!ret)
+ drm_atomic_state_free(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_async_commit(state);
} else {
ret = drm_atomic_commit(state);
}
+out:
/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
* locks (ie. while it is still safe to deref plane->state). We
* need to do this here because the driver entry points cannot
@@ -1552,41 +1563,35 @@ retry:
drm_framebuffer_reference(new_fb);
plane->fb = new_fb;
plane->crtc = plane->state->crtc;
- } else {
- plane->old_fb = NULL;
- }
- if (plane->old_fb) {
- drm_framebuffer_unreference(plane->old_fb);
- plane->old_fb = NULL;
+
+ if (plane->old_fb)
+ drm_framebuffer_unreference(plane->old_fb);
}
+ plane->old_fb = NULL;
}
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
-fail:
- if (ret == -EDEADLK)
- goto backoff;
+ if (ret) {
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!crtc_state->event)
+ continue;
- if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- destroy_vblank_event(dev, file_priv, crtc_state->event);
- crtc_state->event = NULL;
+ destroy_vblank_event(dev, file_priv,
+ crtc_state->event);
+ }
}
- }
- drm_atomic_state_free(state);
+ drm_atomic_state_free(state);
+ }
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
-
- goto retry;
}
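
The rewritten ioctl folds the separate fail/backoff labels into a single exit path, only records a plane's old_fb once properties were actually set on it, and keeps the usual deadlock backoff loop. That loop, sketched in isolation with a hypothetical foo_try_commit() standing in for the check/commit step:

#include <drm/drmP.h>
#include <drm/drm_atomic.h>

/* Stand-in for drm_atomic_check_only()/drm_atomic_commit(). */
int foo_try_commit(struct drm_atomic_state *state);

static int foo_commit_with_backoff(struct drm_atomic_state *state,
				   struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

retry:
	ret = foo_try_commit(state);
	if (ret == -EDEADLK) {
		/* Drop every lock held so far, wait for the contended one,
		 * then retry with cleared state. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(ctx);
		goto retry;
	}

	/* On success the driver owns the state; free it only on failure. */
	if (ret)
		drm_atomic_state_free(state);

	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	return ret;
}
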
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5b59d5ad7d1c..d432348837a5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -89,7 +89,7 @@ get_current_crtc_for_encoder(struct drm_device *dev,
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- list_for_each_entry(connector, &config->connector_list, head) {
+ drm_for_each_connector(connector, dev) {
if (connector->state->best_encoder != encoder)
continue;
@@ -124,7 +124,7 @@ steal_encoder(struct drm_atomic_state *state,
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
list_for_each_entry(connector, &config->connector_list, head) {
if (connector->state->best_encoder != encoder)
@@ -174,14 +174,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
idx = drm_crtc_index(connector->state->crtc);
crtc_state = state->crtc_states[idx];
- crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
}
if (connector_state->crtc) {
idx = drm_crtc_index(connector_state->crtc);
crtc_state = state->crtc_states[idx];
- crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
}
}
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
funcs = connector->helper_private;
- new_encoder = funcs->best_encoder(connector);
+
+ if (funcs->atomic_best_encoder)
+ new_encoder = funcs->atomic_best_encoder(connector,
+ connector_state);
+ else
+ new_encoder = funcs->best_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,11 +234,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
}
+ if (WARN_ON(!connector_state->crtc))
+ return -EINVAL;
+
connector_state->best_encoder = new_encoder;
idx = drm_crtc_index(connector_state->crtc);
crtc_state = state->crtc_states[idx];
- crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
connector->base.id,
@@ -256,7 +264,8 @@ mode_fixup(struct drm_atomic_state *state)
bool ret;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->mode_changed)
+ if (!crtc_state->mode_changed &&
+ !crtc_state->connectors_changed)
continue;
drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
@@ -298,7 +307,7 @@ mode_fixup(struct drm_atomic_state *state)
encoder->base.id, encoder->name);
return ret;
}
- } else {
+ } else if (funcs->mode_fixup) {
ret = funcs->mode_fixup(encoder, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
@@ -312,7 +321,8 @@ mode_fixup(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
- if (!crtc_state->mode_changed)
+ if (!crtc_state->mode_changed &&
+ !crtc_state->connectors_changed)
continue;
funcs = crtc->helper_private;
@@ -338,9 +348,14 @@ mode_fixup(struct drm_atomic_state *state)
*
* Check the state object to see if the requested state is physically possible.
* This does all the crtc and connector related computations for an atomic
- * update. It computes and updates crtc_state->mode_changed, adds any additional
- * connectors needed for full modesets and calls down into ->mode_fixup
- * functions of the driver backend.
+ * update and adds any additional connectors needed for full modesets and calls
+ * down into ->mode_fixup functions of the driver backend.
+ *
+ * crtc_state->mode_changed is set when the input mode is changed.
+ * crtc_state->connectors_changed is set when a connector is added or
+ * removed from the crtc.
+ * crtc_state->active_changed is set when crtc_state->active changes,
+ * which is used for dpms.
*
* IMPORTANT:
*
@@ -373,7 +388,17 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (crtc->state->enable != crtc_state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
crtc->base.id);
+
+ /*
+ * For clarity this assignment is done here, but
+ * enable == 0 is only true when there are no
+ * connectors and a NULL mode.
+ *
+ * The other way around is true as well. enable != 0
+ * iff connectors are attached and a mode is set.
+ */
crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
}
}
@@ -448,6 +473,9 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
* This does all the plane update related checks using by calling into the
* ->atomic_check hooks provided by the driver.
*
+ * It also sets crtc_state->planes_changed to indicate that a crtc has
+ * updated planes.
+ *
* RETURNS
* Zero for success or -errno
*/
@@ -640,15 +668,29 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_crtc_state *old_crtc_state;
int i;
- /* clear out existing links */
+ /* clear out existing links and update dpms */
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
- if (!connector->encoder)
- continue;
+ if (connector->encoder) {
+ WARN_ON(!connector->encoder->crtc);
+
+ connector->encoder->crtc = NULL;
+ connector->encoder = NULL;
+ }
+
+ crtc = connector->state->crtc;
+ if ((!crtc && old_conn_state->crtc) ||
+ (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
+ struct drm_property *dpms_prop =
+ dev->mode_config.dpms_property;
+ int mode = DRM_MODE_DPMS_OFF;
- WARN_ON(!connector->encoder->crtc);
+ if (crtc && crtc->state->active)
+ mode = DRM_MODE_DPMS_ON;
- connector->encoder->crtc = NULL;
- connector->encoder = NULL;
+ connector->dpms = mode;
+ drm_object_property_set_value(&connector->base,
+ dpms_prop, mode);
+ }
}
/* set new links */
@@ -665,10 +707,16 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
/* set legacy state in the crtc structure */
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct drm_plane *primary = crtc->primary;
+
crtc->mode = crtc->state->mode;
crtc->enabled = crtc->state->enable;
- crtc->x = crtc->primary->state->src_x >> 16;
- crtc->y = crtc->primary->state->src_y >> 16;
+
+ if (drm_atomic_get_existing_plane_state(old_state, primary) &&
+ primary->state->crtc == crtc) {
+ crtc->x = primary->state->src_x >> 16;
+ crtc->y = primary->state->src_y >> 16;
+ }
if (crtc->state->enable)
drm_calc_timestamping_constants(crtc,
@@ -918,7 +966,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
continue;
old_crtc_state->enable = true;
- old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
+ old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc);
}
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -927,7 +975,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
ret = wait_event_timeout(dev->vblank[i].queue,
old_crtc_state->last_vblank_count !=
- drm_vblank_count(dev, i),
+ drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
drm_crtc_vblank_put(crtc);
@@ -1138,7 +1186,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_begin)
continue;
- funcs->atomic_begin(crtc);
+ funcs->atomic_begin(crtc, old_crtc_state);
}
for_each_plane_in_state(old_state, plane, old_plane_state, i) {
@@ -1168,7 +1216,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_flush)
continue;
- funcs->atomic_flush(crtc);
+ funcs->atomic_flush(crtc, old_crtc_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -1204,7 +1252,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->atomic_begin)
- crtc_funcs->atomic_begin(crtc);
+ crtc_funcs->atomic_begin(crtc, old_crtc_state);
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
struct drm_plane_state *old_plane_state =
@@ -1227,7 +1275,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
}
if (crtc_funcs && crtc_funcs->atomic_flush)
- crtc_funcs->atomic_flush(crtc);
+ crtc_funcs->atomic_flush(crtc, old_crtc_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
@@ -1915,10 +1963,6 @@ retry:
if (ret != 0)
goto fail;
- /* TODO: ->page_flip is the only driver callback where the core
- * doesn't update plane->fb. For now patch it up here. */
- plane->fb = plane->state->fb;
-
/* Driver takes ownership of state on successful async commit. */
return 0;
fail:
@@ -1952,9 +1996,12 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
* implementing the legacy DPMS connector interface. It computes the new desired
* ->active state for the corresponding CRTC (if the connector is enabled) and
* updates it.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
*/
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
- int mode)
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+ int mode)
{
struct drm_mode_config *config = &connector->dev->mode_config;
struct drm_atomic_state *state;
@@ -1963,6 +2010,7 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
struct drm_connector *tmp_connector;
int ret;
bool active = false;
+ int old_mode = connector->dpms;
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
@@ -1971,22 +2019,23 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
crtc = connector->state->crtc;
if (!crtc)
- return;
+ return 0;
- /* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */
state = drm_atomic_state_alloc(connector->dev);
if (!state)
- return;
+ return -ENOMEM;
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return;
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- list_for_each_entry(tmp_connector, &config->connector_list, head) {
+ drm_for_each_connector(tmp_connector, connector->dev) {
if (tmp_connector->state->crtc != crtc)
continue;
@@ -2001,17 +2050,16 @@ retry:
if (ret != 0)
goto fail;
- /* Driver takes ownership of state on successful async commit. */
- return;
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
+ connector->dpms = old_mode;
drm_atomic_state_free(state);
- WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret);
-
- return;
+ return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2072,6 +2120,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->mode_changed = false;
state->active_changed = false;
state->planes_changed = false;
+ state->connectors_changed = false;
state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
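
The helper changes split "something about this CRTC changed" into distinct flags: mode_changed for a new mode, connectors_changed for connectors being added or removed, and active_changed for DPMS. A small hypothetical helper showing how they can be inspected; drm_atomic_crtc_needs_modeset() folds these flags together when deciding whether the full modeset sequence must run:

#include <linux/printk.h>
#include <drm/drm_atomic.h>

static void foo_log_crtc_changes(struct drm_crtc_state *state)
{
	if (state->mode_changed)
		pr_debug("display mode changed\n");
	if (state->connectors_changed)
		pr_debug("connectors added to or removed from this CRTC\n");
	if (state->active_changed)
		pr_debug("DPMS on/off state changed\n");

	if (drm_atomic_crtc_needs_modeset(state))
		pr_debug("full modeset sequence required\n");
}
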
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 9b23525c0ed0..192a5f9eeb74 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -53,6 +53,10 @@ struct drm_ctx_list {
*/
void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
@@ -85,10 +89,13 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-int drm_legacy_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
idr_init(&dev->ctx_idr);
- return 0;
}
/**
@@ -101,6 +108,10 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
*/
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
@@ -119,6 +130,10 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
{
struct drm_ctx_list *pos, *tmp;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
mutex_lock(&dev->ctxlist_mutex);
list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
@@ -161,6 +176,10 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map;
struct drm_map_list *_entry;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -205,6 +224,10 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
@@ -305,6 +328,10 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
struct drm_ctx ctx;
int i;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -335,6 +362,10 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
ctx->handle = drm_legacy_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
@@ -378,6 +409,10 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
@@ -400,6 +435,10 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
@@ -420,6 +459,10 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
@@ -442,6 +485,10 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
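
Each legacy context entry point above gains the same guard: the path is only serviced for UMS drivers, or for KMS drivers that explicitly advertise DRIVER_KMS_LEGACY_CONTEXT. The repeated check is equivalent to this single hypothetical predicate:

#include <drm/drmP.h>

static bool foo_legacy_ctx_allowed(struct drm_device *dev)
{
	/* Allowed when the driver opts in, or when it is not a KMS driver. */
	return drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) ||
	       !drm_core_check_feature(dev, DRIVER_MODESET);
}
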
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b9ba06176eb1..33d877c65ced 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -615,7 +615,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
if (atomic_read(&fb->refcount.refcount) > 1) {
drm_modeset_lock_all(dev);
/* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
@@ -627,7 +627,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
}
}
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_plane(plane, dev) {
if (plane->fb == fb)
drm_plane_force_disable(plane);
}
@@ -736,7 +736,7 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc)
unsigned int index = 0;
struct drm_crtc *tmp;
- list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(tmp, crtc->dev) {
if (tmp == crtc)
return index;
@@ -988,7 +988,7 @@ unsigned int drm_connector_index(struct drm_connector *connector)
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
+ drm_for_each_connector(tmp, connector->dev) {
if (tmp == connector)
return index;
@@ -1054,7 +1054,7 @@ void drm_connector_unplug_all(struct drm_device *dev)
{
struct drm_connector *connector;
- /* taking the mode config mutex ends up in a clash with sysfs */
+ /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_unregister(connector);
@@ -1151,7 +1151,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
- const uint32_t *formats, uint32_t format_count,
+ const uint32_t *formats, unsigned int format_count,
enum drm_plane_type type)
{
struct drm_mode_config *config = &dev->mode_config;
@@ -1225,7 +1225,7 @@ EXPORT_SYMBOL(drm_universal_plane_init);
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
- const uint32_t *formats, uint32_t format_count,
+ const uint32_t *formats, unsigned int format_count,
bool is_primary)
{
enum drm_plane_type type;
@@ -1280,7 +1280,7 @@ unsigned int drm_plane_index(struct drm_plane *plane)
unsigned int index = 0;
struct drm_plane *tmp;
- list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
+ drm_for_each_plane(tmp, plane->dev) {
if (tmp == plane)
return index;
@@ -1305,7 +1305,7 @@ drm_plane_from_index(struct drm_device *dev, int idx)
struct drm_plane *plane;
unsigned int i = 0;
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_plane(plane, dev) {
if (i == idx)
return plane;
i++;
@@ -1679,70 +1679,6 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
-static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
- uint32_t total_objects = 0;
-
- total_objects += dev->mode_config.num_crtc;
- total_objects += dev->mode_config.num_connector;
- total_objects += dev->mode_config.num_encoder;
-
- group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
- if (!group->id_list)
- return -ENOMEM;
-
- group->num_crtcs = 0;
- group->num_connectors = 0;
- group->num_encoders = 0;
- return 0;
-}
-
-void drm_mode_group_destroy(struct drm_mode_group *group)
-{
- kfree(group->id_list);
- group->id_list = NULL;
-}
-
-/*
- * NOTE: Driver's shouldn't ever call drm_mode_group_init_legacy_group - it is
- * the drm core's responsibility to set up mode control groups.
- */
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
- struct drm_mode_group *group)
-{
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- ret = drm_mode_group_init(dev, group);
- if (ret)
- return ret;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- group->id_list[group->num_crtcs++] = crtc->base.id;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- group->id_list[group->num_crtcs + group->num_encoders++] =
- encoder->base.id;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- group->id_list[group->num_crtcs + group->num_encoders +
- group->num_connectors++] = connector->base.id;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
-
-void drm_reinit_primary_mode_group(struct drm_device *dev)
-{
- drm_modeset_lock_all(dev);
- drm_mode_group_destroy(&dev->primary->mode_group);
- drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
- drm_modeset_unlock_all(dev);
-}
-EXPORT_SYMBOL(drm_reinit_primary_mode_group);
-
/**
* drm_mode_getresources - get graphics configuration
* @dev: drm device for the ioctl
@@ -1771,12 +1707,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
int crtc_count = 0;
int fb_count = 0;
int encoder_count = 0;
- int copied = 0, i;
+ int copied = 0;
uint32_t __user *fb_id;
uint32_t __user *crtc_id;
uint32_t __user *connector_id;
uint32_t __user *encoder_id;
- struct drm_mode_group *mode_group;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -1809,24 +1744,14 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
/* mode_config.mutex protects the connector list against e.g. DP MST
* connector hot-adding. CRTC/Plane lists are invariant. */
mutex_lock(&dev->mode_config.mutex);
- if (!drm_is_primary_client(file_priv)) {
-
- mode_group = NULL;
- list_for_each(lh, &dev->mode_config.crtc_list)
- crtc_count++;
+ drm_for_each_crtc(crtc, dev)
+ crtc_count++;
- list_for_each(lh, &dev->mode_config.connector_list)
- connector_count++;
+ drm_for_each_connector(connector, dev)
+ connector_count++;
- list_for_each(lh, &dev->mode_config.encoder_list)
- encoder_count++;
- } else {
-
- mode_group = &file_priv->master->minor->mode_group;
- crtc_count = mode_group->num_crtcs;
- connector_count = mode_group->num_connectors;
- encoder_count = mode_group->num_encoders;
- }
+ drm_for_each_encoder(encoder, dev)
+ encoder_count++;
card_res->max_height = dev->mode_config.max_height;
card_res->min_height = dev->mode_config.min_height;
@@ -1837,25 +1762,13 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- if (!mode_group) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- head) {
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = 0; i < mode_group->num_crtcs; i++) {
- if (put_user(mode_group->id_list[i],
- crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ drm_for_each_crtc(crtc, dev) {
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
card_res->count_crtcs = crtc_count;
@@ -1864,29 +1777,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_encoders >= encoder_count) {
copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- if (!mode_group) {
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- head) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
- encoder->name);
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
- if (put_user(mode_group->id_list[i],
- encoder_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ drm_for_each_encoder(encoder, dev) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ encoder->name);
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
}
-
+ copied++;
}
}
card_res->count_encoders = encoder_count;
@@ -1895,31 +1794,16 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_connectors >= connector_count) {
copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- if (!mode_group) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- int start = mode_group->num_crtcs +
- mode_group->num_encoders;
- for (i = start; i < start + mode_group->num_connectors; i++) {
- if (put_user(mode_group->id_list[i],
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ drm_for_each_connector(connector, dev) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
card_res->count_connectors = connector_count;
@@ -2187,7 +2071,7 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
/* For atomic drivers only state objects are synchronously updated and
* protected by modeset locks, so check those first. */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
if (!connector->state)
continue;
@@ -2291,7 +2175,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
/* Plane lists are invariant, no locking needed. */
- list_for_each_entry(plane, &config->plane_list, head) {
+ drm_for_each_plane(plane, dev) {
/*
* Unless userspace set the 'universal planes'
* capability bit, only advertise overlays.
@@ -2596,7 +2480,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
- list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+ drm_for_each_crtc(tmp, crtc->dev)
tmp->primary->old_fb = tmp->primary->fb;
fb = set->fb;
@@ -2607,7 +2491,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
crtc->primary->fb = fb;
}
- list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(tmp, crtc->dev) {
if (tmp->primary->fb)
drm_framebuffer_reference(tmp->primary->fb);
if (tmp->primary->old_fb)
@@ -2706,8 +2590,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- /* For some reason crtc x/y offsets are signed internally. */
- if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ /*
+ * Universal plane src offsets are only 16.16, prevent havoc for
+ * drivers using universal plane code internally.
+ */
+ if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
drm_modeset_lock_all(dev);
@@ -4298,7 +4185,6 @@ void drm_property_unreference_blob(struct drm_property_blob *blob)
mutex_unlock(&dev->mode_config.blob_lock);
else
might_lock(&dev->mode_config.blob_lock);
-
}
EXPORT_SYMBOL(drm_property_unreference_blob);
@@ -4469,9 +4355,7 @@ static int drm_property_replace_global_blob(struct drm_device *dev,
goto err_created;
}
- if (old_blob)
- drm_property_unreference_blob(old_blob);
-
+ drm_property_unreference_blob(old_blob);
*replace = new_blob;
return 0;
@@ -4869,9 +4753,9 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int)value);
ret = 0;
+ if (connector->funcs->dpms)
+ ret = (*connector->funcs->dpms)(connector, (int)value);
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
@@ -5346,13 +5230,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
/* Keep the old fb, don't unref it. */
crtc->primary->old_fb = NULL;
} else {
- /*
- * Warn if the driver hasn't properly updated the crtc->fb
- * field to reflect that the new framebuffer is now used.
- * Failing to do so will screw with the reference counting
- * on framebuffers.
- */
- WARN_ON(crtc->primary->fb != fb);
+ crtc->primary->fb = fb;
/* Unref only the old framebuffer. */
fb = NULL;
}
@@ -5383,24 +5261,23 @@ void drm_mode_config_reset(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_connector *connector;
- list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+ drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
plane->funcs->reset(plane);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ drm_for_each_crtc(crtc, dev)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ drm_for_each_encoder(encoder, dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- connector->status = connector_status_unknown;
-
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev)
if (connector->funcs->reset)
connector->funcs->reset(connector);
- }
+ mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_mode_config_reset);
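The drm_crtc.c hunks above replace open-coded walks over &dev->mode_config.*_list with the drm_for_each_*() iterator macros. A minimal sketch of the pattern a driver would follow with these macros; only the iterators and the connector-list locking shown in drm_mode_config_reset() are taken from the patch, the helper name is hypothetical:

#include <drm/drm_crtc.h>

/* Hypothetical helper: reset every CRTC and connector using the new iterators. */
static void example_reset_outputs(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_connector *connector;

	drm_for_each_crtc(crtc, dev)
		if (crtc->funcs->reset)
			crtc->funcs->reset(crtc);

	/* The connector list can change at runtime, so walk it under the
	 * mode_config mutex, as drm_mode_config_reset() now does. */
	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev)
		if (connector->funcs->reset)
			connector->funcs->reset(connector);
	mutex_unlock(&dev->mode_config.mutex);
}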
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 393114df88a3..ef534758a02c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,7 +121,7 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
}
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev)
if (connector->encoder == encoder)
return true;
return false;
@@ -151,7 +151,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
if (!oops_in_progress)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
return false;
@@ -180,7 +180,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
drm_warn_on_modeset_not_all_locked(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (!drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnect encoder from any connector */
@@ -188,7 +188,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
}
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
@@ -230,7 +230,7 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
const struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
encoder_funcs = encoder->helper_private;
/* Disable unused encoders */
if (encoder->crtc == NULL)
@@ -305,7 +305,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -334,7 +334,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
crtc->hwmode = *adjusted_mode;
/* Prepare the encoders and CRTCs before setting the mode. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -359,7 +359,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!ret)
goto done;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -376,7 +376,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
crtc_funcs->commit(crtc);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -418,11 +418,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
struct drm_encoder *encoder;
/* Decouple all encoders and their attached connectors from this crtc */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
if (connector->encoder != encoder)
continue;
@@ -519,12 +519,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
* restored, not the drivers personal bookkeeping.
*/
count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
save_encoders[count++] = *encoder;
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
save_connectors[count++] = *connector;
}
@@ -562,7 +562,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* a) traverse passed in connector list and get encoders for them */
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
new_encoder = connector->encoder;
@@ -602,7 +602,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
if (!connector->encoder)
continue;
@@ -685,12 +685,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
fail:
/* Restore all previous data. */
count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
*encoder = save_encoders[count++];
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
*connector = save_connectors[count++];
}
@@ -712,7 +712,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev)
if (connector->encoder == encoder)
if (connector->dpms < dpms)
dpms = connector->dpms;
@@ -746,7 +746,7 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
struct drm_connector *connector;
struct drm_device *dev = crtc->dev;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev)
if (connector->encoder && connector->encoder->crtc == crtc)
if (connector->dpms < dpms)
dpms = connector->dpms;
@@ -762,15 +762,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
* implementing the DPMS connector attribute. It computes the new desired DPMS
* state for all encoders and crtcs in the output mesh and calls the ->dpms()
* callback provided by the driver appropriately.
+ *
+ * Returns:
+ * Always returns 0.
*/
-void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
- return;
+ return 0;
old_dpms = connector->dpms;
connector->dpms = mode;
@@ -802,7 +805,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
}
- return;
+ return 0;
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
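With drm_helper_connector_dpms() now returning int, drivers using the helper can plug it straight into their connector funcs and have the result propagated through the DPMS property path shown in the drm_crtc.c hunk earlier. A sketch under that assumption; everything except the helper names is hypothetical:

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected;	/* assume a fixed panel */
}

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_helper_connector_dpms,	/* now int-returning, matches ->dpms */
	.detect = example_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};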
@@ -862,7 +865,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
bool ret;
drm_modeset_lock_all(dev);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
if (!crtc->enabled)
continue;
@@ -876,7 +879,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
/* Turn off outputs that were already powered off */
if (drm_helper_choose_crtc_dpms(crtc)) {
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if(encoder->crtc != crtc)
continue;
@@ -928,15 +931,15 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
if (crtc->funcs->atomic_duplicate_state)
crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
else {
- crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state)
- return -ENOMEM;
- if (crtc->state)
- __drm_atomic_helper_crtc_duplicate_state(crtc, crtc_state);
- else
- crtc_state->crtc = crtc;
+ if (!crtc->state)
+ drm_atomic_helper_crtc_reset(crtc);
+
+ crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
}
+ if (!crtc_state)
+ return -ENOMEM;
+
crtc_state->planes_changed = true;
crtc_state->mode_changed = true;
ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
@@ -957,11 +960,11 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
out:
- if (crtc->funcs->atomic_destroy_state)
- crtc->funcs->atomic_destroy_state(crtc, crtc_state);
- else {
- __drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
- kfree(crtc_state);
+ if (crtc_state) {
+ if (crtc->funcs->atomic_destroy_state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+ else
+ drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
}
return ret;
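The rewritten fallback in drm_helper_crtc_mode_set() leans on the standard atomic CRTC state helpers when the driver does not supply its own duplicate/destroy callbacks. A transitional driver that wants the direct path would typically wire those helpers into its CRTC funcs; a sketch under that assumption (struct name is hypothetical):

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>

/* Hypothetical CRTC funcs for a driver transitioning to atomic helpers. */
static const struct drm_crtc_funcs example_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.set_config = drm_crtc_helper_set_config,
	.destroy = drm_crtc_cleanup,
};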
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..b0487c9f018c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1294,7 +1294,6 @@ retry:
goto retry;
}
DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
- WARN(1, "fail\n");
return -EIO;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b7bf4ce8c012..53d09a19f7e1 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -285,7 +285,6 @@ static void drm_minor_free(struct drm_device *dev, unsigned int type)
if (!minor)
return;
- drm_mode_group_destroy(&minor->mode_group);
put_device(minor->kdev);
spin_lock_irqsave(&drm_minor_lock, flags);
@@ -582,11 +581,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
if (drm_ht_create(&dev->map_hash, 12))
goto err_minors;
- ret = drm_legacy_ctxbitmap_init(dev);
- if (ret) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto err_ht;
- }
+ drm_legacy_ctxbitmap_init(dev);
if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
@@ -600,7 +595,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
err_ctxbitmap:
drm_legacy_ctxbitmap_cleanup(dev);
-err_ht:
drm_ht_remove(&dev->map_hash);
err_minors:
drm_minor_free(dev, DRM_MINOR_LEGACY);
@@ -705,20 +699,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors;
}
- /* setup grouping for legacy outputs */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_unload;
- }
-
ret = 0;
goto out_unlock;
-err_unload:
- if (dev->driver->unload)
- dev->driver->unload(dev);
err_minors:
drm_minor_unregister(dev, DRM_MINOR_LEGACY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7087da37dae0..05bb7311ac5d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3413,7 +3413,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev)
if (connector->encoder == encoder && connector->eld[0])
return connector;
@@ -3802,7 +3802,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
- count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+ count = ARRAY_SIZE(drm_dmt_modes);
if (hdisplay < 0)
hdisplay = 0;
if (vdisplay < 0)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5c1aca443e54..c19a62561183 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -209,23 +209,11 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_framebuffer *fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
- }
-
- list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+ mutex_lock(&dev->mode_config.fb_lock);
+ drm_for_each_fb(fb, dev)
drm_fb_cma_describe(fb, m);
-
- mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
@@ -234,9 +222,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
@@ -275,10 +263,9 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
if (IS_ERR(obj))
return -ENOMEM;
- fbi = framebuffer_alloc(0, dev->dev);
- if (!fbi) {
- dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
- ret = -ENOMEM;
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
+ ret = PTR_ERR(fbi);
goto err_drm_gem_cma_free_object;
}
@@ -286,23 +273,16 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
if (IS_ERR(fbdev_cma->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(fbdev_cma->fb);
- goto err_framebuffer_release;
+ goto err_fb_info_destroy;
}
fb = &fbdev_cma->fb->fb;
helper->fb = fb;
- helper->fbdev = fbi;
fbi->par = helper;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &drm_fbdev_cma_ops;
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- dev_err(dev->dev, "Failed to allocate color map.\n");
- goto err_drm_fb_cma_destroy;
- }
-
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -317,11 +297,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
return 0;
-err_drm_fb_cma_destroy:
- drm_framebuffer_unregister_private(fb);
- drm_fb_cma_destroy(fb);
-err_framebuffer_release:
- framebuffer_release(fbi);
+err_fb_info_destroy:
+ drm_fb_helper_release_fbi(helper);
err_drm_gem_cma_free_object:
drm_gem_cma_free_object(&obj->base);
return ret;
@@ -397,20 +374,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
*/
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
- if (fbdev_cma->fb_helper.fbdev) {
- struct fb_info *info;
- int ret;
-
- info = fbdev_cma->fb_helper.fbdev;
- ret = unregister_framebuffer(info);
- if (ret < 0)
- DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+ drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
if (fbdev_cma->fb) {
drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cac422916c7a..418d299f3b12 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,8 +56,8 @@ static LIST_HEAD(kernel_fb_helper_list);
* Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
- * should also notify the fb helper code from updates to the output
+ * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
+ * They should also notify the fb helper code from updates to the output
* configuration by calling drm_fb_helper_hotplug_event(). For easier
* integration with the output polling code in drm_crtc_helper.c the modeset
* code provides a ->output_poll_changed callback.
@@ -89,8 +89,9 @@ static LIST_HEAD(kernel_fb_helper_list);
* connectors to the fbdev, e.g. if some are reserved for special purposes or
* not adequate to be used for the fbcon.
*
- * Since this is part of the initial setup before the fbdev is published, no
- * locking is required.
+ * This function is protected against concurrent connector hotadds/removals
+ * using drm_fb_helper_add_one_connector() and
+ * drm_fb_helper_remove_one_connector().
*/
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -98,7 +99,8 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
struct drm_connector *connector;
int i;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev) {
struct drm_fb_helper_connector *fb_helper_connector;
fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
@@ -108,6 +110,7 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
+ mutex_unlock(&dev->mode_config.mutex);
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
@@ -115,6 +118,8 @@ fail:
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
+ mutex_unlock(&dev->mode_config.mutex);
+
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
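Since drm_fb_helper_single_add_all_connectors() now takes the mode_config mutex itself, it can be called from the usual fbdev setup sequence without extra locking. A sketch of that sequence, assuming a driver-private wrapper and with error handling trimmed; only the drm_fb_helper_* calls are from the DRM core:

#include <drm/drm_fb_helper.h>

static int example_fbdev_init(struct drm_device *dev,
			      struct drm_fb_helper *helper,
			      const struct drm_fb_helper_funcs *funcs)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, funcs);

	ret = drm_fb_helper_init(dev, helper, dev->mode_config.num_crtc, 4);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret)
		goto err_fini;

	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret)
		goto err_fini;

	return 0;

err_fini:
	drm_fb_helper_fini(helper);
	return ret;
}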
@@ -163,11 +168,14 @@ static void remove_from_modeset(struct drm_mode_set *set,
}
set->num_connectors--;
- /* because i915 is pissy about this..
+ /*
 * TODO maybe need to make sure we set it back to !=NULL somewhere?

*/
- if (set->num_connectors == 0)
+ if (set->num_connectors == 0) {
set->fb = NULL;
+ drm_mode_destroy(connector->dev, set->mode);
+ set->mode = NULL;
+ }
}
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
@@ -269,7 +277,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_crtc *c;
- list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(c, dev) {
if (crtc->base.id == c->base.id)
return c->primary->fb;
}
@@ -321,7 +329,7 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_warn_on_modeset_not_all_locked(dev);
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_plane(plane, dev) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
@@ -349,21 +357,6 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
return error;
}
-/**
- * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
- * @fb_helper: fbcon to restore
- *
- * This should be called from driver's drm ->lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * Use this variant if you need to bypass locking (panic), or already
- * hold all modeset locks. Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked()
- */
-static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
-{
- return restore_fbdev_mode(fb_helper);
-}
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
@@ -393,6 +386,31 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ /* Sometimes user space wants everything disabled, so don't steal the
+ * display if there's a master. */
+ if (dev->primary->master)
+ return false;
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->primary->fb)
+ crtcs_bound++;
+ if (crtc->primary->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound)
+ return false;
+
+ return true;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
/*
* restore fbcon display for all kms driver's using this helper, used for sysrq
* and panic handling.
@@ -411,67 +429,15 @@ static bool drm_fb_helper_force_kernel_mode(void)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
- /*
- * NOTE: Use trylock mode to avoid deadlocks and sleeping in
- * panic context.
- */
- if (__drm_modeset_lock_all(dev, true) != 0) {
- error = true;
- continue;
- }
-
- ret = drm_fb_helper_restore_fbdev_mode(helper);
+ drm_modeset_lock_all(dev);
+ ret = restore_fbdev_mode(helper);
if (ret)
error = true;
-
drm_modeset_unlock_all(dev);
}
return error;
}
-static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
- void *panic_str)
-{
- /*
- * It's a waste of time and effort to switch back to text console
- * if the kernel should reboot before panic messages can be seen.
- */
- if (panic_timeout < 0)
- return 0;
-
- pr_err("panic occurred, switching back to text console\n");
- return drm_fb_helper_force_kernel_mode();
-}
-
-static struct notifier_block paniced = {
- .notifier_call = drm_fb_helper_panic,
-};
-
-static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int bound = 0, crtcs_bound = 0;
-
- /* Sometimes user space wants everything disabled, so don't steal the
- * display if there's a master. */
- if (dev->primary->master)
- return false;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb)
- crtcs_bound++;
- if (crtc->primary->fb == fb_helper->fb)
- bound++;
- }
-
- if (bound < crtcs_bound)
- return false;
-
- return true;
-}
-
-#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
bool ret;
@@ -504,14 +470,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
int i, j;
/*
- * fbdev->blank can be called from irq context in case of a panic.
- * Since we already have our own special panic handler which will
- * restore the fbdev console mode completely, just bail out early.
- */
- if (oops_in_progress)
- return;
-
- /*
* For each CRTC in this fb, turn the connectors on/off.
*/
drm_modeset_lock_all(dev);
@@ -544,6 +502,9 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
*/
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
+ if (oops_in_progress)
+ return -EBUSY;
+
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
@@ -655,7 +616,7 @@ int drm_fb_helper_init(struct drm_device *dev,
}
i = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
@@ -667,14 +628,91 @@ out_free:
}
EXPORT_SYMBOL(drm_fb_helper_init);
+/**
+ * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to alloc fb_info and the members cmap and apertures. Called
+ * by the driver within the fb_probe fb_helper callback function.
+ *
+ * RETURNS:
+ * fb_info pointer if things went okay, pointer containing error code
+ * otherwise
+ */
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+ struct device *dev = fb_helper->dev->dev;
+ struct fb_info *info;
+ int ret;
+
+ info = framebuffer_alloc(0, dev);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret)
+ goto err_release;
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto err_free_cmap;
+ }
+
+ fb_helper->fbdev = info;
+
+ return info;
+
+err_free_cmap:
+ fb_dealloc_cmap(&info->cmap);
+err_release:
+ framebuffer_release(info);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
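The drm_fb_cma_helper.c conversion earlier in this patch is one user of this new helper. A stripped-down sketch of the same pattern in a hypothetical driver's fb_probe callback; the framebuffer-creation call and fb_ops table are placeholders:

static int example_fb_probe(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;
	int ret;

	info = drm_fb_helper_alloc_fbi(helper);	/* fb_info + cmap + apertures */
	if (IS_ERR(info))
		return PTR_ERR(info);

	ret = example_create_fb(helper, sizes);	/* hypothetical, sets helper->fb */
	if (ret)
		goto err_release;

	info->par = helper;
	info->flags = FBINFO_FLAG_DEFAULT;
	info->fbops = &example_fb_ops;		/* hypothetical fb_ops */

	drm_fb_helper_fill_fix(info, helper->fb->pitches[0], helper->fb->depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);

	return 0;

err_release:
	drm_fb_helper_release_fbi(helper);
	return ret;
}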
+
+/**
+ * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unregister_framebuffer, to release the fb_info
+ * framebuffer device
+ */
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+ if (fb_helper && fb_helper->fbdev)
+ unregister_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
+
+/**
+ * drm_fb_helper_release_fbi - dealloc fb_info and its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to free memory taken by fb_info and the members cmap and
+ * apertures
+ */
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+ if (fb_helper) {
+ struct fb_info *info = fb_helper->fbdev;
+
+ if (info) {
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ fb_helper->fbdev = NULL;
+ }
+}
+EXPORT_SYMBOL(drm_fb_helper_release_fbi);
+
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
- pr_info("drm: unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
}
@@ -684,6 +722,149 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
+/**
+ * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unlink_framebuffer implemented by fbdev core
+ */
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+ if (fb_helper && fb_helper->fbdev)
+ unlink_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+
+/**
+ * drm_fb_helper_sys_read - wrapper around fb_sys_read
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to read from framebuffer memory
+ * @count: number of bytes to read from framebuffer memory
+ * @ppos: read offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_read implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return fb_sys_read(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_read);
+
+/**
+ * drm_fb_helper_sys_write - wrapper around fb_sys_write
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to write to framebuffer memory
+ * @count: number of bytes to write to framebuffer memory
+ * @ppos: write offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_write implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return fb_sys_write(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_write);
+
+/**
+ * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around sys_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ sys_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
+
+/**
+ * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around sys_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ sys_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
+
+/**
+ * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around sys_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ sys_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
+
+/**
+ * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around cfb_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ cfb_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
+
+/**
+ * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around cfb_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ cfb_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
+
+/**
+ * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around cfb_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ cfb_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
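Drivers whose framebuffer lives in I/O memory would use the cfb_* wrappers instead of the sys_* ones shown in the drm_fb_cma_helper.c hunk above. A sketch of such an fb_ops table; everything except the drm_fb_helper_* names is an assumption:

static struct fb_ops example_cfb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_blank	= drm_fb_helper_blank,
	.fb_setcmap	= drm_fb_helper_setcmap,
};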
+
+/**
+ * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
+ * @fb_helper: driver-allocated fbdev helper
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * A wrapper around fb_set_suspend implemented by fbdev core
+ */
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+{
+ if (fb_helper && fb_helper->fbdev)
+ fb_set_suspend(fb_helper->fbdev, state);
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend);
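A typical use of the new suspend wrapper, sketched for a hypothetical driver's PM callbacks; only drm_fb_helper_set_suspend() itself comes from this patch, and fb_set_suspend() underneath expects the console lock to be held:

static int example_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct example_private *priv = drm->dev_private;	/* hypothetical */

	console_lock();
	drm_fb_helper_set_suspend(&priv->fb_helper, 1);	/* non-zero = suspend */
	console_unlock();
	return 0;
}

static int example_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct example_private *priv = drm->dev_private;	/* hypothetical */

	console_lock();
	drm_fb_helper_set_suspend(&priv->fb_helper, 0);	/* zero = resume */
	console_unlock();
	return 0;
}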
+
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
@@ -771,9 +952,10 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
int i, j, rc = 0;
int start;
- if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ if (oops_in_progress)
return -EBUSY;
- }
+
+ drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -922,6 +1104,9 @@ int drm_fb_helper_set_par(struct fb_info *info)
struct drm_fb_helper *fb_helper = info->par;
struct fb_var_screeninfo *var = &info->var;
+ if (oops_in_progress)
+ return -EBUSY;
+
if (var->pixclock != 0) {
DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
@@ -947,9 +1132,10 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ if (oops_in_progress)
return -EBUSY;
- }
+
+ drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -1109,12 +1295,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
- /* Switch back to kernel console on panic */
- /* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- dev_info(fb_helper->dev->dev, "registered panic notifier\n");
- atomic_notifier_chain_register(&panic_notifier_list,
- &paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 16a164770713..3c2d4abd71c5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -766,7 +766,7 @@ drm_gem_object_free(struct kref *kref)
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
@@ -778,22 +778,14 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_reference(obj);
-
- mutex_lock(&obj->dev->struct_mutex);
- drm_vm_open_locked(obj->dev, vma);
- mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
- mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(obj->dev, vma);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
@@ -850,7 +842,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
*/
drm_gem_object_reference(obj);
- drm_vm_open_locked(dev, vma);
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index bd75f303da63..86cc793cdf79 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -289,20 +289,15 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
{
struct drm_gem_object *gem_obj;
- mutex_lock(&drm->struct_mutex);
-
gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
if (!gem_obj) {
dev_err(drm->dev, "failed to lookup GEM object\n");
- mutex_unlock(&drm->struct_mutex);
return -EINVAL;
}
*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_unreference(gem_obj);
-
- mutex_unlock(&drm->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem_obj);
return 0;
}
@@ -381,11 +376,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
struct seq_file *m)
{
struct drm_gem_object *obj = &cma_obj->base;
- struct drm_device *dev = obj->dev;
uint64_t off;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index aa8bbb460c57..ddfa6014c2c2 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -70,6 +70,8 @@
#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
+#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
+
typedef struct drm_version_32 {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
@@ -93,7 +95,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
return -EFAULT;
version = compat_alloc_user_space(sizeof(*version));
- if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+ if (!version)
return -EFAULT;
if (__put_user(v32.name_len, &version->name_len)
|| __put_user((void __user *)(unsigned long)v32.name,
@@ -140,7 +142,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
return -EFAULT;
u = compat_alloc_user_space(sizeof(*u));
- if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+ if (!u)
return -EFAULT;
if (__put_user(uq32.unique_len, &u->unique_len)
|| __put_user((void __user *)(unsigned long)uq32.unique,
@@ -168,7 +170,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
return -EFAULT;
u = compat_alloc_user_space(sizeof(*u));
- if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+ if (!u)
return -EFAULT;
if (__put_user(uq32.unique_len, &u->unique_len)
|| __put_user((void __user *)(unsigned long)uq32.unique,
@@ -200,7 +202,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ if (!map)
return -EFAULT;
if (__put_user(idx, &map->offset))
return -EFAULT;
@@ -237,7 +239,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ if (!map)
return -EFAULT;
if (__put_user(m32.offset, &map->offset)
|| __put_user(m32.size, &map->size)
@@ -277,7 +279,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ if (!map)
return -EFAULT;
if (__put_user((void *)(unsigned long)handle, &map->handle))
return -EFAULT;
@@ -306,7 +308,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
return -EFAULT;
client = compat_alloc_user_space(sizeof(*client));
- if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+ if (!client)
return -EFAULT;
if (__put_user(idx, &client->idx))
return -EFAULT;
@@ -345,7 +347,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
int i, err;
stats = compat_alloc_user_space(sizeof(*stats));
- if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+ if (!stats)
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
@@ -382,8 +384,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
unsigned long agp_start;
buf = compat_alloc_user_space(sizeof(*buf));
- if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+ if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
return -EFAULT;
if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
@@ -414,7 +415,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
return -EFAULT;
buf = compat_alloc_user_space(sizeof(*buf));
- if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+ if (!buf)
return -EFAULT;
if (__put_user(b32.size, &buf->size)
@@ -455,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
request = compat_alloc_user_space(nbytes);
- if (!access_ok(VERIFY_WRITE, request, nbytes))
+ if (!request)
return -EFAULT;
list = (struct drm_buf_desc *) (request + 1);
@@ -516,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
return -EINVAL;
nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
request = compat_alloc_user_space(nbytes);
- if (!access_ok(VERIFY_WRITE, request, nbytes))
+ if (!request)
return -EFAULT;
list = (struct drm_buf_pub *) (request + 1);
@@ -563,7 +564,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ if (!request)
return -EFAULT;
if (__put_user(req32.count, &request->count)
|| __put_user((int __user *)(unsigned long)req32.list,
@@ -589,7 +590,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ if (!request)
return -EFAULT;
if (__put_user(req32.ctx_id, &request->ctx_id)
|| __put_user((void *)(unsigned long)req32.handle,
@@ -613,7 +614,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ if (!request)
return -EFAULT;
if (__put_user(ctx_id, &request->ctx_id))
return -EFAULT;
@@ -646,7 +647,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
return -EFAULT;
res = compat_alloc_user_space(sizeof(*res));
- if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+ if (!res)
return -EFAULT;
if (__put_user(res32.count, &res->count)
|| __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
@@ -689,7 +690,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
return -EFAULT;
d = compat_alloc_user_space(sizeof(*d));
- if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+ if (!d)
return -EFAULT;
if (__put_user(d32.context, &d->context)
@@ -764,7 +765,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
int err;
info = compat_alloc_user_space(sizeof(*info));
- if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+ if (!info)
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
@@ -807,7 +808,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| __put_user(req32.size, &request->size)
|| __put_user(req32.type, &request->type))
return -EFAULT;
@@ -834,7 +835,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| get_user(handle, &argp->handle)
|| __put_user(handle, &request->handle))
return -EFAULT;
@@ -858,7 +859,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| __put_user(req32.handle, &request->handle)
|| __put_user(req32.offset, &request->offset))
return -EFAULT;
@@ -874,7 +875,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| get_user(handle, &argp->handle)
|| __put_user(handle, &request->handle))
return -EFAULT;
@@ -897,8 +898,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
unsigned long x;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+ if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
|| __get_user(x, &argp->size)
|| __put_user(x, &request->size))
return -EFAULT;
@@ -923,8 +923,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
unsigned long x;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+ if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
|| __get_user(x, &argp->handle)
|| __put_user(x << PAGE_SHIFT, &request->handle))
return -EFAULT;
@@ -952,7 +951,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+ if (!request ||
__put_user(update32.handle, &request->handle) ||
__put_user(update32.type, &request->type) ||
__put_user(update32.num, &request->num) ||
@@ -994,7 +993,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| __put_user(req32.request.type, &request->request.type)
|| __put_user(req32.request.sequence, &request->request.sequence)
|| __put_user(req32.request.signal, &request->request.signal))
@@ -1016,6 +1015,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
+typedef struct drm_mode_fb_cmd232 {
+ u32 fb_id;
+ u32 width;
+ u32 height;
+ u32 pixel_format;
+ u32 flags;
+ u32 handles[4];
+ u32 pitches[4];
+ u32 offsets[4];
+ u64 modifier[4];
+} __attribute__((packed)) drm_mode_fb_cmd232_t;
+
+static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
+ struct drm_mode_fb_cmd232 req32;
+ struct drm_mode_fb_cmd2 __user *req64;
+ int i;
+ int err;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ req64 = compat_alloc_user_space(sizeof(*req64));
+
+ if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
+ || __put_user(req32.width, &req64->width)
+ || __put_user(req32.height, &req64->height)
+ || __put_user(req32.pixel_format, &req64->pixel_format)
+ || __put_user(req32.flags, &req64->flags))
+ return -EFAULT;
+
+ for (i = 0; i < 4; i++) {
+ if (__put_user(req32.handles[i], &req64->handles[i]))
+ return -EFAULT;
+ if (__put_user(req32.pitches[i], &req64->pitches[i]))
+ return -EFAULT;
+ if (__put_user(req32.offsets[i], &req64->offsets[i]))
+ return -EFAULT;
+ if (__put_user(req32.modifier[i], &req64->modifier[i]))
+ return -EFAULT;
+ }
+
+ err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
+ if (err)
+ return err;
+
+ if (__get_user(req32.fb_id, &req64->fb_id))
+ return -EFAULT;
+
+ if (copy_to_user(argp, &req32, sizeof(req32)))
+ return -EFAULT;
+
+ return 0;
+}
+
static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1104,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+ [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
};
/**
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9cc68fbd2a3..22d207e211e7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,8 +43,8 @@
#include <linux/export.h>
/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) \
- ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
+#define vblanktimestamp(dev, pipe, count) \
+ ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
@@ -57,7 +57,7 @@
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *tvblank, unsigned flags);
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
static void store_vblank(struct drm_device *dev, int crtc,
- unsigned vblank_count_inc,
+ u32 vblank_count_inc,
struct timeval *t_vblank)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
@@ -107,7 +107,7 @@ static void store_vblank(struct drm_device *dev, int crtc,
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
- * @crtc: counter to update
+ * @pipe: counter to update
*
* Call back into the driver to update the appropriate vblank counter
 * (specified by @pipe). Deal with wraparound, if it occurred, and
@@ -120,9 +120,9 @@ static void store_vblank(struct drm_device *dev, int crtc,
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
*/
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank, diff;
bool rc;
struct timeval t_vblank;
@@ -140,21 +140,21 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* corresponding vblank timestamp.
*/
do {
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
- rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
- } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+ cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
+ rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe));
/* Deal with counter wrap */
diff = cur_vblank - vblank->last;
if (cur_vblank < vblank->last) {
diff += dev->max_vblank_count + 1;
- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, vblank->last, cur_vblank, diff);
+ DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+ pipe, vblank->last, cur_vblank, diff);
}
- DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
- crtc, diff);
+ DRM_DEBUG("updating vblank count on crtc %u, missed %d\n",
+ pipe, diff);
if (diff == 0)
return;
@@ -167,7 +167,7 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
if (!rc)
t_vblank = (struct timeval) {0, 0};
- store_vblank(dev, crtc, diff, &t_vblank);
+ store_vblank(dev, pipe, diff, &t_vblank);
}
/*
@@ -176,9 +176,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* are preserved, even if there are any spurious vblank irq's after
* disable.
*/
-static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
u32 vblcount;
s64 diff_ns;
@@ -206,8 +206,8 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* vblank interrupt is disabled.
*/
if (!vblank->enabled &&
- drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
- drm_update_vblank_count(dev, crtc);
+ drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) {
+ drm_update_vblank_count(dev, pipe);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
return;
}
@@ -218,7 +218,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* hardware potentially runtime suspended.
*/
if (vblank->enabled) {
- dev->driver->disable_vblank(dev, crtc);
+ dev->driver->disable_vblank(dev, pipe);
vblank->enabled = false;
}
@@ -235,9 +235,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* delayed gpu counter increment.
*/
do {
- vblank->last = dev->driver->get_vblank_counter(dev, crtc);
- vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+ vblank->last = dev->driver->get_vblank_counter(dev, pipe);
+ vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0);
+ } while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc);
if (!count)
vblrc = 0;
@@ -247,7 +247,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
*/
vblcount = vblank->count;
diff_ns = timeval_to_ns(&tvblank) -
- timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+ timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
/* If there is at least 1 msec difference between the last stored
* timestamp and tvblank, then we are currently executing our
@@ -262,7 +262,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* hope for the best.
*/
if (vblrc && (abs64(diff_ns) > 1000000))
- store_vblank(dev, crtc, 1, &tvblank);
+ store_vblank(dev, pipe, 1, &tvblank);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
@@ -271,16 +271,16 @@ static void vblank_disable_fn(unsigned long arg)
{
struct drm_vblank_crtc *vblank = (void *)arg;
struct drm_device *dev = vblank->dev;
+ unsigned int pipe = vblank->pipe;
unsigned long irqflags;
- int crtc = vblank->crtc;
if (!dev->vblank_disable_allowed)
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
- DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
- vblank_disable_and_save(dev, crtc);
+ DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
+ vblank_disable_and_save(dev, pipe);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -293,14 +293,14 @@ static void vblank_disable_fn(unsigned long arg)
*/
void drm_vblank_cleanup(struct drm_device *dev)
{
- int crtc;
+ unsigned int pipe;
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
- for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
WARN_ON(vblank->enabled &&
drm_core_check_feature(dev, DRIVER_MODESET));
@@ -316,17 +316,18 @@ EXPORT_SYMBOL(drm_vblank_cleanup);
/**
* drm_vblank_init - initialize vblank support
- * @dev: drm_device
- * @num_crtcs: number of crtcs supported by @dev
+ * @dev: DRM device
+ * @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
{
- int i, ret = -ENOMEM;
+ int ret = -ENOMEM;
+ unsigned int i;
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
@@ -341,7 +342,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
struct drm_vblank_crtc *vblank = &dev->vblank[i];
vblank->dev = dev;
- vblank->crtc = i;
+ vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
@@ -624,17 +625,17 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
framedur_ns /= 2;
} else
- DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
crtc->pixeldur_ns = pixeldur_ns;
crtc->linedur_ns = linedur_ns;
crtc->framedur_ns = framedur_ns;
- DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
crtc->base.id, mode->crtc_htotal,
mode->crtc_vtotal, mode->crtc_vdisplay);
- DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
crtc->base.id, dotclock, framedur_ns,
linedur_ns, pixeldur_ns);
}
@@ -643,7 +644,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
* drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
* @dev: DRM device
- * @crtc: Which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to struct timeval which should receive the timestamp
@@ -686,7 +687,8 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
*
*/
-int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ unsigned int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags,
@@ -700,8 +702,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
bool invbl;
- if (crtc < 0 || crtc >= dev->num_crtcs) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
@@ -720,7 +722,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* Happens during initial modesetting of a crtc.
*/
if (framedur_ns == 0) {
- DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
return -EAGAIN;
}
@@ -736,13 +738,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
- vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
+ vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos,
&hpos, &stime, &etime);
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
- DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
- crtc, vbl_status);
+ DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n",
+ pipe, vbl_status);
return -EIO;
}
@@ -756,8 +758,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* Noisy system timing? */
if (i == DRM_TIMESTAMP_MAXRETRIES) {
- DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
- crtc, duration_ns/1000, *max_error/1000, i);
+ DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
+ pipe, duration_ns/1000, *max_error/1000, i);
}
/* Return upper bound of timestamp precision error. */
@@ -790,8 +792,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
- DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
- crtc, (int)vbl_status, hpos, vpos,
+ DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ pipe, (int)vbl_status, hpos, vpos,
(long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
duration_ns/1000, i);
@@ -816,7 +818,7 @@ static struct timeval get_drm_timestamp(void)
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval
* @dev: DRM device
- * @crtc: which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
* @flags: Flags to pass to driver:
* 0 = Default,
@@ -833,7 +835,7 @@ static struct timeval get_drm_timestamp(void)
* True if timestamp is considered to be very precise, false otherwise.
*/
static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *tvblank, unsigned flags)
{
int ret;
@@ -843,7 +845,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
/* Query driver if possible and precision timestamping enabled. */
if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
- ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
tvblank, flags);
if (ret > 0)
return true;
@@ -860,7 +862,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC for which to retrieve the counter
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
@@ -871,12 +873,13 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
* Returns:
* The software vblank counter.
*/
-u32 drm_vblank_count(struct drm_device *dev, int crtc)
+u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
+
return vblank->count;
}
EXPORT_SYMBOL(drm_vblank_count);
@@ -901,11 +904,10 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_count);
/**
- * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
- * and the system timestamp corresponding to that vblank counter value.
- *
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
+ * system timestamp corresponding to that vblank counter value.
* @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC whose counter to retrieve
* @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
*
* Fetches the "cooked" vblank count value that represents the number of
@@ -913,13 +915,13 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
*/
-u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
/*
@@ -930,7 +932,7 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
do {
cur_vblank = vblank->count;
smp_rmb();
- *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
smp_rmb();
} while (cur_vblank != vblank->count);
@@ -957,7 +959,7 @@ static void send_vblank_event(struct drm_device *dev,
/**
* drm_send_vblank_event - helper to send vblank event after pageflip
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
* @e: the event to send
*
* Updates sequence # and timestamp on event, and sends it to userspace.
@@ -965,20 +967,20 @@ static void send_vblank_event(struct drm_device *dev,
*
* This is the legacy version of drm_crtc_send_vblank_event().
*/
-void drm_send_vblank_event(struct drm_device *dev, int crtc,
- struct drm_pending_vblank_event *e)
+void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+ struct drm_pending_vblank_event *e)
{
struct timeval now;
unsigned int seq;
- if (crtc >= 0) {
- seq = drm_vblank_count_and_time(dev, crtc, &now);
+ if (dev->num_crtcs > 0) {
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
} else {
seq = 0;
now = get_drm_timestamp();
}
- e->pipe = crtc;
+ e->pipe = pipe;
send_vblank_event(dev, e, seq, &now);
}
EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1003,11 +1005,14 @@ EXPORT_SYMBOL(drm_crtc_send_vblank_event);
/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
*/
-static int drm_vblank_enable(struct drm_device *dev, int crtc)
+static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int ret = 0;
assert_spin_locked(&dev->vbl_lock);
@@ -1022,13 +1027,13 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
* timestamps. Filter code in drm_handle_vblank() will
* prevent double-accounting of the same vblank interval.
*/
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ ret = dev->driver->enable_vblank(dev, pipe);
+ DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
if (ret)
atomic_dec(&vblank->refcount);
else {
vblank->enabled = true;
- drm_update_vblank_count(dev, crtc);
+ drm_update_vblank_count(dev, pipe);
}
}
@@ -1040,7 +1045,7 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
- * @crtc: which CRTC to own
+ * @pipe: index of CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
@@ -1048,24 +1053,24 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
* This is the legacy version of drm_crtc_vblank_get().
*
* Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
*/
-int drm_vblank_get(struct drm_device *dev, int crtc)
+int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
int ret = 0;
if (!dev->num_crtcs)
return -EINVAL;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return -EINVAL;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &vblank->refcount) == 1) {
- ret = drm_vblank_enable(dev, crtc);
+ ret = drm_vblank_enable(dev, pipe);
} else {
if (!vblank->enabled) {
atomic_dec(&vblank->refcount);
@@ -1088,7 +1093,7 @@ EXPORT_SYMBOL(drm_vblank_get);
* This is the native kms version of drm_vblank_get().
*
* Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
*/
int drm_crtc_vblank_get(struct drm_crtc *crtc)
{
@@ -1097,23 +1102,23 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_get);
/**
- * drm_vblank_put - give up ownership of vblank events
+ * drm_vblank_put - release ownership of vblank events
* @dev: DRM device
- * @crtc: which counter to give up
+ * @pipe: index of CRTC to release
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*
* This is the legacy version of drm_crtc_vblank_put().
*/
-void drm_vblank_put(struct drm_device *dev, int crtc)
+void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(atomic_read(&vblank->refcount) == 0))
return;
/* Last user schedules interrupt disable */
@@ -1147,30 +1152,34 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
/**
* drm_wait_one_vblank - wait for one vblank
* @dev: DRM device
- * @crtc: crtc index
+ * @pipe: CRTC index
*
* This waits for one vblank to pass on @crtc, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
* due to lack of driver support or because the crtc is off.
*/
-void drm_wait_one_vblank(struct drm_device *dev, int crtc)
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int ret;
u32 last;
- ret = drm_vblank_get(dev, crtc);
- if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ ret = drm_vblank_get(dev, pipe);
+ if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret))
return;
- last = drm_vblank_count(dev, crtc);
+ last = drm_vblank_count(dev, pipe);
- ret = wait_event_timeout(dev->vblank[crtc].queue,
- last != drm_vblank_count(dev, crtc),
+ ret = wait_event_timeout(vblank->queue,
+ last != drm_vblank_count(dev, pipe),
msecs_to_jiffies(100));
- WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
+ WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe);
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(dev, pipe);
}
EXPORT_SYMBOL(drm_wait_one_vblank);
@@ -1191,7 +1200,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
@@ -1202,21 +1211,21 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
*
* This is the legacy version of drm_crtc_vblank_off().
*/
-void drm_vblank_off(struct drm_device *dev, int crtc)
+void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
unsigned int seq;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return;
spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock);
- vblank_disable_and_save(dev, crtc);
+ vblank_disable_and_save(dev, pipe);
wake_up(&vblank->queue);
/*
@@ -1230,16 +1239,16 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
spin_unlock(&dev->vbl_lock);
/* Send any queued vblank events, lest the natives grow disquiet */
- seq = drm_vblank_count_and_time(dev, crtc, &now);
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != crtc)
+ if (e->pipe != pipe)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
list_del(&e->base.link);
- drm_vblank_put(dev, e->pipe);
+ drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
@@ -1267,7 +1276,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
* drm_crtc_vblank_reset - reset vblank state to off on a CRTC
- * @crtc: CRTC in question
+ * @drm_crtc: CRTC in question
*
* Drivers can use this function to reset the vblank state to off at load time.
* Drivers should use this together with the drm_crtc_vblank_off() and
@@ -1300,7 +1309,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
/**
* drm_vblank_on - enable vblank events on a CRTC
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
*
* This functions restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
@@ -1309,12 +1318,12 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
*
* This is the legacy version of drm_crtc_vblank_on().
*/
-void drm_vblank_on(struct drm_device *dev, int crtc)
+void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -1332,7 +1341,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
* vblank counter value before and after a modeset
*/
vblank->last =
- (dev->driver->get_vblank_counter(dev, crtc) - 1) &
+ (dev->driver->get_vblank_counter(dev, pipe) - 1) &
dev->max_vblank_count;
/*
* re-enable interrupts if there are users left, or the
@@ -1340,7 +1349,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
*/
if (atomic_read(&vblank->refcount) != 0 ||
(!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
- WARN_ON(drm_vblank_enable(dev, crtc));
+ WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_on);
@@ -1365,7 +1374,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
@@ -1385,15 +1394,15 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
* Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
* again.
*/
-void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return;
/*
@@ -1405,7 +1414,7 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
*/
if (!vblank->inmodeset) {
vblank->inmodeset = 0x1;
- if (drm_vblank_get(dev, crtc) == 0)
+ if (drm_vblank_get(dev, pipe) == 0)
vblank->inmodeset |= 0x2;
}
}
@@ -1414,27 +1423,30 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset);
/**
* drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
*
* This function again drops the temporary vblank reference acquired in
* drm_vblank_pre_modeset.
*/
-void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (vblank->inmodeset & 0x2)
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(dev, pipe);
vblank->inmodeset = 0;
}
@@ -1456,7 +1468,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- unsigned int crtc;
+ unsigned int pipe;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
@@ -1466,16 +1478,16 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs)
+ pipe = modeset->crtc;
+ if (pipe >= dev->num_crtcs)
return -EINVAL;
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, crtc);
+ drm_vblank_pre_modeset(dev, pipe);
break;
case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, crtc);
+ drm_vblank_post_modeset(dev, pipe);
break;
default:
return -EINVAL;
@@ -1484,7 +1496,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
return 0;
}
-static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
@@ -1538,7 +1550,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
vblwait->reply.sequence = vblwait->request.sequence;
}
- DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+ DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
vblwait->request.sequence, seq, pipe);
trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1587,7 +1599,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
int ret;
- unsigned int flags, seq, crtc, high_crtc;
+ unsigned int flags, seq, pipe, high_pipe;
if (!dev->irq_enabled)
return -EINVAL;
@@ -1606,22 +1618,22 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
- if (high_crtc)
- crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_pipe)
+ pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
else
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
- if (crtc >= dev->num_crtcs)
+ pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ if (pipe >= dev->num_crtcs)
return -EINVAL;
- vblank = &dev->vblank[crtc];
+ vblank = &dev->vblank[pipe];
- ret = drm_vblank_get(dev, crtc);
+ ret = drm_vblank_get(dev, pipe);
if (ret) {
DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
return ret;
}
- seq = drm_vblank_count(dev, crtc);
+ seq = drm_vblank_count(dev, pipe);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
@@ -1638,7 +1650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
/* must hold on to the vblank ref until the event fires
* drm_vblank_put will be called asynchronously
*/
- return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+ return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -1646,11 +1658,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
vblwait->request.sequence = seq + 1;
}
- DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
- vblwait->request.sequence, crtc);
+ DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
+ vblwait->request.sequence, pipe);
vblank->last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
- (((drm_vblank_count(dev, crtc) -
+ (((drm_vblank_count(dev, pipe) -
vblwait->request.sequence) <= (1 << 23)) ||
!vblank->enabled ||
!dev->irq_enabled));
@@ -1658,7 +1670,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
if (ret != -EINTR) {
struct timeval now;
- vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
@@ -1669,11 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
}
done:
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(dev, pipe);
return ret;
}
-static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
@@ -1681,10 +1693,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
assert_spin_locked(&dev->event_lock);
- seq = drm_vblank_count_and_time(dev, crtc, &now);
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != crtc)
+ if (e->pipe != pipe)
continue;
if ((seq - e->event.sequence) > (1<<23))
continue;
@@ -1693,26 +1705,26 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
e->event.sequence, seq);
list_del(&e->base.link);
- drm_vblank_put(dev, e->pipe);
+ drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
}
- trace_drm_vblank_event(crtc, seq);
+ trace_drm_vblank_event(pipe, seq);
}
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
- * @crtc: where this event occurred
+ * @pipe: index of CRTC where this event occurred
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*
* This is the legacy version of drm_crtc_handle_vblank().
*/
-bool drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 vblcount;
s64 diff_ns;
struct timeval tvblank;
@@ -1721,7 +1733,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
if (WARN_ON_ONCE(!dev->num_crtcs))
return false;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return false;
spin_lock_irqsave(&dev->event_lock, irqflags);
@@ -1745,11 +1757,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
/* Get current timestamp and count. */
vblcount = vblank->count;
- drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+ drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ);
/* Compute time difference to timestamp of last vblank */
diff_ns = timeval_to_ns(&tvblank) -
- timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+ timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
/* Update vblank timestamp and count if at least
* DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
@@ -1761,15 +1773,15 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* ignore those for accounting.
*/
if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
- store_vblank(dev, crtc, 1, &tvblank);
+ store_vblank(dev, pipe, 1, &tvblank);
else
- DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
- crtc, (int) diff_ns);
+ DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
+ pipe, (int) diff_ns);
spin_unlock(&dev->vblank_time_lock);
wake_up(&vblank->queue);
- drm_handle_vblank_events(dev, crtc);
+ drm_handle_vblank_events(dev, pipe);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
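The drm_irq.c hunks above are a pure rename: the CRTC index a driver passes in changes from a signed "crtc" to an unsigned "pipe", while the calling convention stays the same. As an illustrative sketch only (not part of this patch; the foo_* names are hypothetical), a driver's interrupt handler still feeds the core the pipe index it serviced:

	#include <linux/interrupt.h>
	#include <drm/drmP.h>

	/* Hypothetical hardware helper, assumed to read a per-pipe status bit. */
	static bool foo_pipe_vblank_pending(struct drm_device *dev, unsigned int pipe);

	static irqreturn_t foo_irq_handler(int irq, void *arg)
	{
		struct drm_device *dev = arg;
		unsigned int pipe;

		for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
			if (!foo_pipe_vblank_pending(dev, pipe))
				continue;

			/* Bumps the cooked counter, timestamps the vblank and
			 * wakes anyone sleeping in drm_wait_vblank(). */
			drm_handle_vblank(dev, pipe);
		}

		return IRQ_HANDLED;
	}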
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c1dc61473db5..9b731786e4db 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -42,7 +42,7 @@ struct drm_file;
#define DRM_KERNEL_CONTEXT 0
#define DRM_RESERVED_CONTEXTS 1
-int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_init(struct drm_device *dev);
void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f861361a635e..4924d381b664 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
struct drm_master *master = file_priv->master;
int ret = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
++file_priv->lock_count;
if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c0a5cd8c5262..fba321ca4344 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -55,41 +55,27 @@
* drm_modeset_acquire_fini(&ctx);
*/
-
/**
- * __drm_modeset_lock_all - internal helper to grab all modeset locks
- * @dev: DRM device
- * @trylock: trylock mode for atomic contexts
- *
- * This is a special version of drm_modeset_lock_all() which can also be used in
- * atomic contexts. Then @trylock must be set to true.
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
*
- * Returns:
- * 0 on success or negative error code on failure.
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
*/
-int __drm_modeset_lock_all(struct drm_device *dev,
- bool trylock)
+void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
- ctx = kzalloc(sizeof(*ctx),
- trylock ? GFP_ATOMIC : GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (WARN_ON(!ctx))
+ return;
- if (trylock) {
- if (!mutex_trylock(&config->mutex)) {
- ret = -EBUSY;
- goto out;
- }
- } else {
- mutex_lock(&config->mutex);
- }
+ mutex_lock(&config->mutex);
drm_modeset_acquire_init(ctx, 0);
- ctx->trylock_only = trylock;
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -108,7 +94,7 @@ retry:
drm_warn_on_modeset_not_all_locked(dev);
- return 0;
+ return;
fail:
if (ret == -EDEADLK) {
@@ -116,23 +102,7 @@ fail:
goto retry;
}
-out:
kfree(ctx);
- return ret;
-}
-EXPORT_SYMBOL(__drm_modeset_lock_all);
-
-/**
- * drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
- *
- * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
- */
-void drm_modeset_lock_all(struct drm_device *dev)
-{
- WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
@@ -276,7 +246,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
if (oops_in_progress)
return;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ drm_for_each_crtc(crtc, dev)
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -464,18 +434,17 @@ EXPORT_SYMBOL(drm_modeset_unlock);
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_mode_config *config = &dev->mode_config;
struct drm_crtc *crtc;
struct drm_plane *plane;
int ret = 0;
- list_for_each_entry(crtc, &config->crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
}
- list_for_each_entry(plane, &config->plane_list, head) {
+ drm_for_each_plane(plane, dev) {
ret = drm_modeset_lock(&plane->mutex, ctx);
if (ret)
return ret;
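With __drm_modeset_lock_all() removed, drm_modeset_lock_all() can no longer fail and callers stop checking a return value. A minimal usage sketch, not taken from this patch:

	/* Take every modeset lock, inspect shared mode_config state, drop them. */
	drm_modeset_lock_all(dev);
	/* ... walk dev->mode_config.crtc_list, connector_list, etc. ... */
	drm_modeset_unlock_all(dev);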
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index aaa130736bf8..be3884073ea4 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -19,7 +19,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
unsigned int index = 0;
struct drm_crtc *tmp;
- list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(tmp, dev) {
if (tmp->port == port)
return 1 << index;
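drm_of.c is one of several files in this series that replace open-coded list_for_each_entry() walks over the mode_config lists with the drm_for_each_crtc()/drm_for_each_plane()/drm_for_each_connector() iterators. They are drop-in replacements; an illustrative example (not from this patch) that counts enabled CRTCs on an atomic driver:

	struct drm_crtc *crtc;
	unsigned int enabled = 0;

	drm_for_each_crtc(crtc, dev)
		if (crtc->state && crtc->state->enable)
			enabled++;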
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 2f0ed11024eb..5e5a07af02c8 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -91,13 +91,14 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
*/
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev) {
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
*(connector_list++) = connector;
count++;
}
+ }
return count;
}
@@ -436,7 +437,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
- crtc_funcs[i]->atomic_begin(crtc[i]);
+ crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
}
/*
@@ -451,7 +452,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
- crtc_funcs[i]->atomic_flush(crtc[i]);
+ crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
}
/*
@@ -525,10 +526,12 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
- else if (plane->state)
+ else {
+ if (!plane->state)
+ drm_atomic_helper_plane_reset(plane);
+
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- else
- plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ }
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
@@ -572,10 +575,12 @@ int drm_plane_helper_disable(struct drm_plane *plane)
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
- else if (plane->state)
+ else {
+ if (!plane->state)
+ drm_atomic_helper_plane_reset(plane);
+
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- else
- plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ }
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
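Both transitional plane helpers above now share one fallback when the driver has no ->atomic_duplicate_state hook: make sure a software plane state exists, then duplicate it, instead of handing the commit path a zeroed drm_plane_state. Condensed for illustration (this mirrors, but is not, the patched code):

	if (plane->funcs->atomic_duplicate_state) {
		plane_state = plane->funcs->atomic_duplicate_state(plane);
	} else {
		if (!plane->state)
			drm_atomic_helper_plane_reset(plane);	/* allocate a baseline state */
		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
	}
	if (!plane_state)
		return -ENOMEM;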
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 04203c0d2ecb..d734780b31c0 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -93,6 +93,27 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
return 1;
}
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
+ drm_for_each_connector(connector, dev) {
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
+ poll = true;
+ }
+
+ if (poll)
+ schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
{
@@ -153,7 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
- drm_kms_helper_poll_enable(dev);
+ __drm_kms_helper_poll_enable(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
@@ -295,7 +316,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
-#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
@@ -312,7 +332,7 @@ static void output_poll_execute(struct work_struct *work)
goto out;
mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
/* Ignore forced connectors. */
if (connector->force)
@@ -407,20 +427,9 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
- bool poll = false;
- struct drm_connector *connector;
-
- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
- return;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT))
- poll = true;
- }
-
- if (poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+ mutex_lock(&dev->mode_config.mutex);
+ __drm_kms_helper_poll_enable(dev);
+ mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
@@ -495,7 +504,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
return false;
mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
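After this rework the locked work lives in __drm_kms_helper_poll_enable(), which WARNs unless mode_config.mutex is held, while the exported drm_kms_helper_poll_enable() acquires the mutex itself. External callers therefore keep the simple form; a hedged sketch of a driver resume path:

	/* Illustrative only: dev->mode_config.mutex must NOT already be held,
	 * since the helper now takes it internally. */
	drm_kms_helper_poll_enable(dev);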
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 7de0b1084fcd..02aecfed6354 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -3,10 +3,9 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
-exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
- exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
- exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
- exynos_drm_plane.o exynos_drm_dmabuf.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
+ exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
+ exynos_drm_plane.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8b1225f245fc..484e312e0a22 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -152,15 +152,15 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
#define OFFSIZE(x) (((x) & 0x3fff) << 14)
#define PAGEWIDTH(x) ((x) & 0x3fff)
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb)
{
- struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
val = readl(ctx->addr + DECON_WINCONx(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_XRGB1555:
val |= WINCONx_BPPMODE_16BPP_I1555;
val |= WINCONx_HAWSWP_F;
@@ -186,7 +186,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
return;
}
- DRM_DEBUG_KMS("bpp = %u\n", plane->bpp);
+ DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -196,7 +196,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
@@ -219,17 +219,16 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win,
writel(val, ctx->addr + DECON_SHADOWCON);
}
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
+ struct drm_plane_state *state = plane->base.state;
+ unsigned int win = plane->zpos;
+ unsigned int bpp = state->fb->bits_per_pixel >> 3;
+ unsigned int pitch = state->fb->pitches[0];
u32 val;
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
-
if (ctx->suspended)
return;
@@ -238,8 +237,8 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
writel(val, ctx->addr + DECON_VIDOSDxA(win));
- val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) |
- COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1);
+ val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) |
+ COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
writel(val, ctx->addr + DECON_VIDOSDxB(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -252,14 +251,14 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win));
- val = plane->dma_addr[0] + plane->pitch * plane->crtc_height;
+ val = plane->dma_addr[0] + pitch * plane->crtc_h;
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
- val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3))
- | PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3));
+ val = OFFSIZE(pitch - plane->crtc_w * bpp)
+ | PAGEWIDTH(plane->crtc_w * bpp);
writel(val, ctx->addr + DECON_VIDW0xADD2(win));
- decon_win_set_pixfmt(ctx, win);
+ decon_win_set_pixfmt(ctx, win, state->fb);
/* window enable */
val = readl(ctx->addr + DECON_WINCONx(win));
@@ -277,17 +276,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
atomic_set(&ctx->win_updated, 1);
}
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
+ unsigned int win = plane->zpos;
u32 val;
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
-
if (ctx->suspended)
return;
@@ -378,7 +373,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
* a destroyed buffer later.
*/
for (i = 0; i < WINDOWS_NR; i++)
- decon_win_disable(crtc, i);
+ decon_disable_plane(crtc, &ctx->planes[i]);
decon_swreset(ctx);
@@ -407,7 +402,7 @@ void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
writel(val, ctx->addr + DECON_TRIGCON);
}
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -460,10 +455,9 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.commit = decon_commit,
- .win_commit = decon_win_commit,
- .win_disable = decon_win_disable,
+ .update_plane = decon_update_plane,
+ .disable_plane = decon_disable_plane,
.te_handler = decon_te_irq_handler,
- .clear_channels = decon_clear_channels,
};
static int decon_bind(struct device *dev, struct device *master, void *data)
@@ -497,7 +491,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
goto err;
}
- ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+ decon_clear_channels(ctx->crtc);
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
if (ret)
goto err;
@@ -514,8 +510,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- if (is_drm_iommu_supported(ctx->drm_dev))
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
}
static const struct component_ops decon_component_ops = {
@@ -533,7 +528,7 @@ static irqreturn_t decon_vsync_irq_handler(int irq, void *dev_id)
val = readl(ctx->addr + DECON_VIDINTCON1);
if (val & VIDINTCON1_INTFRMPEND) {
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
/* clear */
writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1);
@@ -553,7 +548,7 @@ static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id)
val = readl(ctx->addr + DECON_VIDINTCON1);
if (val & VIDINTCON1_INTFRMDONEPEND) {
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->crtc);
/* clear */
writel(VIDINTCON1_INTFRMDONEPEND,
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 362532afd1a5..07926547c94f 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -61,7 +61,7 @@ struct decon_context {
atomic_t wait_vsync_event;
struct exynos_drm_panel_info panel;
- struct exynos_drm_display *display;
+ struct drm_encoder *encoder;
};
static const struct of_device_id decon_driver_dt_match[] = {
@@ -126,7 +126,9 @@ static int decon_ctx_initialize(struct decon_context *ctx,
ctx->drm_dev = drm_dev;
ctx->pipe = priv->pipe++;
- ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev);
+ decon_clear_channels(ctx->crtc);
+
+ ret = drm_iommu_attach_device(drm_dev, ctx->dev);
if (ret)
priv->pipe--;
@@ -136,8 +138,7 @@ static int decon_ctx_initialize(struct decon_context *ctx,
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- if (is_drm_iommu_supported(ctx->drm_dev))
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
@@ -271,16 +272,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
}
}
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb)
{
- struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
int padding;
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_BURSTLEN_16WORD;
@@ -329,7 +330,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -339,8 +340,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
- if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width;
+ if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
@@ -382,23 +383,19 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
writel(val, ctx->regs + SHADOWCON);
}
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
- struct exynos_drm_plane *plane;
+ struct drm_plane_state *state = plane->base.state;
int padding;
unsigned long val, alpha;
unsigned int last_x;
unsigned int last_y;
-
- if (ctx->suspended)
- return;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
+ unsigned int win = plane->zpos;
+ unsigned int bpp = state->fb->bits_per_pixel >> 3;
+ unsigned int pitch = state->fb->pitches[0];
if (ctx->suspended)
return;
@@ -420,11 +417,11 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
val = (unsigned long)plane->dma_addr[0];
writel(val, ctx->regs + VIDW_BUF_START(win));
- padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+ padding = (pitch / bpp) - state->fb->width;
/* buffer size */
- writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win));
- writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+ writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
+ writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win));
/* offset from the start of the buffer to read */
writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
@@ -433,25 +430,25 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
DRM_DEBUG_KMS("start addr = 0x%lx\n",
(unsigned long)val);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- plane->crtc_width, plane->crtc_height);
+ plane->crtc_w, plane->crtc_h);
/*
* OSD position.
* In case the window layout goes outside the LCD layout, DECON fails.
*/
- if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay)
- plane->crtc_x = mode->hdisplay - plane->crtc_width;
- if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay)
- plane->crtc_y = mode->vdisplay - plane->crtc_height;
+ if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
+ plane->crtc_x = mode->hdisplay - plane->crtc_w;
+ if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
+ plane->crtc_y = mode->vdisplay - plane->crtc_h;
val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = plane->crtc_x + plane->crtc_width;
+ last_x = plane->crtc_x + plane->crtc_w;
if (last_x)
last_x--;
- last_y = plane->crtc_y + plane->crtc_height;
+ last_y = plane->crtc_y + plane->crtc_h;
if (last_y)
last_y--;
@@ -475,7 +472,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
writel(alpha, ctx->regs + VIDOSD_D(win));
- decon_win_set_pixfmt(ctx, win);
+ decon_win_set_pixfmt(ctx, win, state->fb);
/* hardware window 0 doesn't support color key. */
if (win != 0)
@@ -495,17 +492,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
writel(val, ctx->regs + DECON_UPDATE);
}
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
+ unsigned int win = plane->zpos;
u32 val;
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
-
if (ctx->suspended)
return;
@@ -601,7 +594,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
* a destroyed buffer later.
*/
for (i = 0; i < WINDOWS_NR; i++)
- decon_win_disable(crtc, i);
+ decon_disable_plane(crtc, &ctx->planes[i]);
clk_disable_unprepare(ctx->vclk);
clk_disable_unprepare(ctx->eclk);
@@ -621,9 +614,8 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.wait_for_vblank = decon_wait_for_vblank,
- .win_commit = decon_win_commit,
- .win_disable = decon_win_disable,
- .clear_channels = decon_clear_channels,
+ .update_plane = decon_update_plane,
+ .disable_plane = decon_disable_plane,
};
@@ -643,8 +635,8 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
goto out;
if (!ctx->i80_if) {
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
+ exynos_drm_crtc_finish_pageflip(ctx->crtc);
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
@@ -689,8 +681,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- if (ctx->display)
- exynos_drm_create_enc_conn(drm_dev, ctx->display);
+ if (ctx->encoder)
+ exynos_dpi_bind(drm_dev, ctx->encoder);
return 0;
@@ -703,8 +695,8 @@ static void decon_unbind(struct device *dev, struct device *master,
decon_disable(ctx->crtc);
- if (ctx->display)
- exynos_dpi_remove(ctx->display);
+ if (ctx->encoder)
+ exynos_dpi_remove(ctx->encoder);
decon_ctx_remove(ctx);
}
@@ -789,9 +781,9 @@ static int decon_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ctx->display = exynos_dpi_probe(dev);
- if (IS_ERR(ctx->display)) {
- ret = PTR_ERR(ctx->display);
+ ctx->encoder = exynos_dpi_probe(dev);
+ if (IS_ERR(ctx->encoder)) {
+ ret = PTR_ERR(ctx->encoder);
goto err_iounmap;
}
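The two decon conversions above stop caching framebuffer parameters in struct exynos_drm_plane and read them from the plane's atomic state at commit time instead. A short sketch of where the values now come from inside an update_plane hook (illustrative, matching the fields used in this patch):

	struct drm_plane_state *state = plane->base.state;
	struct drm_framebuffer *fb = state->fb;
	unsigned int win = plane->zpos;			/* hardware window index */
	unsigned int bpp = fb->bits_per_pixel >> 3;	/* bytes per pixel */
	unsigned int pitch = fb->pitches[0];		/* bytes per scanline */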
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 172b8002a2c8..d66ade0efac8 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,19 +32,20 @@
#include <drm/drm_panel.h>
#include "exynos_dp_core.h"
+#include "exynos_drm_crtc.h"
#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
connector)
static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
{
- return to_exynos_crtc(dp->encoder->crtc);
+ return to_exynos_crtc(dp->encoder.crtc);
}
-static inline struct exynos_dp_device *
-display_to_dp(struct exynos_drm_display *d)
+static inline struct exynos_dp_device *encoder_to_dp(
+ struct drm_encoder *e)
{
- return container_of(d, struct exynos_dp_device, display);
+ return container_of(e, struct exynos_dp_device, encoder);
}
struct bridge_init {
@@ -795,9 +796,6 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp)
/* Configure video slave mode */
exynos_dp_enable_video_master(dp, 0);
- /* Enable video */
- exynos_dp_start_video(dp);
-
timeout_loop = 0;
for (;;) {
@@ -891,9 +889,9 @@ static void exynos_dp_hotplug(struct work_struct *work)
drm_helper_hpd_irq_event(dp->drm_dev);
}
-static void exynos_dp_commit(struct exynos_drm_display *display)
+static void exynos_dp_commit(struct drm_encoder *encoder)
{
- struct exynos_dp_device *dp = display_to_dp(display);
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
int ret;
/* Keep the panel disabled while we configure video */
@@ -938,6 +936,9 @@ static void exynos_dp_commit(struct exynos_drm_display *display)
if (drm_panel_enable(dp->panel))
DRM_ERROR("failed to enable the panel\n");
}
+
+ /* Enable video */
+ exynos_dp_start_video(dp);
}
static enum drm_connector_status exynos_dp_detect(
@@ -994,7 +995,7 @@ static struct drm_encoder *exynos_dp_best_encoder(
{
struct exynos_dp_device *dp = ctx_from_connector(connector);
- return dp->encoder;
+ return &dp->encoder;
}
static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
@@ -1019,15 +1020,12 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
return 0;
}
-static int exynos_dp_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int exynos_dp_create_connector(struct drm_encoder *encoder)
{
- struct exynos_dp_device *dp = display_to_dp(display);
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
struct drm_connector *connector = &dp->connector;
int ret;
- dp->encoder = encoder;
-
/* Pre-empt DP connector creation if there's a bridge */
if (dp->bridge) {
ret = exynos_drm_attach_lcd_bridge(dp, encoder);
@@ -1054,20 +1052,22 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
return ret;
}
-static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- if (dp->phy)
- phy_power_on(dp->phy);
+ return true;
}
-static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+static void exynos_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- if (dp->phy)
- phy_power_off(dp->phy);
}
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_enable(struct drm_encoder *encoder)
{
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
@@ -1084,14 +1084,17 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
crtc->ops->clock_enable(dp_to_crtc(dp), true);
clk_prepare_enable(dp->clock);
- exynos_dp_phy_init(dp);
+ phy_power_on(dp->phy);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
- exynos_dp_commit(&dp->display);
+ exynos_dp_commit(&dp->encoder);
+
+ dp->dpms_mode = DRM_MODE_DPMS_ON;
}
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_disable(struct drm_encoder *encoder)
{
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1106,7 +1109,7 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
disable_irq(dp->irq);
flush_work(&dp->hotplug_work);
- exynos_dp_phy_exit(dp);
+ phy_power_off(dp->phy);
clk_disable_unprepare(dp->clock);
if (crtc->ops->clock_enable)
@@ -1116,31 +1119,19 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
if (drm_panel_unprepare(dp->panel))
DRM_ERROR("failed to turnoff the panel\n");
}
-}
-
-static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
-{
- struct exynos_dp_device *dp = display_to_dp(display);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- exynos_dp_poweron(dp);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- exynos_dp_poweroff(dp);
- break;
- default:
- break;
- }
- dp->dpms_mode = mode;
+ dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static struct exynos_drm_display_ops exynos_dp_display_ops = {
- .create_connector = exynos_dp_create_connector,
- .dpms = exynos_dp_dpms,
- .commit = exynos_dp_commit,
+static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+ .mode_fixup = exynos_dp_mode_fixup,
+ .mode_set = exynos_dp_mode_set,
+ .enable = exynos_dp_enable,
+ .disable = exynos_dp_disable,
+};
+
+static struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
@@ -1219,9 +1210,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder = &dp->encoder;
struct resource *res;
unsigned int irq_flags;
- int ret = 0;
+ int pipe, ret = 0;
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1297,7 +1289,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
- exynos_dp_phy_init(dp);
+ phy_power_on(dp->phy);
exynos_dp_init_dp(dp);
@@ -1311,7 +1303,28 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
dp->drm_dev = drm_dev;
- return exynos_drm_create_enc_conn(drm_dev, &dp->display);
+ pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_LCD);
+ if (pipe < 0)
+ return pipe;
+
+ encoder->possible_crtcs = 1 << pipe;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+
+ ret = exynos_dp_create_connector(encoder);
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return 0;
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -1319,7 +1332,7 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+ exynos_dp_disable(&dp->encoder);
}
static const struct component_ops exynos_dp_ops = {
@@ -1338,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
- dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
- dp->display.ops = &exynos_dp_display_ops;
platform_set_drvdata(pdev, dp);
panel_node = of_parse_phandle(dev->of_node, "panel", 0);
@@ -1377,7 +1388,7 @@ static int exynos_dp_suspend(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+ exynos_dp_disable(&dp->encoder);
return 0;
}
@@ -1385,7 +1396,7 @@ static int exynos_dp_resume(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
+ exynos_dp_enable(&dp->encoder);
return 0;
}
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a4e799679669..e413b6f7b0e7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -147,11 +147,10 @@ struct link_train {
};
struct exynos_dp_device {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- struct drm_encoder *encoder;
struct drm_panel *panel;
struct drm_bridge *bridge;
struct clk *clock;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
deleted file mode 100644
index 24994ba10e28..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* exynos_drm_buf.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
-#include "exynos_drm_iommu.h"
-
-static int lowlevel_buffer_allocate(struct drm_device *dev,
- unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
- int ret = 0;
- enum dma_attr attr;
- unsigned int nr_pages;
-
- if (buf->dma_addr) {
- DRM_DEBUG_KMS("already allocated.\n");
- return 0;
- }
-
- init_dma_attrs(&buf->dma_attrs);
-
- /*
- * if EXYNOS_BO_CONTIG, fully physically contiguous memory
- * region will be allocated else physically contiguous
- * as possible.
- */
- if (!(flags & EXYNOS_BO_NONCONTIG))
- dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
-
- /*
- * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
- * else cachable mapping.
- */
- if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
- attr = DMA_ATTR_WRITE_COMBINE;
- else
- attr = DMA_ATTR_NON_CONSISTENT;
-
- dma_set_attr(attr, &buf->dma_attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
-
- nr_pages = buf->size >> PAGE_SHIFT;
-
- if (!is_drm_iommu_supported(dev)) {
- dma_addr_t start_addr;
- unsigned int i = 0;
-
- buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
- if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- return -ENOMEM;
- }
-
- buf->cookie = dma_alloc_attrs(dev->dev,
- buf->size,
- &buf->dma_addr, GFP_KERNEL,
- &buf->dma_attrs);
- if (!buf->cookie) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err_free;
- }
-
- start_addr = buf->dma_addr;
- while (i < nr_pages) {
- buf->pages[i] = phys_to_page(start_addr);
- start_addr += PAGE_SIZE;
- i++;
- }
- } else {
-
- buf->pages = dma_alloc_attrs(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL,
- &buf->dma_attrs);
- if (!buf->pages) {
- DRM_ERROR("failed to allocate buffer.\n");
- return -ENOMEM;
- }
- }
-
- buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
- if (IS_ERR(buf->sgt)) {
- DRM_ERROR("failed to get sg table.\n");
- ret = PTR_ERR(buf->sgt);
- goto err_free_attrs;
- }
-
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->dma_addr,
- buf->size);
-
- return ret;
-
-err_free_attrs:
- dma_free_attrs(dev->dev, buf->size, buf->pages,
- (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
- buf->dma_addr = (dma_addr_t)NULL;
-err_free:
- if (!is_drm_iommu_supported(dev))
- drm_free_large(buf->pages);
-
- return ret;
-}
-
-static void lowlevel_buffer_deallocate(struct drm_device *dev,
- unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
- if (!buf->dma_addr) {
- DRM_DEBUG_KMS("dma_addr is invalid.\n");
- return;
- }
-
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->dma_addr,
- buf->size);
-
- sg_free_table(buf->sgt);
-
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- if (!is_drm_iommu_supported(dev)) {
- dma_free_attrs(dev->dev, buf->size, buf->cookie,
- (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
- drm_free_large(buf->pages);
- } else
- dma_free_attrs(dev->dev, buf->size, buf->pages,
- (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-
- buf->dma_addr = (dma_addr_t)NULL;
-}
-
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
- unsigned int size)
-{
- struct exynos_drm_gem_buf *buffer;
-
- DRM_DEBUG_KMS("desired size = 0x%x\n", size);
-
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer)
- return NULL;
-
- buffer->size = size;
- return buffer;
-}
-
-void exynos_drm_fini_buf(struct drm_device *dev,
- struct exynos_drm_gem_buf *buffer)
-{
- kfree(buffer);
- buffer = NULL;
-}
-
-int exynos_drm_alloc_buf(struct drm_device *dev,
- struct exynos_drm_gem_buf *buf, unsigned int flags)
-{
-
- /*
- * allocate memory region and set the memory information
- * to vaddr and dma_addr of a buffer object.
- */
- if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
- return -ENOMEM;
-
- return 0;
-}
-
-void exynos_drm_free_buf(struct drm_device *dev,
- unsigned int flags, struct exynos_drm_gem_buf *buffer)
-{
-
- lowlevel_buffer_deallocate(dev, flags, buffer);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
deleted file mode 100644
index a6412f19673c..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* exynos_drm_buf.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_BUF_H_
-#define _EXYNOS_DRM_BUF_H_
-
-/* create and initialize buffer object. */
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
- unsigned int size);
-
-/* destroy buffer object. */
-void exynos_drm_fini_buf(struct drm_device *dev,
- struct exynos_drm_gem_buf *buffer);
-
-/* allocate physical memory region and setup sgt. */
-int exynos_drm_alloc_buf(struct drm_device *dev,
- struct exynos_drm_gem_buf *buf,
- unsigned int flags);
-
-/* release physical memory region, and sgt. */
-void exynos_drm_free_buf(struct drm_device *dev,
- unsigned int flags,
- struct exynos_drm_gem_buf *buffer);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 4c9f972eaa07..c68a6a2a9b57 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,46 +15,10 @@
#include <drm/drmP.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
-int exynos_drm_create_enc_conn(struct drm_device *dev,
- struct exynos_drm_display *display)
-{
- struct drm_encoder *encoder;
- int ret;
- unsigned long possible_crtcs = 0;
-
- ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
- if (ret < 0)
- return ret;
-
- possible_crtcs |= 1 << ret;
-
- /* create and initialize a encoder for this sub driver. */
- encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
- if (!encoder) {
- DRM_ERROR("failed to create encoder\n");
- return -EFAULT;
- }
-
- display->encoder = encoder;
-
- ret = display->ops->create_connector(display, encoder);
- if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
- goto err_destroy_encoder;
- }
-
- return 0;
-
-err_destroy_encoder:
- encoder->funcs->destroy(encoder);
- return ret;
-}
-
int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
{
if (!subdrv)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 644b4b76e071..c47899738eb4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -19,7 +19,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
#include "exynos_drm_plane.h"
static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
@@ -80,7 +79,8 @@ exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
exynos_crtc->ops->commit(exynos_crtc);
}
-static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -90,7 +90,8 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
}
}
-static void exynos_crtc_atomic_flush(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
}
@@ -175,7 +176,7 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
return -EPERM;
if (exynos_crtc->ops->enable_vblank)
- exynos_crtc->ops->enable_vblank(exynos_crtc);
+ return exynos_crtc->ops->enable_vblank(exynos_crtc);
return 0;
}
@@ -193,24 +194,22 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
+void exynos_drm_crtc_finish_pageflip(struct exynos_drm_crtc *exynos_crtc)
{
- struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
+ struct drm_crtc *crtc = &exynos_crtc->base;
unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (exynos_crtc->event) {
- drm_send_vblank_event(dev, -1, exynos_crtc->event);
- drm_vblank_put(dev, pipe);
+ drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
+ drm_crtc_vblank_put(crtc);
wake_up(&exynos_crtc->pending_flip_queue);
}
exynos_crtc->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
@@ -237,7 +236,7 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
}
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
- unsigned int out_type)
+ enum exynos_drm_output_type out_type)
{
struct drm_crtc *crtc;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0f3aa70818e3..9e7027d6c2f6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -25,12 +25,12 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
void *context);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_finish_pageflip(struct exynos_drm_crtc *exynos_crtc);
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
/* This function returns the pipe number of the crtc device matched with out_type. */
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
- unsigned int out_type);
+ enum exynos_drm_output_type out_type);
/*
* This function calls the crtc device(manager)'s te_handler() callback
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
deleted file mode 100644
index cd485c091b30..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/* exynos_drm_dmabuf.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include "exynos_drm_dmabuf.h"
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-
-#include <linux/dma-buf.h>
-
-struct exynos_drm_dmabuf_attachment {
- struct sg_table sgt;
- enum dma_data_direction dir;
- bool is_mapped;
-};
-
-static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
-{
- return to_exynos_gem_obj(buf->priv);
-}
-
-static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
- struct device *dev,
- struct dma_buf_attachment *attach)
-{
- struct exynos_drm_dmabuf_attachment *exynos_attach;
-
- exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
- if (!exynos_attach)
- return -ENOMEM;
-
- exynos_attach->dir = DMA_NONE;
- attach->priv = exynos_attach;
-
- return 0;
-}
-
-static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
- struct sg_table *sgt;
-
- if (!exynos_attach)
- return;
-
- sgt = &exynos_attach->sgt;
-
- if (exynos_attach->dir != DMA_NONE)
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
- exynos_attach->dir);
-
- sg_free_table(sgt);
- kfree(exynos_attach);
- attach->priv = NULL;
-}
-
-static struct sg_table *
- exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
-{
- struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
- struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
- struct drm_device *dev = gem_obj->base.dev;
- struct exynos_drm_gem_buf *buf;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt = NULL;
- unsigned int i;
- int nents, ret;
-
- /* just return current sgt if already requested. */
- if (exynos_attach->dir == dir && exynos_attach->is_mapped)
- return &exynos_attach->sgt;
-
- buf = gem_obj->buffer;
- if (!buf) {
- DRM_ERROR("buffer is null.\n");
- return ERR_PTR(-ENOMEM);
- }
-
- sgt = &exynos_attach->sgt;
-
- ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
- if (ret) {
- DRM_ERROR("failed to alloc sgt.\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mutex_lock(&dev->struct_mutex);
-
- rd = buf->sgt->sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
- }
-
- if (dir != DMA_NONE) {
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
- if (!nents) {
- DRM_ERROR("failed to map sgl with iommu.\n");
- sg_free_table(sgt);
- sgt = ERR_PTR(-EIO);
- goto err_unlock;
- }
- }
-
- exynos_attach->is_mapped = true;
- exynos_attach->dir = dir;
- attach->priv = exynos_attach;
-
- DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
-
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
- return sgt;
-}
-
-static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- /* Nothing to do. */
-}
-
-static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num,
- void *addr)
-{
- /* TODO */
-}
-
-static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
- /* TODO */
-}
-
-static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
-{
- return -ENOTTY;
-}
-
-static struct dma_buf_ops exynos_dmabuf_ops = {
- .attach = exynos_gem_attach_dma_buf,
- .detach = exynos_gem_detach_dma_buf,
- .map_dma_buf = exynos_gem_map_dma_buf,
- .unmap_dma_buf = exynos_gem_unmap_dma_buf,
- .kmap = exynos_gem_dmabuf_kmap,
- .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
- .kunmap = exynos_gem_dmabuf_kunmap,
- .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
- .mmap = exynos_gem_dmabuf_mmap,
- .release = drm_gem_dmabuf_release,
-};
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
- struct drm_gem_object *obj, int flags)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &exynos_dmabuf_ops;
- exp_info.size = exynos_gem_obj->base.size;
- exp_info.flags = flags;
- exp_info.priv = obj;
-
- return dma_buf_export(&exp_info);
-}
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sgt;
- struct scatterlist *sgl;
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_gem_buf *buffer;
- int ret;
-
- /* is this one of own objects? */
- if (dma_buf->ops == &exynos_dmabuf_ops) {
- struct drm_gem_object *obj;
-
- obj = dma_buf->priv;
-
- /* is it from our device? */
- if (obj->dev == drm_dev) {
- /*
- * Importing dmabuf exported from out own gem increases
- * refcount on gem itself instead of f_count of dmabuf.
- */
- drm_gem_object_reference(obj);
- return obj;
- }
- }
-
- attach = dma_buf_attach(dma_buf, drm_dev->dev);
- if (IS_ERR(attach))
- return ERR_PTR(-EINVAL);
-
- get_dma_buf(dma_buf);
-
- sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_buf_detach;
- }
-
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto err_unmap_attach;
- }
-
- exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
- if (!exynos_gem_obj) {
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
- sgl = sgt->sgl;
-
- buffer->size = dma_buf->size;
- buffer->dma_addr = sg_dma_address(sgl);
-
- if (sgt->nents == 1) {
- /* always physically continuous memory if sgt->nents is 1. */
- exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
- } else {
- /*
- * this case could be CONTIG or NONCONTIG type but for now
- * sets NONCONTIG.
- * TODO. we have to find a way that exporter can notify
- * the type of its own buffer to importer.
- */
- exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
- }
-
- exynos_gem_obj->buffer = buffer;
- buffer->sgt = sgt;
- exynos_gem_obj->base.import_attach = attach;
-
- DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
- buffer->size);
-
- return &exynos_gem_obj->base;
-
-err_free_buffer:
- kfree(buffer);
- buffer = NULL;
-err_unmap_attach:
- dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-err_buf_detach:
- dma_buf_detach(dma_buf, attach);
- dma_buf_put(dma_buf);
-
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
deleted file mode 100644
index 886de9ff484d..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* exynos_drm_dmabuf.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_DMABUF_H_
-#define _EXYNOS_DRM_DMABUF_H_
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
- struct drm_gem_object *obj, int flags);
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
- struct dma_buf *dma_buf);
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 7cb6595c1894..c748b8790de3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -20,26 +20,24 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
-#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
struct exynos_dpi {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct device *dev;
struct device_node *panel_node;
struct drm_panel *panel;
struct drm_connector connector;
- struct drm_encoder *encoder;
struct videomode *vm;
- int dpms_mode;
};
#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
-static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
+static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
{
- return container_of(d, struct exynos_dpi, display);
+ return container_of(e, struct exynos_dpi, encoder);
}
static enum drm_connector_status
@@ -99,7 +97,7 @@ exynos_dpi_best_encoder(struct drm_connector *connector)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
- return ctx->encoder;
+ return &ctx->encoder;
}
static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
@@ -107,15 +105,12 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
.best_encoder = exynos_dpi_best_encoder,
};
-static int exynos_dpi_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int exynos_dpi_create_connector(struct drm_encoder *encoder)
{
- struct exynos_dpi *ctx = display_to_dpi(display);
+ struct exynos_dpi *ctx = encoder_to_dpi(encoder);
struct drm_connector *connector = &ctx->connector;
int ret;
- ctx->encoder = encoder;
-
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(encoder->dev, connector,
@@ -133,46 +128,48 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
return 0;
}
-static void exynos_dpi_poweron(struct exynos_dpi *ctx)
+static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void exynos_dpi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
+}
+
+static void exynos_dpi_enable(struct drm_encoder *encoder)
+{
+ struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
if (ctx->panel) {
drm_panel_prepare(ctx->panel);
drm_panel_enable(ctx->panel);
}
}
-static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
+static void exynos_dpi_disable(struct drm_encoder *encoder)
{
+ struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
if (ctx->panel) {
drm_panel_disable(ctx->panel);
drm_panel_unprepare(ctx->panel);
}
}
-static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
-{
- struct exynos_dpi *ctx = display_to_dpi(display);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
- exynos_dpi_poweron(ctx);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
- exynos_dpi_poweroff(ctx);
- break;
- default:
- break;
- }
- ctx->dpms_mode = mode;
-}
+static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
+ .mode_fixup = exynos_dpi_mode_fixup,
+ .mode_set = exynos_dpi_mode_set,
+ .enable = exynos_dpi_enable,
+ .disable = exynos_dpi_disable,
+};
-static struct exynos_drm_display_ops exynos_dpi_display_ops = {
- .create_connector = exynos_dpi_create_connector,
- .dpms = exynos_dpi_dpms
+static struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
/* of_* functions will be removed after merge of of_graph patches */
@@ -299,7 +296,34 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
return 0;
}
-struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
+{
+ int ret;
+
+ ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
+ encoder->possible_crtcs = 1 << ret;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+
+ ret = exynos_dpi_create_connector(encoder);
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct drm_encoder *exynos_dpi_probe(struct device *dev)
{
struct exynos_dpi *ctx;
int ret;
@@ -308,10 +332,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
if (!ctx)
return ERR_PTR(-ENOMEM);
- ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
- ctx->display.ops = &exynos_dpi_display_ops;
ctx->dev = dev;
- ctx->dpms_mode = DRM_MODE_DPMS_OFF;
ret = exynos_dpi_parse_dt(ctx);
if (ret < 0) {
@@ -325,14 +346,14 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
return ERR_PTR(-EPROBE_DEFER);
}
- return &ctx->display;
+ return &ctx->encoder;
}
-int exynos_dpi_remove(struct exynos_drm_display *display)
+int exynos_dpi_remove(struct drm_encoder *encoder)
{
- struct exynos_dpi *ctx = display_to_dpi(display);
+ struct exynos_dpi *ctx = encoder_to_dpi(encoder);
- exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
+ exynos_dpi_disable(&ctx->encoder);
if (ctx->panel)
drm_panel_detach(ctx->panel);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 63a68c60a353..fa5194caf259 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -21,13 +21,11 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
-#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
@@ -41,7 +39,9 @@
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
- int ret;
+ struct drm_encoder *encoder;
+ unsigned int clone_mask;
+ int cnt, ret;
private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
if (!private)
@@ -67,7 +67,13 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_mode_config_init(dev);
/* setup possible_clones. */
- exynos_drm_encoder_setup(dev);
+ cnt = 0;
+ clone_mask = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ clone_mask |= (1 << (cnt++));
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ encoder->possible_clones = clone_mask;
platform_set_drvdata(dev->platformdev, dev);
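Editor's note: the loop above replaces the old exynos_drm_encoder_setup()/exynos_drm_encoder_clones() helpers; it builds one clone mask with a bit per registered encoder and assigns it to every encoder. A minimal stand-alone sketch of the same bit arithmetic, assuming three encoders (the count is invented for illustration, not taken from the patch):

#include <stdio.h>

int main(void)
{
	/* Pretend three encoders were registered on the device. */
	int num_encoders = 3;
	unsigned int clone_mask = 0;
	int cnt;

	/* One bit per encoder, exactly like the list_for_each_entry()
	 * loop in the patch: bit 0, bit 1, bit 2, ... */
	for (cnt = 0; cnt < num_encoders; cnt++)
		clone_mask |= 1u << cnt;

	/* Every encoder is then allowed to clone with every other one. */
	printf("possible_clones = 0x%x\n", clone_mask);  /* prints 0x7 */
	return 0;
}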
@@ -297,8 +303,12 @@ static struct drm_driver exynos_drm_driver = {
.dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = exynos_dmabuf_prime_export,
- .gem_prime_import = exynos_dmabuf_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
+ .gem_prime_vmap = exynos_drm_gem_prime_vmap,
+ .gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
@@ -345,9 +355,6 @@ static struct platform_driver exynos_drm_platform_driver;
* because connector requires pipe number of its crtc during initialization.
*/
static struct platform_driver *const exynos_drm_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_VIDI
- &vidi_driver,
-#endif
#ifdef CONFIG_DRM_EXYNOS_FIMD
&fimd_driver,
#endif
@@ -370,6 +377,9 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = {
&mixer_driver,
&hdmi_driver,
#endif
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ &vidi_driver,
+#endif
};
static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index dd00f160c1e5..6b8a30f23473 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -44,23 +44,14 @@ enum exynos_drm_output_type {
* - the unit is screen coordinates.
* @src_y: offset y on a framebuffer to be displayed.
* - the unit is screen coordinates.
- * @src_width: width of a partial image to be displayed from framebuffer.
- * @src_height: height of a partial image to be displayed from framebuffer.
- * @fb_width: width of a framebuffer.
- * @fb_height: height of a framebuffer.
+ * @src_w: width of a partial image to be displayed from framebuffer.
+ * @src_h: height of a partial image to be displayed from framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
- * @crtc_width: window width to be displayed (hardware screen).
- * @crtc_height: window height to be displayed (hardware screen).
- * @mode_width: width of screen mode.
- * @mode_height: height of screen mode.
+ * @crtc_w: window width to be displayed (hardware screen).
+ * @crtc_h: window height to be displayed (hardware screen).
* @h_ratio: horizontal scaling ratio, 16.16 fixed point
* @v_ratio: vertical scaling ratio, 16.16 fixed point
- * @refresh: refresh rate.
- * @scan_flag: interlace or progressive way.
- * (it could be DRM_MODE_FLAG_*)
- * @bpp: pixel size.(in bit)
- * @pixel_format: fourcc pixel format of this overlay
 * @dma_addr: array of bus (DMA-accessible) addresses of the memory regions
 * allocated for an overlay.
 * @zpos: order of overlay layer (z position).
@@ -73,76 +64,19 @@ struct exynos_drm_plane {
struct drm_plane base;
unsigned int src_x;
unsigned int src_y;
- unsigned int src_width;
- unsigned int src_height;
- unsigned int fb_width;
- unsigned int fb_height;
+ unsigned int src_w;
+ unsigned int src_h;
unsigned int crtc_x;
unsigned int crtc_y;
- unsigned int crtc_width;
- unsigned int crtc_height;
- unsigned int mode_width;
- unsigned int mode_height;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
unsigned int h_ratio;
unsigned int v_ratio;
- unsigned int refresh;
- unsigned int scan_flag;
- unsigned int bpp;
- unsigned int pitch;
- uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
unsigned int zpos;
};
/*
- * Exynos DRM Display Structure.
- * - this structure is common to analog tv, digital tv and lcd panel.
- *
- * @create_connector: initialize and register a new connector
- * @remove: cleans up the display for removal
- * @mode_fixup: fix mode data comparing to hw specific display mode.
- * @mode_set: convert drm_display_mode to hw specific display mode and
- * would be called by encoder->mode_set().
- * @check_mode: check if mode is valid or not.
- * @dpms: display device on or off.
- * @commit: apply changes to hw
- */
-struct exynos_drm_display;
-struct exynos_drm_display_ops {
- int (*create_connector)(struct exynos_drm_display *display,
- struct drm_encoder *encoder);
- void (*remove)(struct exynos_drm_display *display);
- void (*mode_fixup)(struct exynos_drm_display *display,
- struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
- void (*mode_set)(struct exynos_drm_display *display,
- struct drm_display_mode *mode);
- int (*check_mode)(struct exynos_drm_display *display,
- struct drm_display_mode *mode);
- void (*dpms)(struct exynos_drm_display *display, int mode);
- void (*commit)(struct exynos_drm_display *display);
-};
-
-/*
- * Exynos drm display structure, maps 1:1 with an encoder/connector
- *
- * @list: the list entry for this manager
- * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
- * @encoder: encoder object this display maps to
- * @connector: connector object this display maps to
- * @ops: pointer to callbacks for exynos drm specific functionality
- * @ctx: A pointer to the display's implementation specific context
- */
-struct exynos_drm_display {
- struct list_head list;
- enum exynos_drm_output_type type;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- struct exynos_drm_display_ops *ops;
-};
-
-/*
* Exynos drm crtc ops
*
* @enable: enable the device
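Editor's note: in the reworked struct exynos_drm_plane above, the scaling factors h_ratio and v_ratio stay in 16.16 fixed point. The short sketch below shows how such a ratio can be formed and read back; ratio_16_16() and the 1920-to-1280 example are illustrative only, not code from the driver.

#include <stdio.h>
#include <stdint.h>

/* 16.16 fixed point: the high 16 bits hold the integer part,
 * the low 16 bits the fraction. */
static uint32_t ratio_16_16(uint32_t src, uint32_t dst)
{
	return (uint32_t)(((uint64_t)src << 16) / dst);
}

int main(void)
{
	/* e.g. scaling a 1920-pixel wide source onto a 1280-pixel window */
	uint32_t h_ratio = ratio_16_16(1920, 1280);

	/* prints 0x00018000 (1.500) */
	printf("h_ratio = 0x%08x (%.3f)\n",
	       (unsigned int)h_ratio, h_ratio / 65536.0);
	return 0;
}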
@@ -153,8 +87,8 @@ struct exynos_drm_display {
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @wait_for_vblank: wait for vblank interrupt to make sure that
* hardware overlay is updated.
- * @win_commit: apply hardware specific overlay data to registers.
- * @win_disable: disable hardware specific overlay.
+ * @update_plane: apply hardware specific overlay data to registers.
+ * @disable_plane: disable hardware specific overlay.
* @te_handler: trigger to transfer video image at the tearing effect
* synchronization signal if there is a page flip request.
* @clock_enable: optional function enabling/disabling display domain clock,
@@ -173,11 +107,12 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
- void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
- void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
+ void (*update_plane)(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane);
+ void (*disable_plane)(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane);
void (*te_handler)(struct exynos_drm_crtc *crtc);
void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
- void (*clear_channels)(struct exynos_drm_crtc *crtc);
};
/*
@@ -285,20 +220,23 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
#ifdef CONFIG_DRM_EXYNOS_DPI
-struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
-int exynos_dpi_remove(struct exynos_drm_display *display);
+struct drm_encoder *exynos_dpi_probe(struct device *dev);
+int exynos_dpi_remove(struct drm_encoder *encoder);
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder);
#else
-static inline struct exynos_drm_display *
+static inline struct drm_encoder *
exynos_dpi_probe(struct device *dev) { return NULL; }
-static inline int exynos_dpi_remove(struct exynos_drm_display *display)
+static inline int exynos_dpi_remove(struct drm_encoder *encoder)
+{
+ return 0;
+}
+static inline int exynos_dpi_bind(struct drm_device *dev,
+ struct drm_encoder *encoder)
{
return 0;
}
#endif
-/* This function creates a encoder and a connector, and initializes them. */
-int exynos_drm_create_enc_conn(struct drm_device *dev,
- struct exynos_drm_display *display);
extern struct platform_driver fimd_driver;
extern struct platform_driver exynos5433_decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 0e58b36cb8c2..12b03b364703 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -259,7 +259,7 @@ struct exynos_dsi_driver_data {
};
struct exynos_dsi {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
struct device_node *panel_node;
@@ -295,9 +295,9 @@ struct exynos_dsi {
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
-static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
+static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
{
- return container_of(d, struct exynos_dsi, display);
+ return container_of(e, struct exynos_dsi, encoder);
}
enum reg_idx {
@@ -1272,7 +1272,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
{
struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
- struct drm_encoder *encoder = dsi->display.encoder;
+ struct drm_encoder *encoder = &dsi->encoder;
if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1518,16 +1518,17 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
}
-static int exynos_dsi_enable(struct exynos_dsi *dsi)
+static void exynos_dsi_enable(struct drm_encoder *encoder)
{
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
int ret;
if (dsi->state & DSIM_STATE_ENABLED)
- return 0;
+ return;
ret = exynos_dsi_poweron(dsi);
if (ret < 0)
- return ret;
+ return;
dsi->state |= DSIM_STATE_ENABLED;
@@ -1535,7 +1536,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
if (ret < 0) {
dsi->state &= ~DSIM_STATE_ENABLED;
exynos_dsi_poweroff(dsi);
- return ret;
+ return;
}
exynos_dsi_set_display_mode(dsi);
@@ -1547,16 +1548,16 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
exynos_dsi_poweroff(dsi);
- return ret;
+ return;
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
-
- return 0;
}
-static void exynos_dsi_disable(struct exynos_dsi *dsi)
+static void exynos_dsi_disable(struct drm_encoder *encoder)
{
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
@@ -1571,26 +1572,6 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
exynos_dsi_poweroff(dsi);
}
-static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
-{
- struct exynos_dsi *dsi = display_to_dsi(display);
-
- if (dsi->panel) {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- exynos_dsi_enable(dsi);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- exynos_dsi_disable(dsi);
- break;
- default:
- break;
- }
- }
-}
-
static enum drm_connector_status
exynos_dsi_detect(struct drm_connector *connector, bool force)
{
@@ -1601,10 +1582,10 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
if (dsi->panel)
drm_panel_attach(dsi->panel, &dsi->connector);
} else if (!dsi->panel_node) {
- struct exynos_drm_display *display;
+ struct drm_encoder *encoder;
- display = platform_get_drvdata(to_platform_device(dsi->dev));
- exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+ encoder = platform_get_drvdata(to_platform_device(dsi->dev));
+ exynos_dsi_disable(encoder);
drm_panel_detach(dsi->panel);
dsi->panel = NULL;
}
@@ -1647,7 +1628,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
{
struct exynos_dsi *dsi = connector_to_dsi(connector);
- return dsi->display.encoder;
+ return &dsi->encoder;
}
static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1655,10 +1636,9 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.best_encoder = exynos_dsi_best_encoder,
};
-static int exynos_dsi_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int exynos_dsi_create_connector(struct drm_encoder *encoder)
{
- struct exynos_dsi *dsi = display_to_dsi(display);
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_connector *connector = &dsi->connector;
int ret;
@@ -1679,26 +1659,40 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
return 0;
}
-static void exynos_dsi_mode_set(struct exynos_drm_display *display,
- struct drm_display_mode *mode)
+static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct exynos_dsi *dsi = display_to_dsi(display);
- struct videomode *vm = &dsi->vm;
+ return true;
+}
- vm->hactive = mode->hdisplay;
- vm->vactive = mode->vdisplay;
- vm->vfront_porch = mode->vsync_start - mode->vdisplay;
- vm->vback_porch = mode->vtotal - mode->vsync_end;
- vm->vsync_len = mode->vsync_end - mode->vsync_start;
- vm->hfront_porch = mode->hsync_start - mode->hdisplay;
- vm->hback_porch = mode->htotal - mode->hsync_end;
- vm->hsync_len = mode->hsync_end - mode->hsync_start;
+static void exynos_dsi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct videomode *vm = &dsi->vm;
+ struct drm_display_mode *m = adjusted_mode;
+
+ vm->hactive = m->hdisplay;
+ vm->vactive = m->vdisplay;
+ vm->vfront_porch = m->vsync_start - m->vdisplay;
+ vm->vback_porch = m->vtotal - m->vsync_end;
+ vm->vsync_len = m->vsync_end - m->vsync_start;
+ vm->hfront_porch = m->hsync_start - m->hdisplay;
+ vm->hback_porch = m->htotal - m->hsync_end;
+ vm->hsync_len = m->hsync_end - m->hsync_start;
}
-static struct exynos_drm_display_ops exynos_dsi_display_ops = {
- .create_connector = exynos_dsi_create_connector,
+static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
+ .mode_fixup = exynos_dsi_mode_fixup,
.mode_set = exynos_dsi_mode_set,
- .dpms = exynos_dsi_dpms
+ .enable = exynos_dsi_enable,
+ .disable = exynos_dsi_disable,
+};
+
+static struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
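Editor's note: exynos_dsi_mode_set() above now fills the panel videomode from the adjusted drm_display_mode using the usual porch/sync decomposition. The sketch below redoes that arithmetic for the horizontal side only, with stand-in structs and standard 1080p-style timings; it is an illustration, not driver code.

#include <stdio.h>

/* Minimal stand-ins for drm_display_mode / videomode fields. */
struct mode { int hdisplay, hsync_start, hsync_end, htotal; };
struct vm   { int hactive, hfront_porch, hsync_len, hback_porch; };

int main(void)
{
	/* Typical 1080p horizontal timings. */
	struct mode m = { 1920, 2008, 2052, 2200 };
	struct vm vm;

	/* Same decomposition as in the patch, horizontal side only. */
	vm.hactive      = m.hdisplay;
	vm.hfront_porch = m.hsync_start - m.hdisplay;    /* 88  */
	vm.hsync_len    = m.hsync_end   - m.hsync_start; /* 44  */
	vm.hback_porch  = m.htotal      - m.hsync_end;   /* 148 */

	printf("active %d, fp %d, sync %d, bp %d\n",
	       vm.hactive, vm.hfront_porch, vm.hsync_len, vm.hback_porch);
	return 0;
}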
@@ -1821,22 +1815,35 @@ end:
static int exynos_dsi_bind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_drm_display *display = dev_get_drvdata(dev);
- struct exynos_dsi *dsi = display_to_dsi(display);
+ struct drm_encoder *encoder = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_device *drm_dev = data;
struct drm_bridge *bridge;
int ret;
- ret = exynos_drm_create_enc_conn(drm_dev, display);
+ ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
+ encoder->possible_crtcs = 1 << ret;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+
+ ret = exynos_dsi_create_connector(encoder);
if (ret) {
- DRM_ERROR("Encoder create [%d] failed with %d\n",
- display->type, ret);
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
return ret;
}
bridge = of_drm_find_bridge(dsi->bridge_node);
if (bridge) {
- display->encoder->bridge = bridge;
drm_bridge_attach(drm_dev, bridge);
}
@@ -1846,10 +1853,10 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
static void exynos_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_drm_display *display = dev_get_drvdata(dev);
- struct exynos_dsi *dsi = display_to_dsi(display);
+ struct drm_encoder *encoder = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
- exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+ exynos_dsi_disable(encoder);
mipi_dsi_host_unregister(&dsi->dsi_host);
}
@@ -1870,9 +1877,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
if (!dsi)
return -ENOMEM;
- dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
- dsi->display.ops = &exynos_dsi_display_ops;
-
/* To be checked as invalid one */
dsi->te_gpio = -ENOENT;
@@ -1948,7 +1952,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, &dsi->display);
+ platform_set_drvdata(pdev, &dsi->encoder);
return component_add(dev, &exynos_dsi_component_ops);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
deleted file mode 100644
index 7b89fd520e45..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/* exynos_drm_encoder.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
-
-#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
- drm_encoder)
-
-/*
- * exynos specific encoder structure.
- *
- * @drm_encoder: encoder object.
- * @display: the display structure that maps to this encoder
- */
-struct exynos_drm_encoder {
- struct drm_encoder drm_encoder;
- struct exynos_drm_display *display;
-};
-
-static bool
-exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder != encoder)
- continue;
-
- if (display->ops->mode_fixup)
- display->ops->mode_fixup(display, connector, mode,
- adjusted_mode);
- }
-
- return true;
-}
-
-static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
-
- if (display->ops->mode_set)
- display->ops->mode_set(display, adjusted_mode);
-}
-
-static void exynos_drm_encoder_enable(struct drm_encoder *encoder)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
-
- if (display->ops->dpms)
- display->ops->dpms(display, DRM_MODE_DPMS_ON);
-
- if (display->ops->commit)
- display->ops->commit(display);
-}
-
-static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
-
- if (display->ops->dpms)
- display->ops->dpms(display, DRM_MODE_DPMS_OFF);
-}
-
-static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
- .mode_fixup = exynos_drm_encoder_mode_fixup,
- .mode_set = exynos_drm_encoder_mode_set,
- .enable = exynos_drm_encoder_enable,
- .disable = exynos_drm_encoder_disable,
-};
-
-static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(exynos_encoder);
-}
-
-static struct drm_encoder_funcs exynos_encoder_funcs = {
- .destroy = exynos_drm_encoder_destroy,
-};
-
-static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
-{
- struct drm_encoder *clone;
- struct drm_device *dev = encoder->dev;
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
- unsigned int clone_mask = 0;
- int cnt = 0;
-
- list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
- switch (display->type) {
- case EXYNOS_DISPLAY_TYPE_LCD:
- case EXYNOS_DISPLAY_TYPE_HDMI:
- case EXYNOS_DISPLAY_TYPE_VIDI:
- clone_mask |= (1 << (cnt++));
- break;
- default:
- continue;
- }
- }
-
- return clone_mask;
-}
-
-void exynos_drm_encoder_setup(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- encoder->possible_clones = exynos_drm_encoder_clones(encoder);
-}
-
-struct drm_encoder *
-exynos_drm_encoder_create(struct drm_device *dev,
- struct exynos_drm_display *display,
- unsigned long possible_crtcs)
-{
- struct drm_encoder *encoder;
- struct exynos_drm_encoder *exynos_encoder;
-
- if (!possible_crtcs)
- return NULL;
-
- exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
- if (!exynos_encoder)
- return NULL;
-
- exynos_encoder->display = display;
- encoder = &exynos_encoder->drm_encoder;
- encoder->possible_crtcs = possible_crtcs;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
- drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
-
- drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
-
- DRM_DEBUG_KMS("encoder has been created\n");
-
- return encoder;
-}
-
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
-{
- return to_exynos_encoder(encoder)->display;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
deleted file mode 100644
index 26305d8dd93a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_ENCODER_H_
-#define _EXYNOS_DRM_ENCODER_H_
-
-void exynos_drm_encoder_setup(struct drm_device *dev);
-struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
- struct exynos_drm_display *mgr,
- unsigned long possible_crtcs);
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 2b6320e6eae2..9738f4e0c6eb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -238,22 +238,22 @@ err_free:
return ERR_PTR(ret);
}
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
- int index)
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
+ int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_gem_buf *buffer;
+ struct exynos_drm_gem_obj *obj;
if (index >= MAX_FB_BUFFER)
return NULL;
- buffer = exynos_fb->exynos_gem_obj[index]->buffer;
- if (!buffer)
+ obj = exynos_fb->exynos_gem_obj[index];
+ if (!obj)
return NULL;
- DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr);
- return buffer;
+ return obj;
}
static void exynos_drm_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 517471b37566..1c9e27c32cd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -19,8 +19,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
-/* get memory information of a drm framebuffer */
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+/* get gem object of a drm framebuffer */
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
int index);
void exynos_drm_mode_config_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e0b085b4bdfa..624595afbce0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -40,8 +40,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
{
struct drm_fb_helper *helper = info->par;
struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
- struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
- struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ struct exynos_drm_gem_obj *obj = exynos_fbd->exynos_gem_obj;
unsigned long vm_size;
int ret;
@@ -49,11 +48,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
vm_size = vma->vm_end - vma->vm_start;
- if (vm_size > buffer->size)
+ if (vm_size > obj->size)
return -EINVAL;
- ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
- buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr,
+ obj->size, &obj->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
@@ -65,9 +64,9 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
.fb_mmap = exynos_drm_fb_mmap,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
@@ -80,7 +79,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
- struct exynos_drm_gem_buf *buffer;
+ struct exynos_drm_gem_obj *obj;
unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned int nr_pages;
unsigned long offset;
@@ -89,18 +88,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
/* RGB formats use only one buffer */
- buffer = exynos_drm_fb_buffer(fb, 0);
- if (!buffer) {
- DRM_DEBUG_KMS("buffer is null.\n");
+ obj = exynos_drm_fb_gem_obj(fb, 0);
+ if (!obj) {
+ DRM_DEBUG_KMS("gem object is null.\n");
return -EFAULT;
}
- nr_pages = buffer->size >> PAGE_SHIFT;
+ nr_pages = obj->size >> PAGE_SHIFT;
- buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
- nr_pages, VM_MAP,
+ obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
- if (!buffer->kvaddr) {
+ if (!obj->kvaddr) {
DRM_ERROR("failed to map pages to kernel space.\n");
return -EIO;
}
@@ -111,7 +109,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitches[0];
- fbi->screen_base = buffer->kvaddr + offset;
+ fbi->screen_base = obj->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -142,10 +140,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- fbi = framebuffer_alloc(0, &pdev->dev);
- if (!fbi) {
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
DRM_ERROR("failed to allocate fb info.\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi);
goto out;
}
@@ -165,7 +163,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
- goto err_release_framebuffer;
+ goto err_release_fbi;
}
exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -178,33 +176,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto err_destroy_gem;
}
- helper->fbdev = fbi;
-
fbi->par = helper;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &exynos_drm_fb_ops;
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- DRM_ERROR("failed to allocate cmap.\n");
- goto err_destroy_framebuffer;
- }
-
ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
if (ret < 0)
- goto err_dealloc_cmap;
+ goto err_destroy_framebuffer;
mutex_unlock(&dev->struct_mutex);
return ret;
-err_dealloc_cmap:
- fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
exynos_drm_gem_destroy(exynos_gem_obj);
-err_release_framebuffer:
- framebuffer_release(fbi);
+err_release_fbi:
+ drm_fb_helper_release_fbi(helper);
/*
* if failed, all resources allocated above would be released by
@@ -300,8 +288,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
- if (exynos_gem_obj->buffer->kvaddr)
- vunmap(exynos_gem_obj->buffer->kvaddr);
+ if (exynos_gem_obj->kvaddr)
+ vunmap(exynos_gem_obj->kvaddr);
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
@@ -312,21 +300,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
}
}
- /* release linux framebuffer */
- if (fb_helper->fbdev) {
- struct fb_info *info;
- int ret;
-
- info = fb_helper->fbdev;
- ret = unregister_framebuffer(info);
- if (ret < 0)
- DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(fb_helper);
+ drm_fb_helper_release_fbi(fb_helper);
drm_fb_helper_fini(fb_helper);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8dc3c4..2a652359af64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
- pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 794e56c8798e..5def6bc073eb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -169,7 +169,7 @@ struct fimd_context {
struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
- struct exynos_drm_display *display;
+ struct drm_encoder *encoder;
};
static const struct of_device_id fimd_driver_dt_match[] = {
@@ -348,13 +348,6 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
pm_runtime_put(ctx->dev);
}
-static void fimd_iommu_detach_devices(struct fimd_context *ctx)
-{
- /* detach this sub driver from iommu mapping if supported. */
- if (is_drm_iommu_supported(ctx->drm_dev))
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
-}
-
static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
const struct drm_display_mode *mode)
{
@@ -486,9 +479,9 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
}
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb)
{
- struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
val = WINCONx_ENWIN;
@@ -498,11 +491,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
 * So if the requested format is ARGB8888, change it to XRGB8888.
*/
if (ctx->driver_data->has_limited_fmt && !win) {
- if (plane->pixel_format == DRM_FORMAT_ARGB8888)
- plane->pixel_format = DRM_FORMAT_XRGB8888;
+ if (fb->pixel_format == DRM_FORMAT_ARGB8888)
+ fb->pixel_format = DRM_FORMAT_XRGB8888;
}
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
@@ -538,7 +531,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -548,7 +541,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
@@ -614,21 +607,17 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
writel(val, ctx->regs + reg);
}
-static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct fimd_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
+ struct drm_plane_state *state = plane->base.state;
dma_addr_t dma_addr;
unsigned long val, size, offset;
unsigned int last_x, last_y, buf_offsize, line_size;
-
- if (ctx->suspended)
- return;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
+ unsigned int win = plane->zpos;
+ unsigned int bpp = state->fb->bits_per_pixel >> 3;
+ unsigned int pitch = state->fb->pitches[0];
if (ctx->suspended)
return;
@@ -647,8 +636,8 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
fimd_shadow_protect_win(ctx, win, true);
- offset = plane->src_x * (plane->bpp >> 3);
- offset += plane->src_y * plane->pitch;
+ offset = plane->src_x * bpp;
+ offset += plane->src_y * pitch;
/* buffer start address */
dma_addr = plane->dma_addr[0] + offset;
@@ -656,18 +645,18 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
- size = plane->pitch * plane->crtc_height;
+ size = pitch * plane->crtc_h;
val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
(unsigned long)dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- plane->crtc_width, plane->crtc_height);
+ plane->crtc_w, plane->crtc_h);
/* buffer size */
- buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
- line_size = plane->crtc_width * (plane->bpp >> 3);
+ buf_offsize = pitch - (plane->crtc_w * bpp);
+ line_size = plane->crtc_w * bpp;
val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -681,10 +670,10 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = plane->crtc_x + plane->crtc_width;
+ last_x = plane->crtc_x + plane->crtc_w;
if (last_x)
last_x--;
- last_y = plane->crtc_y + plane->crtc_height;
+ last_y = plane->crtc_y + plane->crtc_h;
if (last_y)
last_y--;
@@ -701,13 +690,13 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
u32 offset = VIDOSD_D(win);
if (win == 0)
offset = VIDOSD_C(win);
- val = plane->crtc_width * plane->crtc_height;
+ val = plane->crtc_w * plane->crtc_h;
writel(val, ctx->regs + offset);
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
}
- fimd_win_set_pixfmt(ctx, win);
+ fimd_win_set_pixfmt(ctx, win, state->fb);
/* hardware window 0 doesn't support color key. */
if (win != 0)
@@ -725,15 +714,11 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
atomic_set(&ctx->win_updated, 1);
}
-static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct fimd_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
+ unsigned int win = plane->zpos;
if (ctx->suspended)
return;
@@ -795,7 +780,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
* a destroyed buffer later.
*/
for (i = 0; i < WINDOWS_NR; i++)
- fimd_win_disable(crtc, i);
+ fimd_disable_plane(crtc, &ctx->planes[i]);
fimd_enable_vblank(crtc);
fimd_wait_for_vblank(crtc);
@@ -862,7 +847,7 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
}
if (test_bit(0, &ctx->irq_flags))
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
}
static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
@@ -890,11 +875,10 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.wait_for_vblank = fimd_wait_for_vblank,
- .win_commit = fimd_win_commit,
- .win_disable = fimd_win_disable,
+ .update_plane = fimd_update_plane,
+ .disable_plane = fimd_disable_plane,
.te_handler = fimd_te_handler,
.clock_enable = fimd_dp_clock_enable,
- .clear_channels = fimd_clear_channels,
};
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -913,13 +897,13 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
goto out;
if (ctx->i80_if) {
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->crtc);
/* Exits triggering mode */
atomic_set(&ctx->triggering, 0);
} else {
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
+ exynos_drm_crtc_finish_pageflip(ctx->crtc);
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
@@ -961,10 +945,13 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(ctx->crtc))
return PTR_ERR(ctx->crtc);
- if (ctx->display)
- exynos_drm_create_enc_conn(drm_dev, ctx->display);
+ if (ctx->encoder)
+ exynos_dpi_bind(drm_dev, ctx->encoder);
+
+ if (is_drm_iommu_supported(drm_dev))
+ fimd_clear_channels(ctx->crtc);
- ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+ ret = drm_iommu_attach_device(drm_dev, dev);
if (ret)
priv->pipe--;
@@ -978,10 +965,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_disable(ctx->crtc);
- fimd_iommu_detach_devices(ctx);
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
- if (ctx->display)
- exynos_dpi_remove(ctx->display);
+ if (ctx->encoder)
+ exynos_dpi_remove(ctx->encoder);
}
static const struct component_ops fimd_component_ops = {
@@ -1088,10 +1075,9 @@ static int fimd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ctx->display = exynos_dpi_probe(dev);
- if (IS_ERR(ctx->display)) {
- return PTR_ERR(ctx->display);
- }
+ ctx->encoder = exynos_dpi_probe(dev);
+ if (IS_ERR(ctx->encoder))
+ return PTR_ERR(ctx->encoder);
pm_runtime_enable(dev);
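Worth noting for the fimd conversion above: the driver no longer caches framebuffer-derived values (bpp, pitch, pixel format, fb dimensions) in struct exynos_drm_plane; they are read from the framebuffer bound to the plane's atomic state at commit time. A condensed sketch of that data flow (illustrative only, not literal patch code):

static void example_update_plane(struct exynos_drm_crtc *crtc,
				 struct exynos_drm_plane *plane)
{
	struct drm_framebuffer *fb = plane->base.state->fb;
	unsigned int bpp = fb->bits_per_pixel >> 3;	/* bytes per pixel */
	unsigned int pitch = fb->pitches[0];
	unsigned long offset = plane->src_x * bpp + plane->src_y * pitch;
	dma_addr_t start = plane->dma_addr[0] + offset;
	unsigned long size = (unsigned long)pitch * plane->crtc_h;

	/* program VIDWx_BUF_START/VIDWx_BUF_END with start and start + size */
}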
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 81a250830808..ba008391a2fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1319,9 +1319,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
return ret;
}
- if (!is_drm_iommu_supported(drm_dev))
- return 0;
-
ret = drm_iommu_attach_device(drm_dev, dev);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
@@ -1334,9 +1331,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
- if (!is_drm_iommu_supported(drm_dev))
- return;
-
drm_iommu_detach_device(drm_dev, dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0d5b9698d384..67461b77f040 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -13,98 +13,112 @@
#include <drm/drm_vma_manager.h>
#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"
-static unsigned int convert_to_vm_err_msg(int msg)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
- unsigned int out_msg;
+ struct drm_device *dev = obj->base.dev;
+ enum dma_attr attr;
+ unsigned int nr_pages;
- switch (msg) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- out_msg = VM_FAULT_NOPAGE;
- break;
+ if (obj->dma_addr) {
+ DRM_DEBUG_KMS("already allocated.\n");
+ return 0;
+ }
- case -ENOMEM:
- out_msg = VM_FAULT_OOM;
- break;
+ init_dma_attrs(&obj->dma_attrs);
- default:
- out_msg = VM_FAULT_SIGBUS;
- break;
- }
+ /*
+ * if EXYNOS_BO_CONTIG, fully physically contiguous memory
+ * region will be allocated else physically contiguous
+ * as possible.
+ */
+ if (!(obj->flags & EXYNOS_BO_NONCONTIG))
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
- return out_msg;
-}
+ /*
+ * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
+ * else cachable mapping.
+ */
+ if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
-static int check_gem_flags(unsigned int flags)
-{
- if (flags & ~(EXYNOS_BO_MASK)) {
- DRM_ERROR("invalid flags.\n");
- return -EINVAL;
- }
+ dma_set_attr(attr, &obj->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
- return 0;
-}
+ nr_pages = obj->size >> PAGE_SHIFT;
-static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
- struct vm_area_struct *vma)
-{
- DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+ if (!is_drm_iommu_supported(dev)) {
+ dma_addr_t start_addr;
+ unsigned int i = 0;
- /* non-cachable as default. */
- if (obj->flags & EXYNOS_BO_CACHABLE)
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- else if (obj->flags & EXYNOS_BO_WC)
- vma->vm_page_prot =
- pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- else
- vma->vm_page_prot =
- pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-}
+ obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+ if (!obj->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ return -ENOMEM;
+ }
-static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
-{
- /* TODO */
+ obj->cookie = dma_alloc_attrs(dev->dev,
+ obj->size,
+ &obj->dma_addr, GFP_KERNEL,
+ &obj->dma_attrs);
+ if (!obj->cookie) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ drm_free_large(obj->pages);
+ return -ENOMEM;
+ }
+
+ start_addr = obj->dma_addr;
+ while (i < nr_pages) {
+ obj->pages[i] = phys_to_page(start_addr);
+ start_addr += PAGE_SIZE;
+ i++;
+ }
+ } else {
+ obj->pages = dma_alloc_attrs(dev->dev, obj->size,
+ &obj->dma_addr, GFP_KERNEL,
+ &obj->dma_attrs);
+ if (!obj->pages) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
+ }
+ }
+
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)obj->dma_addr,
+ obj->size);
- return roundup(size, PAGE_SIZE);
+ return 0;
}
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
- struct vm_area_struct *vma,
- unsigned long f_vaddr,
- pgoff_t page_offset)
+static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- unsigned long pfn;
- int i;
-
- if (!buf->sgt)
- return -EINTR;
+ struct drm_device *dev = obj->base.dev;
- if (page_offset >= (buf->size >> PAGE_SHIFT)) {
- DRM_ERROR("invalid page offset\n");
- return -EINVAL;
+ if (!obj->dma_addr) {
+ DRM_DEBUG_KMS("dma_addr is invalid.\n");
+ return;
}
- sgl = buf->sgt->sgl;
- for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
- if (page_offset < (sgl->length >> PAGE_SHIFT))
- break;
- page_offset -= (sgl->length >> PAGE_SHIFT);
- }
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)obj->dma_addr, obj->size);
- pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+ if (!is_drm_iommu_supported(dev)) {
+ dma_free_attrs(dev->dev, obj->size, obj->cookie,
+ (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+ drm_free_large(obj->pages);
+ } else
+ dma_free_attrs(dev->dev, obj->size, obj->pages,
+ (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
- return vm_insert_mixed(vma, f_vaddr, pfn);
+ obj->dma_addr = (dma_addr_t)NULL;
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -131,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
- struct drm_gem_object *obj;
- struct exynos_drm_gem_buf *buf;
-
- obj = &exynos_gem_obj->base;
- buf = exynos_gem_obj->buffer;
+ struct drm_gem_object *obj = &exynos_gem_obj->base;
DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
@@ -148,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(exynos_gem_obj);
out:
- exynos_drm_fini_buf(obj->dev, buf);
- exynos_gem_obj->buffer = NULL;
-
drm_gem_free_mmap_offset(obj);
/* release file pointer to gem object. */
@@ -180,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
- return exynos_gem_obj->buffer->size;
+ return exynos_gem_obj->size;
}
@@ -193,7 +200,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj)
- return NULL;
+ return ERR_PTR(-ENOMEM);
exynos_gem_obj->size = size;
obj = &exynos_gem_obj->base;
@@ -202,7 +209,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
if (ret < 0) {
DRM_ERROR("failed to initialize gem object\n");
kfree(exynos_gem_obj);
- return NULL;
+ return ERR_PTR(ret);
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -215,47 +222,35 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_gem_buf *buf;
int ret;
+ if (flags & ~(EXYNOS_BO_MASK)) {
+ DRM_ERROR("invalid flags.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!size) {
DRM_ERROR("invalid size.\n");
return ERR_PTR(-EINVAL);
}
- size = roundup_gem_size(size, flags);
-
- ret = check_gem_flags(flags);
- if (ret)
- return ERR_PTR(ret);
-
- buf = exynos_drm_init_buf(dev, size);
- if (!buf)
- return ERR_PTR(-ENOMEM);
+ size = roundup(size, PAGE_SIZE);
exynos_gem_obj = exynos_drm_gem_init(dev, size);
- if (!exynos_gem_obj) {
- ret = -ENOMEM;
- goto err_fini_buf;
- }
-
- exynos_gem_obj->buffer = buf;
+ if (IS_ERR(exynos_gem_obj))
+ return exynos_gem_obj;
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0)
- goto err_gem_fini;
+ ret = exynos_drm_alloc_buf(exynos_gem_obj);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ kfree(exynos_gem_obj);
+ return ERR_PTR(ret);
+ }
return exynos_gem_obj;
-
-err_gem_fini:
- drm_gem_object_release(&exynos_gem_obj->base);
- kfree(exynos_gem_obj);
-err_fini_buf:
- exynos_drm_fini_buf(dev, buf);
- return ERR_PTR(ret);
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -294,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- return &exynos_gem_obj->buffer->dma_addr;
+ return &exynos_gem_obj->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -322,7 +317,6 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
struct vm_area_struct *vma)
{
struct drm_device *drm_dev = exynos_gem_obj->base.dev;
- struct exynos_drm_gem_buf *buffer;
unsigned long vm_size;
int ret;
@@ -331,19 +325,13 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
vm_size = vma->vm_end - vma->vm_start;
- /*
- * a buffer contains information to physically continuous memory
- * allocated by user request or at framebuffer creation.
- */
- buffer = exynos_gem_obj->buffer;
-
/* check if user-requested size is valid. */
- if (vm_size > buffer->size)
+ if (vm_size > exynos_gem_obj->size)
return -EINVAL;
- ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
- buffer->dma_addr, buffer->size,
- &buffer->dma_attrs);
+ ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
+ exynos_gem_obj->dma_addr, exynos_gem_obj->size,
+ &exynos_gem_obj->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
@@ -503,15 +491,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_gem_buf *buf;
-
- exynos_gem_obj = to_exynos_gem_obj(obj);
- buf = exynos_gem_obj->buffer;
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, buf->sgt);
-
exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
@@ -595,24 +574,34 @@ unlock:
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
- unsigned long f_vaddr;
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ unsigned long pfn;
pgoff_t page_offset;
int ret;
page_offset = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
- f_vaddr = (unsigned long)vmf->virtual_address;
-
- mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
- if (ret < 0)
- DRM_ERROR("failed to map a buffer with user.\n");
+ if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
+ ret = -EINVAL;
+ goto out;
+ }
- mutex_unlock(&dev->struct_mutex);
+ pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
+ ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
- return convert_to_vm_err_msg(ret);
+out:
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -631,11 +620,17 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
obj = vma->vm_private_data;
exynos_gem_obj = to_exynos_gem_obj(obj);
- ret = check_gem_flags(exynos_gem_obj->flags);
- if (ret)
- goto err_close_vm;
+ DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);
- update_vm_cache_attr(exynos_gem_obj, vma);
+ /* non-cachable as default. */
+ if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
if (ret)
@@ -649,3 +644,76 @@ err_close_vm:
return ret;
}
+
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ int npages;
+
+ npages = exynos_gem_obj->size >> PAGE_SHIFT;
+
+ return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
+}
+
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int npages;
+ int ret;
+
+ exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
+ if (IS_ERR(exynos_gem_obj)) {
+ ret = PTR_ERR(exynos_gem_obj);
+ goto err;
+ }
+
+ exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
+
+ npages = exynos_gem_obj->size >> PAGE_SHIFT;
+ exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!exynos_gem_obj->pages) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
+ npages);
+ if (ret < 0)
+ goto err_free_large;
+
+ if (sgt->nents == 1) {
+ /* always physically continuous memory if sgt->nents is 1. */
+ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+ } else {
+ /*
+ * this case could be CONTIG or NONCONTIG type but for now
+ * sets NONCONTIG.
+ * TODO. we have to find a way that exporter can notify
+ * the type of its own buffer to importer.
+ */
+ exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+ }
+
+ return &exynos_gem_obj->base;
+
+err_free_large:
+ drm_free_large(exynos_gem_obj->pages);
+err:
+ drm_gem_object_release(&exynos_gem_obj->base);
+ kfree(exynos_gem_obj);
+ return ERR_PTR(ret);
+}
+
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ return NULL;
+}
+
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* Nothing to do */
+}
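These low-level prime helpers replace the driver-specific dma-buf code; the generic PRIME machinery calls them through struct drm_driver. A sketch of the typical hookup (the exynos_drm_drv.c change itself is outside this hunk, so the assignments below are illustrative):

static struct drm_driver example_driver = {
	/* ... */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
};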
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 6f42e2248288..cd62f8410d1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -20,35 +20,6 @@
#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG)
/*
- * exynos drm gem buffer structure.
- *
- * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
- * *userptr: user space address.
- * @dma_addr: bus address(accessed by dma) to allocated memory region.
- * - this address could be physical address without IOMMU and
- * device address with IOMMU.
- * @write: whether pages will be written to by the caller.
- * @pages: Array of backing pages.
- * @sgt: sg table to transfer page data.
- * @size: size of allocated memory region.
- * @pfnmap: indicate whether memory region from userptr is mmaped with
- * VM_PFNMAP or not.
- */
-struct exynos_drm_gem_buf {
- void *cookie;
- void __iomem *kvaddr;
- unsigned long userptr;
- dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
- unsigned int write;
- struct page **pages;
- struct sg_table *sgt;
- unsigned long size;
- bool pfnmap;
-};
-
-/*
* exynos drm buffer structure.
*
* @base: a gem object.
@@ -59,18 +30,28 @@ struct exynos_drm_gem_buf {
* by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
+ * @flags: indicate memory type to allocated buffer and cache attruibute.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
- * @flags: indicate memory type to allocated buffer and cache attruibute.
+ * @cookie: cookie returned by dma_alloc_attrs
+ * @kvaddr: kernel virtual address to allocated memory region.
+ * @dma_addr: bus address(accessed by dma) to allocated memory region.
+ * - this address could be physical address without IOMMU and
+ * device address with IOMMU.
+ * @pages: Array of backing pages.
*
* P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
*/
struct exynos_drm_gem_obj {
- struct drm_gem_object base;
- struct exynos_drm_gem_buf *buffer;
- unsigned long size;
- unsigned int flags;
+ struct drm_gem_object base;
+ unsigned int flags;
+ unsigned long size;
+ void *cookie;
+ void __iomem *kvaddr;
+ dma_addr_t dma_addr;
+ struct dma_attrs dma_attrs;
+ struct page **pages;
};
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -177,4 +158,13 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir);
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
#endif
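With the buffer structure folded into the GEM object, callers reach the backing-storage fields directly instead of going through the separate exynos_drm_gem_buf. For instance (illustrative sketch mirroring the plane.c hunk later in this patch):

static dma_addr_t example_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
	struct exynos_drm_gem_obj *obj = exynos_drm_fb_gem_obj(fb, index);

	return obj->dma_addr + fb->offsets[index];	/* was obj->buffer->dma_addr */
}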
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2a831f..808a0a013780 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -582,9 +582,17 @@ static int gsc_src_set_transf(struct device *dev,
break;
case EXYNOS_DRM_DEGREE_180:
cfg |= GSC_IN_ROT_180;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~GSC_IN_ROT_YFLIP;
break;
case EXYNOS_DRM_DEGREE_270:
cfg |= GSC_IN_ROT_270;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~GSC_IN_ROT_YFLIP;
break;
default:
dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
@@ -593,8 +601,7 @@ static int gsc_src_set_transf(struct device *dev,
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
@@ -846,9 +853,17 @@ static int gsc_dst_set_transf(struct device *dev,
break;
case EXYNOS_DRM_DEGREE_180:
cfg |= GSC_IN_ROT_180;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~GSC_IN_ROT_YFLIP;
break;
case EXYNOS_DRM_DEGREE_270:
cfg |= GSC_IN_ROT_270;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~GSC_IN_ROT_YFLIP;
break;
default:
dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
@@ -857,8 +872,7 @@ static int gsc_dst_set_transf(struct device *dev,
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index d4ec7465e9cc..055e8ec2ef21 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -87,10 +87,8 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *dev = drm_dev->dev;
int ret;
- if (!dev->archdata.mapping) {
- DRM_ERROR("iommu_mapping is null.\n");
- return -EFAULT;
- }
+ if (!dev->archdata.mapping)
+ return 0;
subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
sizeof(*subdrv_dev->dma_parms),
@@ -144,17 +142,3 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
iommu_detach_device(mapping->domain, subdrv_dev);
drm_release_iommu_mapping(drm_dev);
}
-
-int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
- struct drm_device *drm_dev, struct device *subdrv_dev)
-{
- int ret = 0;
-
- if (is_drm_iommu_supported(drm_dev)) {
- if (exynos_crtc->ops->clear_channels)
- exynos_crtc->ops->clear_channels(exynos_crtc);
- return drm_iommu_attach_device(drm_dev, subdrv_dev);
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 8341c7a475b4..dc1b5441f491 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -29,19 +29,11 @@ void drm_iommu_detach_device(struct drm_device *dev_dev,
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct device *dev = drm_dev->dev;
return dev->archdata.mapping ? true : false;
-#else
- return false;
-#endif
}
-int drm_iommu_attach_device_if_possible(
- struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
#else
static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
@@ -69,12 +61,5 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
return false;
}
-static inline int drm_iommu_attach_device_if_possible(
- struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- return 0;
-}
-
#endif
#endif
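With drm_iommu_attach_device() now returning 0 when no ARM DMA mapping exists, sub-drivers can attach unconditionally instead of guarding every call with is_drm_iommu_supported(), which is exactly what the g2d hunk above and the ipp hunk below do. A sketch of the simplified caller pattern (hypothetical sub-driver, not from the patch):

static int example_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	int ret = drm_iommu_attach_device(drm_dev, dev);

	if (ret < 0)
		dev_err(dev, "failed to enable iommu.\n");

	return ret;
}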
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67e5451e066f..67d24236e745 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -1622,12 +1622,10 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
INIT_LIST_HEAD(&ippdrv->cmd_list);
mutex_init(&ippdrv->cmd_lock);
- if (is_drm_iommu_supported(drm_dev)) {
- ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
- if (ret) {
- DRM_ERROR("failed to activate iommu\n");
- goto err;
- }
+ ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+ if (ret) {
+ DRM_ERROR("failed to activate iommu\n");
+ goto err;
}
}
@@ -1637,8 +1635,7 @@ err:
/* get ipp driver entry */
list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
drv_list) {
- if (is_drm_iommu_supported(drm_dev))
- drm_iommu_detach_device(drm_dev, ippdrv->dev);
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
ippdrv->prop_list.ipp_id);
@@ -1654,8 +1651,7 @@ static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
/* get ipp driver entry */
list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
- if (is_drm_iommu_supported(drm_dev))
- drm_iommu_detach_device(drm_dev, ippdrv->dev);
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
ippdrv->prop_list.ipp_id);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a729980d3c2f..d9a68fd83120 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -97,29 +97,18 @@ static void exynos_plane_mode_set(struct drm_plane *plane,
/* set drm framebuffer data. */
exynos_plane->src_x = src_x;
exynos_plane->src_y = src_y;
- exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16;
- exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16;
- exynos_plane->fb_width = fb->width;
- exynos_plane->fb_height = fb->height;
- exynos_plane->bpp = fb->bits_per_pixel;
- exynos_plane->pitch = fb->pitches[0];
- exynos_plane->pixel_format = fb->pixel_format;
+ exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16;
+ exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16;
/* set plane range to be displayed. */
exynos_plane->crtc_x = crtc_x;
exynos_plane->crtc_y = crtc_y;
- exynos_plane->crtc_width = actual_w;
- exynos_plane->crtc_height = actual_h;
-
- /* set drm mode data. */
- exynos_plane->mode_width = mode->hdisplay;
- exynos_plane->mode_height = mode->vdisplay;
- exynos_plane->refresh = mode->vrefresh;
- exynos_plane->scan_flag = mode->flags;
+ exynos_plane->crtc_w = actual_w;
+ exynos_plane->crtc_h = actual_h;
DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
exynos_plane->crtc_x, exynos_plane->crtc_y,
- exynos_plane->crtc_width, exynos_plane->crtc_height);
+ exynos_plane->crtc_w, exynos_plane->crtc_h);
plane->crtc = crtc;
}
@@ -145,15 +134,15 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
nr = exynos_drm_fb_get_buf_cnt(state->fb);
for (i = 0; i < nr; i++) {
- struct exynos_drm_gem_buf *buffer =
- exynos_drm_fb_buffer(state->fb, i);
+ struct exynos_drm_gem_obj *obj =
+ exynos_drm_fb_gem_obj(state->fb, i);
- if (!buffer) {
- DRM_DEBUG_KMS("buffer is null\n");
+ if (!obj) {
+ DRM_DEBUG_KMS("gem object is null\n");
return -EFAULT;
}
- exynos_plane->dma_addr[i] = buffer->dma_addr +
+ exynos_plane->dma_addr[i] = obj->dma_addr +
state->fb->offsets[i];
DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
@@ -179,8 +168,8 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
state->src_x >> 16, state->src_y >> 16,
state->src_w >> 16, state->src_h >> 16);
- if (exynos_crtc->ops->win_commit)
- exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
+ if (exynos_crtc->ops->update_plane)
+ exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
}
static void exynos_plane_atomic_disable(struct drm_plane *plane,
@@ -192,9 +181,9 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane,
if (!old_state->crtc)
return;
- if (exynos_crtc->ops->win_disable)
- exynos_crtc->ops->win_disable(exynos_crtc,
- exynos_plane->zpos);
+ if (exynos_crtc->ops->disable_plane)
+ exynos_crtc->ops->disable_plane(exynos_crtc,
+ exynos_plane);
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
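The net effect on the backend interface: window indices no longer cross the crtc_ops boundary; each hook receives the plane and derives the window from plane->zpos itself. A minimal sketch of a backend implementing the reworked hooks (the "example" functions are placeholders, not from the patch):

static void example_plane_update(struct exynos_drm_crtc *crtc,
				 struct exynos_drm_plane *plane)
{
	unsigned int win = plane->zpos;	/* window index derived from the plane */

	/* program hardware window 'win' from plane->base.state->fb */
}

static void example_plane_disable(struct exynos_drm_crtc *crtc,
				  struct exynos_drm_plane *plane)
{
	/* disable hardware window plane->zpos */
}

static const struct exynos_drm_crtc_ops example_crtc_ops = {
	.update_plane	= example_plane_update,		/* was .win_commit(crtc, win) */
	.disable_plane	= example_plane_disable,	/* was .win_disable(crtc, win) */
};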
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3413393d8a16..581af35861a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -25,7 +25,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_encoder.h"
#include "exynos_drm_vidi.h"
/* vidi has totally three virtual windows. */
@@ -35,11 +34,10 @@
connector)
struct vidi_context {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct platform_device *pdev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
- struct drm_encoder *encoder;
struct drm_connector connector;
struct exynos_drm_plane planes[WINDOWS_NR];
struct edid *raw_edid;
@@ -55,9 +53,9 @@ struct vidi_context {
int pipe;
};
-static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
+static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
{
- return container_of(d, struct vidi_context, display);
+ return container_of(e, struct vidi_context, encoder);
}
static const char fake_edid_info[] = {
@@ -100,7 +98,7 @@ static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
/*
* in case of page flip request, vidi_finish_pageflip function
* will not be called because direct_vblank is true and then
- * that function will be called by crtc_ops->win_commit callback
+ * that function will be called by crtc_ops->update_plane callback
*/
schedule_work(&ctx->work);
@@ -118,19 +116,14 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
ctx->vblank_on = false;
}
-static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void vidi_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct vidi_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
if (ctx->suspended)
return;
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
-
DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
if (ctx->vblank_on)
@@ -179,7 +172,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.disable = vidi_disable,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
- .win_commit = vidi_win_commit,
+ .update_plane = vidi_update_plane,
};
static void vidi_fake_vblank_handler(struct work_struct *work)
@@ -196,7 +189,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
mutex_lock(&ctx->lock);
if (ctx->direct_vblank) {
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
ctx->direct_vblank = false;
mutex_unlock(&ctx->lock);
return;
@@ -204,7 +197,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
mutex_unlock(&ctx->lock);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->crtc);
}
static int vidi_show_connection(struct device *dev,
@@ -259,9 +252,7 @@ static DEVICE_ATTR(connection, 0644, vidi_show_connection,
int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file_priv)
{
- struct vidi_context *ctx = NULL;
- struct drm_encoder *encoder;
- struct exynos_drm_display *display;
+ struct vidi_context *ctx = dev_get_drvdata(drm_dev->dev);
struct drm_exynos_vidi_connection *vidi = data;
if (!vidi) {
@@ -274,21 +265,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
- list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
- head) {
- display = exynos_drm_get_display(encoder);
-
- if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
- ctx = display_to_vidi(display);
- break;
- }
- }
-
- if (!ctx) {
- DRM_DEBUG_KMS("not found virtual device type encoder.\n");
- return -EINVAL;
- }
-
if (ctx->connected == vidi->connection) {
DRM_DEBUG_KMS("same connection request.\n");
return -EINVAL;
@@ -381,7 +357,7 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
{
struct vidi_context *ctx = ctx_from_connector(connector);
- return ctx->encoder;
+ return &ctx->encoder;
}
static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
@@ -389,14 +365,12 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
.best_encoder = vidi_best_encoder,
};
-static int vidi_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int vidi_create_connector(struct drm_encoder *encoder)
{
- struct vidi_context *ctx = display_to_vidi(display);
+ struct vidi_context *ctx = encoder_to_vidi(encoder);
struct drm_connector *connector = &ctx->connector;
int ret;
- ctx->encoder = encoder;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(ctx->drm_dev, connector,
@@ -413,19 +387,47 @@ static int vidi_create_connector(struct exynos_drm_display *display,
return 0;
}
+static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void exynos_vidi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
-static struct exynos_drm_display_ops vidi_display_ops = {
- .create_connector = vidi_create_connector,
+static void exynos_vidi_enable(struct drm_encoder *encoder)
+{
+}
+
+static void exynos_vidi_disable(struct drm_encoder *encoder)
+{
+}
+
+static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
+ .mode_fixup = exynos_vidi_mode_fixup,
+ .mode_set = exynos_vidi_mode_set,
+ .enable = exynos_vidi_enable,
+ .disable = exynos_vidi_disable,
+};
+
+static struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder = &ctx->encoder;
struct exynos_drm_plane *exynos_plane;
enum drm_plane_type type;
unsigned int zpos;
- int ret;
+ int pipe, ret;
vidi_ctx_initialize(ctx, drm_dev);
@@ -447,9 +449,24 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
+ pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_VIDI);
+ if (pipe < 0)
+ return pipe;
+
+ encoder->possible_crtcs = 1 << pipe;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+
+ ret = vidi_create_connector(encoder);
if (ret) {
- ctx->crtc->base.funcs->destroy(&ctx->crtc->base);
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
return ret;
}
@@ -475,8 +492,6 @@ static int vidi_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
- ctx->display.ops = &vidi_display_ops;
ctx->default_win = 0;
ctx->pdev = pdev;
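vidi (and the hdmi conversion that follows) embeds struct drm_encoder directly in the driver context and recovers the context with container_of(), instead of keeping an encoder pointer plus a separate exynos_drm_display wrapper. A self-contained toy illustration of that embedding pattern, using stand-in types rather than the DRM ones:

#include <stddef.h>
#include <stdio.h>

struct encoder { int id; };
struct context { int connected; struct encoder encoder; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct context *encoder_to_ctx(struct encoder *e)
{
	return container_of(e, struct context, encoder);
}

int main(void)
{
	struct context ctx = { .connected = 1 };

	/* recover the containing context from a pointer to its member */
	printf("connected=%d\n", encoder_to_ctx(&ctx.encoder)->connected);
	return 0;
}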
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e286489031..932f7fa240f8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -22,7 +22,6 @@
#include "regs-hdmi.h"
#include <linux/kernel.h>
-#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
@@ -33,8 +32,8 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/hdmi.h>
#include <linux/component.h>
@@ -48,7 +47,6 @@
#include "exynos_mixer.h"
#include <linux/gpio.h>
-#include <media/s5p_hdmi.h>
#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
@@ -88,109 +86,14 @@ struct hdmi_resources {
int regul_count;
};
-struct hdmi_tg_regs {
- u8 cmd[1];
- u8 h_fsz[2];
- u8 hact_st[2];
- u8 hact_sz[2];
- u8 v_fsz[2];
- u8 vsync[2];
- u8 vsync2[2];
- u8 vact_st[2];
- u8 vact_sz[2];
- u8 field_chg[2];
- u8 vact_st2[2];
- u8 vact_st3[2];
- u8 vact_st4[2];
- u8 vsync_top_hdmi[2];
- u8 vsync_bot_hdmi[2];
- u8 field_top_hdmi[2];
- u8 field_bot_hdmi[2];
- u8 tg_3d[1];
-};
-
-struct hdmi_v13_core_regs {
- u8 h_blank[2];
- u8 v_blank[3];
- u8 h_v_line[3];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f[3];
- u8 h_sync_gen[3];
- u8 v_sync_gen1[3];
- u8 v_sync_gen2[3];
- u8 v_sync_gen3[3];
-};
-
-struct hdmi_v14_core_regs {
- u8 h_blank[2];
- u8 v2_blank[2];
- u8 v1_blank[2];
- u8 v_line[2];
- u8 h_line[2];
- u8 hsync_pol[1];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f0[2];
- u8 v_blank_f1[2];
- u8 h_sync_start[2];
- u8 h_sync_end[2];
- u8 v_sync_line_bef_2[2];
- u8 v_sync_line_bef_1[2];
- u8 v_sync_line_aft_2[2];
- u8 v_sync_line_aft_1[2];
- u8 v_sync_line_aft_pxl_2[2];
- u8 v_sync_line_aft_pxl_1[2];
- u8 v_blank_f2[2]; /* for 3D mode */
- u8 v_blank_f3[2]; /* for 3D mode */
- u8 v_blank_f4[2]; /* for 3D mode */
- u8 v_blank_f5[2]; /* for 3D mode */
- u8 v_sync_line_aft_3[2];
- u8 v_sync_line_aft_4[2];
- u8 v_sync_line_aft_5[2];
- u8 v_sync_line_aft_6[2];
- u8 v_sync_line_aft_pxl_3[2];
- u8 v_sync_line_aft_pxl_4[2];
- u8 v_sync_line_aft_pxl_5[2];
- u8 v_sync_line_aft_pxl_6[2];
- u8 vact_space_1[2];
- u8 vact_space_2[2];
- u8 vact_space_3[2];
- u8 vact_space_4[2];
- u8 vact_space_5[2];
- u8 vact_space_6[2];
-};
-
-struct hdmi_v13_conf {
- struct hdmi_v13_core_regs core;
- struct hdmi_tg_regs tg;
-};
-
-struct hdmi_v14_conf {
- struct hdmi_v14_core_regs core;
- struct hdmi_tg_regs tg;
-};
-
-struct hdmi_conf_regs {
- int pixel_clock;
- int cea_video_id;
- enum hdmi_picture_aspect aspect_ratio;
- union {
- struct hdmi_v13_conf v13_conf;
- struct hdmi_v14_conf v14_conf;
- } conf;
-};
-
struct hdmi_context {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- struct drm_encoder *encoder;
bool hpd;
bool powered;
bool dvi_mode;
- struct mutex hdmi_mutex;
void __iomem *regs;
int irq;
@@ -201,22 +104,20 @@ struct hdmi_context {
/* current hdmiphy conf regs */
struct drm_display_mode current_mode;
- struct hdmi_conf_regs mode_conf;
+ u8 cea_video_id;
struct hdmi_resources res;
+ const struct hdmi_driver_data *drv_data;
int hpd_gpio;
void __iomem *regs_hdmiphy;
- const struct hdmiphy_config *phy_confs;
- unsigned int phy_conf_count;
struct regmap *pmureg;
- enum hdmi_type type;
};
-static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
{
- return container_of(d, struct hdmi_context, display);
+ return container_of(e, struct hdmi_context, encoder);
}
struct hdmiphy_config {
@@ -624,6 +525,16 @@ static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
writeb(value, hdata->regs + reg_id);
}
+static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
+ int bytes, u32 val)
+{
+ while (--bytes >= 0) {
+ writeb(val & 0xff, hdata->regs + reg_id);
+ val >>= 8;
+ reg_id += 4;
+ }
+}
+
static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
u32 reg_id, u32 value, u32 mask)
{
@@ -930,7 +841,7 @@ static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
{
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->drv_data->type == HDMI_TYPE13)
hdmi_v13_regs_dump(hdata, prefix);
else
hdmi_v14_regs_dump(hdata, prefix);
@@ -957,7 +868,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
u32 hdr_sum;
u8 chksum;
u32 mod;
- u32 vic;
+ u8 ar;
mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
if (hdata->dvi_mode) {
@@ -988,27 +899,22 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
* Set the aspect ratio as per the mode, mentioned in
* Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
*/
- switch (hdata->mode_conf.aspect_ratio) {
+ ar = hdata->current_mode.picture_aspect_ratio;
+ switch (ar) {
case HDMI_PICTURE_ASPECT_4_3:
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
- hdata->mode_conf.aspect_ratio |
- AVI_4_3_CENTER_RATIO);
+ ar |= AVI_4_3_CENTER_RATIO;
break;
case HDMI_PICTURE_ASPECT_16_9:
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
- hdata->mode_conf.aspect_ratio |
- AVI_16_9_CENTER_RATIO);
+ ar |= AVI_16_9_CENTER_RATIO;
break;
case HDMI_PICTURE_ASPECT_NONE:
default:
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
- hdata->mode_conf.aspect_ratio |
- AVI_SAME_AS_PIC_ASPECT_RATIO);
+ ar |= AVI_SAME_AS_PIC_ASPECT_RATIO;
break;
}
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar);
- vic = hdata->mode_conf.cea_video_id;
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
infoframe->any.length, hdr_sum);
@@ -1038,10 +944,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
{
struct hdmi_context *hdata = ctx_from_connector(connector);
- hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+ if (gpio_get_value(hdata->hpd_gpio))
+ return connector_status_connected;
- return hdata->hpd ? connector_status_connected :
- connector_status_disconnected;
+ return connector_status_disconnected;
}
static void hdmi_connector_destroy(struct drm_connector *connector)
@@ -1064,6 +970,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
{
struct hdmi_context *hdata = ctx_from_connector(connector);
struct edid *edid;
+ int ret;
if (!hdata->ddc_adpt)
return -ENODEV;
@@ -1079,15 +986,19 @@ static int hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return ret;
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
{
int i;
- for (i = 0; i < hdata->phy_conf_count; i++)
- if (hdata->phy_confs[i].pixel_clock == pixel_clock)
+ for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
+ if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
return i;
DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -1120,7 +1031,7 @@ static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
{
struct hdmi_context *hdata = ctx_from_connector(connector);
- return hdata->encoder;
+ return &hdata->encoder;
}
static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -1129,14 +1040,12 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
.best_encoder = hdmi_best_encoder,
};
-static int hdmi_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int hdmi_create_connector(struct drm_encoder *encoder)
{
- struct hdmi_context *hdata = display_to_hdmi(display);
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct drm_connector *connector = &hdata->connector;
int ret;
- hdata->encoder = encoder;
connector->interlace_allowed = true;
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -1154,23 +1063,30 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
return 0;
}
-static void hdmi_mode_fixup(struct exynos_drm_display *display,
- struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
struct drm_display_mode *m;
int mode_ok;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
drm_mode_set_crtcinfo(adjusted_mode, 0);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ break;
+ }
+
+ if (connector->encoder != encoder)
+ return true;
+
mode_ok = hdmi_mode_valid(connector, adjusted_mode);
/* just return if user desired mode exists. */
if (mode_ok == MODE_OK)
- return;
+ return true;
/*
* otherwise, find the most suitable mode among modes and change it
@@ -1190,6 +1106,8 @@ static void hdmi_mode_fixup(struct exynos_drm_display *display,
break;
}
}
+
+ return true;
}
static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1252,7 +1170,7 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->drv_data->type == HDMI_TYPE13)
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
else
hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
@@ -1386,7 +1304,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
}
- if (hdata->type == HDMI_TYPE13) {
+ if (hdata->drv_data->type == HDMI_TYPE13) {
/* choose bluescreen (fecal) color */
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
@@ -1419,66 +1337,94 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
{
- const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
- const struct hdmi_v13_core_regs *core =
- &hdata->mode_conf.conf.v13_conf.core;
+ struct drm_display_mode *m = &hdata->current_mode;
+ unsigned int val;
int tries;
- /* setting core registers */
- hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
- hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
- hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3,
+ (m->htotal << 12) | m->vtotal);
+
+ val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, val);
+
+ val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+ hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, val);
+
+ val = (m->hsync_start - m->hdisplay - 2);
+ val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+ val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
+ hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
+
+ /*
+ * Quirk requirement for exynos HDMI IP design,
+ * 2 pixels less than the actual calculation for hsync_start
+ * and end.
+ */
+
+ /* Following values & calculations differ for different type of modes */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ val = ((m->vsync_end - m->vdisplay) / 2);
+ val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+ val = m->vtotal / 2;
+ val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+ hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+ val = (m->vtotal +
+ ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+ val |= m->vtotal << 11;
+ hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, val);
+
+ val = ((m->vtotal / 2) + 7);
+ val |= ((m->vtotal / 2) + 2) << 12;
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, val);
+
+ val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ val |= ((m->htotal / 2) +
+ (m->hsync_start - m->hdisplay)) << 12;
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, val);
+
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+ (m->vtotal - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
+ } else {
+ /* Progressive Mode */
+
+ val = m->vtotal;
+ val |= (m->vtotal - m->vdisplay) << 11;
+ hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+ hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, 0);
+
+ val = (m->vsync_end - m->vdisplay);
+ val |= ((m->vsync_start - m->vdisplay) << 12);
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, 0x1001);
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, 0x1001);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+ m->vtotal - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+ }
+
/* Timing generator registers */
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+ hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
@@ -1503,144 +1449,119 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
{
- const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
- const struct hdmi_v14_core_regs *core =
- &hdata->mode_conf.conf.v14_conf.core;
+ struct drm_display_mode *m = &hdata->current_mode;
int tries;
- /* setting core registers */
- hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
- hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
- hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
- hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
- hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
- hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
- hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
- core->v_sync_line_bef_2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
- core->v_sync_line_bef_2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
- core->v_sync_line_bef_1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
- core->v_sync_line_bef_1[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
- core->v_sync_line_aft_2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
- core->v_sync_line_aft_2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
- core->v_sync_line_aft_1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
- core->v_sync_line_aft_1[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
- core->v_sync_line_aft_pxl_2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
- core->v_sync_line_aft_pxl_2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
- core->v_sync_line_aft_pxl_1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
- core->v_sync_line_aft_pxl_1[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
- core->v_sync_line_aft_3[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
- core->v_sync_line_aft_3[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
- core->v_sync_line_aft_4[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
- core->v_sync_line_aft_4[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
- core->v_sync_line_aft_5[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
- core->v_sync_line_aft_5[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
- core->v_sync_line_aft_6[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
- core->v_sync_line_aft_6[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
- core->v_sync_line_aft_pxl_3[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
- core->v_sync_line_aft_pxl_3[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
- core->v_sync_line_aft_pxl_4[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
- core->v_sync_line_aft_pxl_4[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
- core->v_sync_line_aft_pxl_5[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
- core->v_sync_line_aft_pxl_5[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
- core->v_sync_line_aft_pxl_6[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
- core->v_sync_line_aft_pxl_6[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
+ hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
+ hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
+ (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+ hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
+ (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+ hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
+ (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+ /*
+	 * Quirk required by the Exynos 5 HDMI IP design: hsync_start
+	 * and hsync_end must be programmed 2 pixels less than the
+	 * calculated values.
+ */
+
+	/* The following values and calculations differ by mode type */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+ (m->vsync_end - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+ (m->vsync_start - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal / 2);
+ hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+ (m->vtotal - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2,
+ m->vtotal - m->vdisplay / 2);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2,
+ (m->vtotal / 2) + 7);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2,
+ (m->vtotal / 2) + 2);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+ (m->vtotal - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2,
+ m->vtotal - m->vdisplay / 2);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2,
+ (m->vtotal / 2) + 1);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2,
+ (m->vtotal / 2) + 1);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2,
+ (m->vtotal / 2) + 1);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
+ } else {
+ /* Progressive Mode */
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+ m->vsync_end - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+ m->vsync_start - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+ m->vtotal - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+ m->vtotal - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
+ }
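As a worked example of the interlaced branch above (the timing numbers are illustrative assumptions based on the common CEA 1080i mode — vdisplay 1080, vsync_start 1084, vsync_end 1094, vtotal 1125, hdisplay 1920, hsync_start 2008, htotal 2200 — not values taken from this patch), the registers come out as:

	V_SYNC_LINE_BEF_2 = (1094 - 1080) / 2 = 7
	V_SYNC_LINE_BEF_1 = (1084 - 1080) / 2 = 2
	V2_BLANK          = 1125 / 2 = 562
	V1_BLANK          = (1125 - 1080) / 2 = 22
	V_BLANK_F0        = 1125 - 1080 / 2 = 585
	V_SYNC_LINE_AFT_2 = 562 + 7 = 569
	V_SYNC_LINE_AFT_1 = 562 + 2 = 564
	V_SYNC_LINE_AFT_PXL_2/1 = 2200 / 2 + (2008 - 1920) = 1188
	TG_VACT_ST = 22, TG_VACT_SZ = 540, TG_VACT_ST2 = 585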
+
+	/* The following values and calculations are the same irrespective of mode type */
+ hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
+ m->hsync_start - m->hdisplay - 2);
+ hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
+ m->hsync_end - m->hdisplay - 2);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_1_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_2_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_3_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_4_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_5_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_6_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F2_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F3_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F4_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F5_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
/* Timing generator registers */
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
+ hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
@@ -1665,7 +1586,7 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
static void hdmi_mode_apply(struct hdmi_context *hdata)
{
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->drv_data->type == HDMI_TYPE13)
hdmi_v13_mode_apply(hdata);
else
hdmi_v14_mode_apply(hdata);
@@ -1683,7 +1604,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
HDMI_PHY_ENABLE_MODE_SET);
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->drv_data->type == HDMI_TYPE13)
reg = HDMI_V13_PHY_RSTOUT;
else
reg = HDMI_PHY_RSTOUT;
@@ -1697,7 +1618,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
static void hdmiphy_poweron(struct hdmi_context *hdata)
{
- if (hdata->type != HDMI_TYPE14)
+ if (hdata->drv_data->type != HDMI_TYPE14)
return;
DRM_DEBUG_KMS("\n");
@@ -1717,7 +1638,7 @@ static void hdmiphy_poweron(struct hdmi_context *hdata)
static void hdmiphy_poweroff(struct hdmi_context *hdata)
{
- if (hdata->type != HDMI_TYPE14)
+ if (hdata->drv_data->type != HDMI_TYPE14)
return;
DRM_DEBUG_KMS("\n");
@@ -1743,13 +1664,14 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
int i;
/* pixel clock */
- i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+ i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
if (i < 0) {
DRM_ERROR("failed to find hdmiphy conf\n");
return;
}
- ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
+ ret = hdmiphy_reg_write_buf(hdata, 0,
+ hdata->drv_data->phy_confs[i].conf, 32);
if (ret) {
DRM_ERROR("failed to configure hdmiphy\n");
return;
@@ -1771,10 +1693,8 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmiphy_conf_reset(hdata);
hdmiphy_conf_apply(hdata);
- mutex_lock(&hdata->hdmi_mutex);
hdmi_start(hdata, false);
hdmi_conf_init(hdata);
- mutex_unlock(&hdata->hdmi_mutex);
hdmi_audio_init(hdata);
@@ -1785,271 +1705,32 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmi_regs_dump(hdata, "start");
}
-static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
-{
- int i;
- BUG_ON(num_bytes > 4);
- for (i = 0; i < num_bytes; i++)
- reg_pair[i] = (value >> (8 * i)) & 0xff;
-}
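The removed hdmi_set_reg()/hdmi_reg_writeb() pair split each value into little-endian bytes cached in the mode_conf structures; the new code instead programs the hardware directly through hdmi_reg_writev(), which is not visible in this hunk. A minimal sketch of what it is assumed to do (byte-splitting across consecutive 32-bit-spaced registers, as hdmi_reg_writeb() usage suggests; the hdata->regs field name is likewise an assumption):

	static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
					   int bytes, u32 val)
	{
		while (--bytes >= 0) {
			/* write one byte per register, LSB first */
			writel(val & 0xff, hdata->regs + reg_id);
			val >>= 8;
			reg_id += 4;
		}
	}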
-
-static void hdmi_v13_mode_set(struct hdmi_context *hdata,
- struct drm_display_mode *m)
+static void hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
- struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
- unsigned int val;
-
- hdata->mode_conf.cea_video_id =
- drm_match_cea_mode((struct drm_display_mode *)m);
- hdata->mode_conf.pixel_clock = m->clock * 1000;
- hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
- hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
- hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
-
- val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
- hdmi_set_reg(core->vsync_pol, 1, val);
-
- val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
- hdmi_set_reg(core->int_pro_mode, 1, val);
-
- val = (m->hsync_start - m->hdisplay - 2);
- val |= ((m->hsync_end - m->hdisplay - 2) << 10);
- val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
- hdmi_set_reg(core->h_sync_gen, 3, val);
-
- /*
- * Quirk requirement for exynos HDMI IP design,
- * 2 pixels less than the actual calculation for hsync_start
- * and end.
- */
-
- /* Following values & calculations differ for different type of modes */
- if (m->flags & DRM_MODE_FLAG_INTERLACE) {
- /* Interlaced Mode */
- val = ((m->vsync_end - m->vdisplay) / 2);
- val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
- hdmi_set_reg(core->v_sync_gen1, 3, val);
-
- val = m->vtotal / 2;
- val |= ((m->vtotal - m->vdisplay) / 2) << 11;
- hdmi_set_reg(core->v_blank, 3, val);
-
- val = (m->vtotal +
- ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
- val |= m->vtotal << 11;
- hdmi_set_reg(core->v_blank_f, 3, val);
-
- val = ((m->vtotal / 2) + 7);
- val |= ((m->vtotal / 2) + 2) << 12;
- hdmi_set_reg(core->v_sync_gen2, 3, val);
-
- val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
- val |= ((m->htotal / 2) +
- (m->hsync_start - m->hdisplay)) << 12;
- hdmi_set_reg(core->v_sync_gen3, 3, val);
-
- hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
- hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
-
- hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
- } else {
- /* Progressive Mode */
-
- val = m->vtotal;
- val |= (m->vtotal - m->vdisplay) << 11;
- hdmi_set_reg(core->v_blank, 3, val);
-
- hdmi_set_reg(core->v_blank_f, 3, 0);
-
- val = (m->vsync_end - m->vdisplay);
- val |= ((m->vsync_start - m->vdisplay) << 12);
- hdmi_set_reg(core->v_sync_gen1, 3, val);
-
- hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value */
- hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value */
- hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
- hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
- hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
- }
-
- /* Timing generator registers */
- hdmi_set_reg(tg->cmd, 1, 0x0);
- hdmi_set_reg(tg->h_fsz, 2, m->htotal);
- hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
- hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
- hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
- hdmi_set_reg(tg->vsync, 2, 0x1);
- hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
- hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
- hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
-}
-
-static void hdmi_v14_mode_set(struct hdmi_context *hdata,
- struct drm_display_mode *m)
-{
- struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
- struct hdmi_v14_core_regs *core =
- &hdata->mode_conf.conf.v14_conf.core;
-
- hdata->mode_conf.cea_video_id =
- drm_match_cea_mode((struct drm_display_mode *)m);
- hdata->mode_conf.pixel_clock = m->clock * 1000;
- hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
- hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
- hdmi_set_reg(core->v_line, 2, m->vtotal);
- hdmi_set_reg(core->h_line, 2, m->htotal);
- hdmi_set_reg(core->hsync_pol, 1,
- (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
- hdmi_set_reg(core->vsync_pol, 1,
- (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
- hdmi_set_reg(core->int_pro_mode, 1,
- (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
-
- /*
- * Quirk requirement for exynos 5 HDMI IP design,
- * 2 pixels less than the actual calculation for hsync_start
- * and end.
- */
-
- /* Following values & calculations differ for different type of modes */
- if (m->flags & DRM_MODE_FLAG_INTERLACE) {
- /* Interlaced Mode */
- hdmi_set_reg(core->v_sync_line_bef_2, 2,
- (m->vsync_end - m->vdisplay) / 2);
- hdmi_set_reg(core->v_sync_line_bef_1, 2,
- (m->vsync_start - m->vdisplay) / 2);
- hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
- hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
- hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2);
- hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
- hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
- hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
- hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
- (m->htotal / 2) + (m->hsync_start - m->hdisplay));
- hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
- (m->htotal / 2) + (m->hsync_start - m->hdisplay));
- hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
- hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
- hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2);
- hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1);
- hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1);
- hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1);
- hdmi_set_reg(tg->vact_st3, 2, 0x0);
- hdmi_set_reg(tg->vact_st4, 2, 0x0);
- } else {
- /* Progressive Mode */
- hdmi_set_reg(core->v_sync_line_bef_2, 2,
- m->vsync_end - m->vdisplay);
- hdmi_set_reg(core->v_sync_line_bef_1, 2,
- m->vsync_start - m->vdisplay);
- hdmi_set_reg(core->v2_blank, 2, m->vtotal);
- hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
- hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
- hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
- hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
- hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
- hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
- hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
- hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
- }
-
- /* Following values & calculations are same irrespective of mode type */
- hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
- hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
- hdmi_set_reg(core->vact_space_1, 2, 0xffff);
- hdmi_set_reg(core->vact_space_2, 2, 0xffff);
- hdmi_set_reg(core->vact_space_3, 2, 0xffff);
- hdmi_set_reg(core->vact_space_4, 2, 0xffff);
- hdmi_set_reg(core->vact_space_5, 2, 0xffff);
- hdmi_set_reg(core->vact_space_6, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
-
- /* Timing generator registers */
- hdmi_set_reg(tg->cmd, 1, 0x0);
- hdmi_set_reg(tg->h_fsz, 2, m->htotal);
- hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
- hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
- hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
- hdmi_set_reg(tg->vsync, 2, 0x1);
- hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
- hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
- hdmi_set_reg(tg->tg_3d, 1, 0x0);
-}
-
-static void hdmi_mode_set(struct exynos_drm_display *display,
- struct drm_display_mode *mode)
-{
- struct hdmi_context *hdata = display_to_hdmi(display);
- struct drm_display_mode *m = mode;
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+ struct drm_display_mode *m = adjusted_mode;
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
m->hdisplay, m->vdisplay,
m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
"INTERLACED" : "PROGRESSIVE");
- /* preserve mode information for later use. */
- drm_mode_copy(&hdata->current_mode, mode);
-
- if (hdata->type == HDMI_TYPE13)
- hdmi_v13_mode_set(hdata, mode);
- else
- hdmi_v14_mode_set(hdata, mode);
-}
-
-static void hdmi_commit(struct exynos_drm_display *display)
-{
- struct hdmi_context *hdata = display_to_hdmi(display);
-
- mutex_lock(&hdata->hdmi_mutex);
- if (!hdata->powered) {
- mutex_unlock(&hdata->hdmi_mutex);
- return;
- }
- mutex_unlock(&hdata->hdmi_mutex);
-
- hdmi_conf_apply(hdata);
+ drm_mode_copy(&hdata->current_mode, m);
+ hdata->cea_video_id = drm_match_cea_mode(mode);
}
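hdmi_mode_set() now receives a drm_encoder rather than the exynos display abstraction. encoder_to_hdmi() is not part of this hunk, but since hdmi_bind() below takes the encoder from &hdata->encoder, it is presumably the usual container_of() accessor — a sketch under that assumption:

	static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
	{
		return container_of(e, struct hdmi_context, encoder);
	}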
-static void hdmi_poweron(struct hdmi_context *hdata)
+static void hdmi_enable(struct drm_encoder *encoder)
{
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct hdmi_resources *res = &hdata->res;
- mutex_lock(&hdata->hdmi_mutex);
- if (hdata->powered) {
- mutex_unlock(&hdata->hdmi_mutex);
+ if (hdata->powered)
return;
- }
hdata->powered = true;
- mutex_unlock(&hdata->hdmi_mutex);
-
pm_runtime_get_sync(hdata->dev);
if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
@@ -2063,17 +1744,32 @@ static void hdmi_poweron(struct hdmi_context *hdata)
clk_prepare_enable(res->sclk_hdmi);
hdmiphy_poweron(hdata);
- hdmi_commit(&hdata->display);
+ hdmi_conf_apply(hdata);
}
-static void hdmi_poweroff(struct hdmi_context *hdata)
+static void hdmi_disable(struct drm_encoder *encoder)
{
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct hdmi_resources *res = &hdata->res;
+ struct drm_crtc *crtc = encoder->crtc;
+ const struct drm_crtc_helper_funcs *funcs = NULL;
- mutex_lock(&hdata->hdmi_mutex);
if (!hdata->powered)
- goto out;
- mutex_unlock(&hdata->hdmi_mutex);
+ return;
+
+ /*
+	 * The SFRs of the VP and Mixer are updated on the vertical sync of
+	 * the timing generator, which is part of HDMI, so the sequence for
+	 * disabling the TV subsystem must be:
+	 * VP -> Mixer -> HDMI
+	 *
+	 * The code below disables the Mixer and VP (if used)
+	 * prior to disabling HDMI.
+ */
+ if (crtc)
+ funcs = crtc->helper_private;
+ if (funcs && funcs->disable)
+ (*funcs->disable)(crtc);
/* HDMI System Disable */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
@@ -2093,57 +1789,18 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
pm_runtime_put_sync(hdata->dev);
- mutex_lock(&hdata->hdmi_mutex);
hdata->powered = false;
-
-out:
- mutex_unlock(&hdata->hdmi_mutex);
}
-static void hdmi_dpms(struct exynos_drm_display *display, int mode)
-{
- struct hdmi_context *hdata = display_to_hdmi(display);
- struct drm_encoder *encoder = hdata->encoder;
- struct drm_crtc *crtc = encoder->crtc;
- const struct drm_crtc_helper_funcs *funcs = NULL;
-
- DRM_DEBUG_KMS("mode %d\n", mode);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- hdmi_poweron(hdata);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- /*
- * The SFRs of VP and Mixer are updated by Vertical Sync of
- * Timing generator which is a part of HDMI so the sequence
- * to disable TV Subsystem should be as following,
- * VP -> Mixer -> HDMI
- *
- * Below codes will try to disable Mixer and VP(if used)
- * prior to disabling HDMI.
- */
- if (crtc)
- funcs = crtc->helper_private;
- if (funcs && funcs->disable)
- (*funcs->disable)(crtc);
-
- hdmi_poweroff(hdata);
- break;
- default:
- DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
- break;
- }
-}
-
-static struct exynos_drm_display_ops hdmi_display_ops = {
- .create_connector = hdmi_create_connector,
+static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
- .dpms = hdmi_dpms,
- .commit = hdmi_commit,
+ .enable = hdmi_enable,
+ .disable = hdmi_disable,
+};
+
+static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
static void hdmi_hotplug_work_func(struct work_struct *work)
@@ -2152,10 +1809,6 @@ static void hdmi_hotplug_work_func(struct work_struct *work)
hdata = container_of(work, struct hdmi_context, hotplug_work.work);
- mutex_lock(&hdata->hdmi_mutex);
- hdata->hpd = gpio_get_value(hdata->hpd_gpio);
- mutex_unlock(&hdata->hdmi_mutex);
-
if (hdata->drm_dev)
drm_helper_hpd_irq_event(hdata->drm_dev);
}
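With the cached hdata->hpd value gone, the hotplug state is presumably sampled on demand when userspace probes the connector. A sketch of what the connector .detect callback would look like under that assumption (the callback name and connector_to_hdmi() are hypothetical here, not taken from this patch):

	static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
						     bool force)
	{
		struct hdmi_context *hdata = connector_to_hdmi(connector);

		return gpio_get_value(hdata->hpd_gpio) ?
				connector_status_connected :
				connector_status_disconnected;
	}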
@@ -2254,30 +1907,6 @@ fail:
return ret;
}
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
- (struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct s5p_hdmi_platform_data *pd;
- u32 value;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- goto err_data;
-
- if (!of_find_property(np, "hpd-gpio", &value)) {
- DRM_ERROR("no hpd gpio property found\n");
- goto err_data;
- }
-
- pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0);
-
- return pd;
-
-err_data:
- return NULL;
-}
-
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -2301,10 +1930,33 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm_dev = data;
struct hdmi_context *hdata = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &hdata->encoder;
+ int ret, pipe;
hdata->drm_dev = drm_dev;
- return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
+ pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_HDMI);
+ if (pipe < 0)
+ return pipe;
+
+ encoder->possible_crtcs = 1 << pipe;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+
+ ret = hdmi_create_connector(encoder);
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return 0;
}
static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2338,43 +1990,30 @@ static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
static int hdmi_probe(struct platform_device *pdev)
{
struct device_node *ddc_node, *phy_node;
- struct s5p_hdmi_platform_data *pdata;
- struct hdmi_driver_data *drv_data;
const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
struct resource *res;
int ret;
- if (!dev->of_node)
- return -ENODEV;
-
- pdata = drm_hdmi_dt_parse_pdata(dev);
- if (!pdata)
- return -EINVAL;
-
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
if (!hdata)
return -ENOMEM;
- hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
- hdata->display.ops = &hdmi_display_ops;
-
- mutex_init(&hdata->hdmi_mutex);
-
- platform_set_drvdata(pdev, hdata);
-
- match = of_match_node(hdmi_match_types, dev->of_node);
+ match = of_match_device(hdmi_match_types, dev);
if (!match)
return -ENODEV;
- drv_data = (struct hdmi_driver_data *)match->data;
- hdata->type = drv_data->type;
- hdata->phy_confs = drv_data->phy_confs;
- hdata->phy_conf_count = drv_data->phy_conf_count;
+ hdata->drv_data = match->data;
+
+ platform_set_drvdata(pdev, hdata);
- hdata->hpd_gpio = pdata->hpd_gpio;
hdata->dev = dev;
+ hdata->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0);
+ if (hdata->hpd_gpio < 0) {
+ DRM_ERROR("cannot get hpd gpio property\n");
+ return hdata->hpd_gpio;
+ }
ret = hdmi_resources_init(hdata);
if (ret) {
@@ -2426,7 +2065,7 @@ out_get_ddc_adpt:
}
out_get_phy_port:
- if (drv_data->is_apb_phy) {
+ if (hdata->drv_data->is_apb_phy) {
hdata->regs_hdmiphy = of_iomap(phy_node, 0);
if (!hdata->regs_hdmiphy) {
DRM_ERROR("failed to ioremap hdmi phy\n");
@@ -2449,8 +2088,6 @@ out_get_phy_port:
goto err_hdmiphy;
}
- hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-
INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db33062..e68340c77676 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -69,6 +69,11 @@ enum mixer_version_id {
MXR_VER_128_0_0_184,
};
+enum mixer_flag_bits {
+ MXR_BIT_POWERED,
+ MXR_BIT_VSYNC,
+};
+
struct mixer_context {
struct platform_device *pdev;
struct device *dev;
@@ -76,13 +81,11 @@ struct mixer_context {
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[MIXER_WIN_NR];
int pipe;
+ unsigned long flags;
bool interlace;
- bool powered;
bool vp_enabled;
bool has_sclk;
- u32 int_en;
- struct mutex mixer_mutex;
struct mixer_resources mixer_res;
enum mixer_version_id mxr_ver;
wait_queue_head_t wait_vsync_queue;
@@ -380,19 +383,20 @@ static void mixer_stop(struct mixer_context *ctx)
usleep_range(10000, 12000);
}
-static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
+static void vp_video_buffer(struct mixer_context *ctx,
+ struct exynos_drm_plane *plane)
{
struct mixer_resources *res = &ctx->mixer_res;
+ struct drm_plane_state *state = plane->base.state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode = &state->crtc->mode;
unsigned long flags;
- struct exynos_drm_plane *plane;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
bool crcb_mode = false;
u32 val;
- plane = &ctx->planes[win];
-
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_NV12:
crcb_mode = false;
break;
@@ -401,21 +405,21 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
break;
default:
DRM_ERROR("pixel format for vp is wrong [%d].\n",
- plane->pixel_format);
+ fb->pixel_format);
return;
}
luma_addr[0] = plane->dma_addr[0];
chroma_addr[0] = plane->dma_addr[1];
- if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
ctx->interlace = true;
if (tiled_mode) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + plane->pitch;
- chroma_addr[1] = chroma_addr[0] + plane->pitch;
+ luma_addr[1] = luma_addr[0] + fb->pitches[0];
+ chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
}
} else {
ctx->interlace = false;
@@ -436,25 +440,25 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) |
- VP_IMG_VSIZE(plane->fb_height));
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
+ VP_IMG_VSIZE(fb->height));
/* chroma height has to be reduced by 2 to avoid chroma distortions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) |
- VP_IMG_VSIZE(plane->fb_height / 2));
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+ VP_IMG_VSIZE(fb->height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, plane->src_width);
- vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height);
+ vp_reg_write(res, VP_SRC_WIDTH, plane->src_w);
+ vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h);
vp_reg_write(res, VP_SRC_H_POSITION,
VP_SRC_H_POSITION_VAL(plane->src_x));
vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
- vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width);
+ vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w);
vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2);
vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h);
vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
}
@@ -469,9 +473,9 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, plane->mode_height);
- mixer_cfg_rgb_fmt(ctx, plane->mode_height);
- mixer_cfg_layer(ctx, win, true);
+ mixer_cfg_scan(ctx, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+ mixer_cfg_layer(ctx, plane->zpos, true);
mixer_run(ctx);
mixer_vsync_set_update(ctx, true);
@@ -491,15 +495,15 @@ static void mixer_layer_update(struct mixer_context *ctx)
static int mixer_setup_scale(const struct exynos_drm_plane *plane,
unsigned int *x_ratio, unsigned int *y_ratio)
{
- if (plane->crtc_width != plane->src_width) {
- if (plane->crtc_width == 2 * plane->src_width)
+ if (plane->crtc_w != plane->src_w) {
+ if (plane->crtc_w == 2 * plane->src_w)
*x_ratio = 1;
else
goto fail;
}
- if (plane->crtc_height != plane->src_height) {
- if (plane->crtc_height == 2 * plane->src_height)
+ if (plane->crtc_h != plane->src_h) {
+ if (plane->crtc_h == 2 * plane->src_h)
*y_ratio = 1;
else
goto fail;
@@ -512,20 +516,22 @@ fail:
return -ENOTSUPP;
}
-static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
+static void mixer_graph_buffer(struct mixer_context *ctx,
+ struct exynos_drm_plane *plane)
{
struct mixer_resources *res = &ctx->mixer_res;
+ struct drm_plane_state *state = plane->base.state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode = &state->crtc->mode;
unsigned long flags;
- struct exynos_drm_plane *plane;
+ unsigned int win = plane->zpos;
unsigned int x_ratio = 0, y_ratio = 0;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
- plane = &ctx->planes[win];
-
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_XRGB4444:
fmt = MXR_FORMAT_ARGB4444;
break;
@@ -557,12 +563,12 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
/* converting dma address base and source offset */
dma_addr = plane->dma_addr[0]
- + (plane->src_x * plane->bpp >> 3)
- + (plane->src_y * plane->pitch);
+ + (plane->src_x * fb->bits_per_pixel >> 3)
+ + (plane->src_y * fb->pitches[0]);
src_x_offset = 0;
src_y_offset = 0;
- if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE)
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
ctx->interlace = true;
else
ctx->interlace = false;
@@ -576,18 +582,18 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
/* setup geometry */
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
- plane->pitch / (plane->bpp >> 3));
+ fb->pitches[0] / (fb->bits_per_pixel >> 3));
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
win == MIXER_DEFAULT_WIN) {
- val = MXR_MXR_RES_HEIGHT(plane->mode_height);
- val |= MXR_MXR_RES_WIDTH(plane->mode_width);
+ val = MXR_MXR_RES_HEIGHT(mode->vdisplay);
+ val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
mixer_reg_write(res, MXR_RESOLUTION, val);
}
- val = MXR_GRP_WH_WIDTH(plane->src_width);
- val |= MXR_GRP_WH_HEIGHT(plane->src_height);
+ val = MXR_GRP_WH_WIDTH(plane->src_w);
+ val |= MXR_GRP_WH_HEIGHT(plane->src_h);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -605,8 +611,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, plane->mode_height);
- mixer_cfg_rgb_fmt(ctx, plane->mode_height);
+ mixer_cfg_scan(ctx, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
mixer_cfg_layer(ctx, win, true);
/* layer update mandatory for mixer 16.0.33.0 */
@@ -718,6 +724,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
+ /* the vsync interrupt uses different bits for read and clear */
+ val |= MXR_INT_CLEAR_VSYNC;
+ val &= ~MXR_INT_STATUS_VSYNC;
+
/* interlaced scan needs to check the shadow register */
if (ctx->interlace) {
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -731,8 +741,8 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
goto out;
}
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
+ exynos_drm_crtc_finish_pageflip(ctx->crtc);
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
@@ -743,11 +753,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
out:
/* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
- /* vsync interrupt use different bit for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
- val |= MXR_INT_CLEAR_VSYNC;
- }
mixer_reg_write(res, MXR_INT_STATUS, val);
spin_unlock(&res->reg_slock);
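The removed code at the bottom of the handler tested and cleared MXR_INT_EN_VSYNC — the interrupt enable bit — when acknowledging, instead of the status bit. With this change the acknowledge path, assuming that bit layout, reduces to the following sketch:

	val = mixer_reg_read(res, MXR_INT_STATUS);
	if (val & MXR_INT_STATUS_VSYNC) {
		/* the status bit is read-only; a separate bit clears it */
		val &= ~MXR_INT_STATUS_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
		/* ... vblank/pageflip handling ... */
	}
	mixer_reg_write(res, MXR_INT_STATUS, val);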
@@ -882,8 +887,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev,
- mixer_ctx->dev);
+ ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
if (ret)
priv->pipe--;
@@ -892,8 +896,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- if (is_drm_iommu_supported(mixer_ctx->drm_dev))
- drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+ drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
@@ -901,14 +904,13 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
- if (!mixer_ctx->powered) {
- mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
+ __set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return 0;
- }
/* enable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
- MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
@@ -918,48 +920,48 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
+ __clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+ return;
+
/* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- DRM_DEBUG_KMS("win: %d\n", win);
+ DRM_DEBUG_KMS("win: %d\n", plane->zpos);
- mutex_lock(&mixer_ctx->mixer_mutex);
- if (!mixer_ctx->powered) {
- mutex_unlock(&mixer_ctx->mixer_mutex);
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- }
- mutex_unlock(&mixer_ctx->mixer_mutex);
- if (win > 1 && mixer_ctx->vp_enabled)
- vp_video_buffer(mixer_ctx, win);
+ if (plane->zpos > 1 && mixer_ctx->vp_enabled)
+ vp_video_buffer(mixer_ctx, plane);
else
- mixer_graph_buffer(mixer_ctx, win);
+ mixer_graph_buffer(mixer_ctx, plane);
}
-static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
unsigned long flags;
- DRM_DEBUG_KMS("win: %d\n", win);
+ DRM_DEBUG_KMS("win: %d\n", plane->zpos);
- mutex_lock(&mixer_ctx->mixer_mutex);
- if (!mixer_ctx->powered) {
- mutex_unlock(&mixer_ctx->mixer_mutex);
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- }
- mutex_unlock(&mixer_ctx->mixer_mutex);
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
- mixer_cfg_layer(mixer_ctx, win, false);
+ mixer_cfg_layer(mixer_ctx, plane->zpos, false);
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
@@ -970,12 +972,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
struct mixer_context *mixer_ctx = crtc->ctx;
int err;
- mutex_lock(&mixer_ctx->mixer_mutex);
- if (!mixer_ctx->powered) {
- mutex_unlock(&mixer_ctx->mixer_mutex);
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- }
- mutex_unlock(&mixer_ctx->mixer_mutex);
err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
if (err < 0) {
@@ -1003,13 +1001,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
struct mixer_resources *res = &ctx->mixer_res;
int ret;
- mutex_lock(&ctx->mixer_mutex);
- if (ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
+ if (test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
- }
-
- mutex_unlock(&ctx->mixer_mutex);
pm_runtime_get_sync(ctx->dev);
@@ -1041,13 +1034,14 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
}
}
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = true;
- mutex_unlock(&ctx->mixer_mutex);
+ set_bit(MXR_BIT_POWERED, &ctx->flags);
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
- mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+ }
mixer_win_reset(ctx);
}
@@ -1057,24 +1051,16 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
struct mixer_resources *res = &ctx->mixer_res;
int i;
- mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
+ if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
- }
- mutex_unlock(&ctx->mixer_mutex);
mixer_stop(ctx);
mixer_regs_dump(ctx);
for (i = 0; i < MIXER_WIN_NR; i++)
- mixer_win_disable(crtc, i);
-
- ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+ mixer_disable_plane(crtc, &ctx->planes[i]);
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
- mutex_unlock(&ctx->mixer_mutex);
+ clear_bit(MXR_BIT_POWERED, &ctx->flags);
clk_disable_unprepare(res->hdmi);
clk_disable_unprepare(res->mixer);
@@ -1113,8 +1099,8 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.wait_for_vblank = mixer_wait_for_vblank,
- .win_commit = mixer_win_commit,
- .win_disable = mixer_win_disable,
+ .update_plane = mixer_update_plane,
+ .disable_plane = mixer_disable_plane,
};
static struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1236,8 +1222,6 @@ static int mixer_probe(struct platform_device *pdev)
return -ENOMEM;
}
- mutex_init(&ctx->mixer_mutex);
-
if (dev->of_node) {
const struct of_device_id *match;
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index de6f62a6ceb7..db9f7d011832 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -276,12 +276,12 @@ static void psbfb_copyarea_accel(struct fb_info *info,
break;
default:
/* software fallback */
- cfb_copyarea(info, a);
+ drm_fb_helper_cfb_copyarea(info, a);
return;
}
if (!gma_power_begin(dev, false)) {
- cfb_copyarea(info, a);
+ drm_fb_helper_cfb_copyarea(info, a);
return;
}
psb_accel_2d_copy(dev_priv,
@@ -308,7 +308,7 @@ void psbfb_copyarea(struct fb_info *info,
/* Avoid the 8 pixel erratum */
if (region->width == 8 || region->height == 8 ||
(info->flags & FBINFO_HWACCEL_DISABLED))
- return cfb_copyarea(info, region);
+ return drm_fb_helper_cfb_copyarea(info, region);
psbfb_copyarea_accel(info, region);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2d42ce6d3757..2eaf1b31c7bd 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -194,9 +194,9 @@ static struct fb_ops psbfb_ops = {
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = psbfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_sync = psbfb_sync,
.fb_ioctl = psbfb_ioctl,
@@ -208,9 +208,9 @@ static struct fb_ops psbfb_roll_ops = {
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = psbfb_pan,
.fb_mmap = psbfb_mmap,
.fb_ioctl = psbfb_ioctl,
@@ -222,9 +222,9 @@ static struct fb_ops psbfb_unaccel_ops = {
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_ioctl = psbfb_ioctl,
};
@@ -343,7 +343,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
struct drm_framebuffer *fb;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_mode_fb_cmd2 mode_cmd;
- struct device *device = &dev->pdev->dev;
int size;
int ret;
struct gtt_range *backing;
@@ -409,9 +408,9 @@ static int psbfb_create(struct psb_fbdev *fbdev,
mutex_lock(&dev->struct_mutex);
- info = framebuffer_alloc(0, device);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_err1;
}
info->par = fbdev;
@@ -426,7 +425,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
psbfb->fbdev = info;
fbdev->psb_fb_helper.fb = fb;
- fbdev->psb_fb_helper.fbdev = info;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
strcpy(info->fix.id, "psbdrmfb");
@@ -440,12 +438,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
} else /* Software */
info->fbops = &psbfb_unaccel_ops;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unref;
- }
-
info->fix.smem_start = dev->mode_config.fb_base;
info->fix.smem_len = size;
info->fix.ywrapstep = gtt_roll;
@@ -456,11 +448,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
@@ -483,6 +470,8 @@ out_unref:
psb_gtt_free_range(dev, backing);
else
drm_gem_object_unreference(&backing->gem);
+
+ drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
out_err1:
mutex_unlock(&dev->struct_mutex);
psb_gtt_free_range(dev, backing);
@@ -570,16 +559,11 @@ static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
- struct fb_info *info;
struct psb_framebuffer *psbfb = &fbdev->pfb;
- if (fbdev->psb_fb_helper.fbdev) {
- info = fbdev->psb_fb_helper.fbdev;
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
+ drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
+
drm_fb_helper_fini(&fbdev->psb_fb_helper);
drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
uint8_t *buf, size_t size)
{
- buf[PB(0)] = tda998x_cksum(buf, size);
-
reg_clear(priv, REG_DIP_IF_FLAGS, bit);
reg_write_range(priv, addr, buf, size);
reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
buf[PB(4)] = p->audio_frame[4];
buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+ buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+
tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
sizeof(buf));
}
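The checksum byte is now filled in by tda998x_write_aif() itself, after the audio infoframe payload bytes have been set, rather than recomputed inside tda998x_write_if() for every caller. tda998x_cksum() is not part of this hunk; HDMI infoframe checksums conventionally make the byte sum wrap to zero modulo 256, so its body is presumably along these lines (an assumption, not the driver's verbatim code):

	static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
	{
		uint8_t sum = 0;

		/* checksum makes the sum of all infoframe bytes == 0 (mod 256) */
		while (bytes--)
			sum += *buf++;
		return 256 - sum;
	}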
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 74acca9bcd9d..051eab33e4c7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -36,30 +36,6 @@ config DRM_I915
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
-config DRM_I915_KMS
- bool "Enable modesetting on intel by default"
- depends on DRM_I915
- default y
- help
- Choose this option if you want kernel modesetting enabled by default.
-
- If in doubt, say "Y".
-
-config DRM_I915_FBDEV
- bool "Enable legacy fbdev support for the modesetting intel driver"
- depends on DRM_I915
- select DRM_KMS_FB_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- default y
- help
- Choose this option if you have a need for the legacy fbdev
- support. Note that this support also provide the linux console
- support on top of the intel modesetting driver.
-
- If in doubt, say "Y".
-
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b7ddf48e1d75..998b4643109f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -6,12 +6,13 @@
# core driver code
i915-y := i915_drv.o \
+ i915_irq.o \
i915_params.o \
i915_suspend.o \
i915_sysfs.o \
+ intel_csr.o \
intel_pm.o \
- intel_runtime_pm.o \
- intel_csr.o
+ intel_runtime_pm.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -20,21 +21,22 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
i915-y += i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_context.o \
- i915_gem_render_state.o \
i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
+ i915_gem_fence.o \
i915_gem_gtt.o \
i915_gem.o \
+ i915_gem_render_state.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gpu_error.o \
- i915_irq.o \
i915_trace_points.o \
intel_lrc.o \
+ intel_mocs.o \
intel_ringbuffer.o \
intel_uncore.o
@@ -46,18 +48,21 @@ i915-y += intel_renderstate_gen6.o \
# modesetting core code
i915-y += intel_audio.o \
+ intel_atomic.o \
+ intel_atomic_plane.o \
intel_bios.o \
intel_display.o \
intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
+ intel_hotplug.o \
intel_modes.o \
intel_overlay.o \
intel_psr.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
-i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o
# modesetting output/encoder code
i915-y += dvo_ch7017.o \
@@ -66,15 +71,13 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
- intel_atomic.o \
- intel_atomic_plane.o \
intel_crt.o \
intel_ddi.o \
- intel_dp.o \
intel_dp_mst.o \
+ intel_dp.o \
intel_dsi.o \
- intel_dsi_pll.o \
intel_dsi_panel_vbt.o \
+ intel_dsi_pll.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 89b08a896d20..732ce8785945 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -22,6 +22,7 @@
*
* Authors:
* Eric Anholt <eric@anholt.net>
+ * Thomas Richter <thor@math.tu-berlin.de>
*
* Minor modifications (Dithering enable):
* Thomas Richter <thor@math.tu-berlin.de>
@@ -90,7 +91,7 @@
/*
* LCD Vertical Display Size
*/
-#define VR21 0x20
+#define VR21 0x21
/*
* Panel power down status
@@ -155,16 +156,33 @@
# define VR8F_POWER_MASK (0x3c)
# define VR8F_POWER_POS (2)
+/* Some BIOS implementations do not restore the DVO state upon
+ * resume from standby. Thus, this driver has to handle it
+ * instead. The following list contains all registers that
+ * require saving.
+ */
+static const uint16_t backup_addresses[] = {
+ 0x11, 0x12,
+ 0x18, 0x19, 0x1a, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x8e, 0x8f,
+ 0x10 /* this must come last */
+};
+
struct ivch_priv {
bool quiet;
uint16_t width, height;
+
+ /* Register backup */
+
+ uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
};
static void ivch_dump_regs(struct intel_dvo_device *dvo);
-
/**
* Reads a register on the ivch.
*
@@ -246,6 +264,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
{
struct ivch_priv *priv;
uint16_t temp;
+ int i;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
if (priv == NULL)
@@ -273,6 +292,14 @@ static bool ivch_init(struct intel_dvo_device *dvo,
ivch_read(dvo, VR20, &priv->width);
ivch_read(dvo, VR21, &priv->height);
+	/* Make a backup of the registers so that they can be restored
+	 * after resume.
+ */
+ for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+ ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
+
+ ivch_dump_regs(dvo);
+
return true;
out:
@@ -294,12 +321,31 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
return MODE_OK;
}
+/* Restore the DVO registers after a resume
+ * from RAM. Registers have been saved during
+ * the initialization.
+ */
+static void ivch_reset(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ int i;
+
+ DRM_DEBUG_KMS("Resetting the IVCH registers\n");
+
+ ivch_write(dvo, VR10, 0x0000);
+
+ for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+ ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
+}
+
/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
+ ivch_reset(dvo);
+
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return;
@@ -308,6 +354,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
backlight = 1;
else
backlight = 0;
+
ivch_write(dvo, VR80, backlight);
if (enable)
@@ -334,6 +381,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
uint16_t vr01;
+ ivch_reset(dvo);
+
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return false;
@@ -348,11 +397,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct ivch_priv *priv = dvo->dev_priv;
uint16_t vr40 = 0;
uint16_t vr01 = 0;
uint16_t vr10;
- ivch_read(dvo, VR10, &vr10);
+ ivch_reset(dvo);
+
+ vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
+
/* Enable dithering for 18 bpp pipelines */
vr10 &= VR10_INTERFACE_DEPTH_MASK;
if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
@@ -366,7 +419,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
uint16_t x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
- vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
+ vr40 |= VR40_CLOCK_GATING_ENABLE;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -381,8 +434,6 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
ivch_write(dvo, VR01, vr01);
ivch_write(dvo, VR40, vr40);
-
- ivch_dump_regs(dvo);
}
static void ivch_dump_regs(struct intel_dvo_device *dvo)
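
The dvo_ivch.c changes above follow a save-at-init, replay-on-reset pattern: a fixed list of registers is read once in ivch_init() and written back by ivch_reset() whenever the DVO may have lost its state, such as after resume when the BIOS did not restore it. The standalone sketch below only illustrates that pattern; reg_read(), reg_write(), the fake register file and the register list are hypothetical stand-ins, not the driver's actual interfaces.

#include <stdint.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Fake register file standing in for the real DVO device. */
static uint16_t fake_regs[0x100];

static void reg_read(uint16_t addr, uint16_t *val)  { *val = fake_regs[addr]; }
static void reg_write(uint16_t addr, uint16_t val)  { fake_regs[addr] = val; }

/* Registers to preserve; the power-control register is listed last so it
 * is only written after the rest of the state has been replayed. */
static const uint16_t save_list[] = { 0x20, 0x21, 0x22, 0x10 };
static uint16_t saved[ARRAY_SIZE(save_list)];

static void backup_regs(void)
{
	/* Called once at init time, like the loop added to ivch_init(). */
	for (size_t i = 0; i < ARRAY_SIZE(save_list); i++)
		reg_read(save_list[i], &saved[i]);
}

static void restore_regs(void)
{
	/* Replay the saved values, in list order, after a resume. */
	for (size_t i = 0; i < ARRAY_SIZE(save_list); i++)
		reg_write(save_list[i], saved[i]);
}

Keeping the power register at the end of the list mirrors the "this must come last" constraint on backup_addresses[] in the patch: the rest of the configuration is in place before power state is touched.
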
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 306d9e4e5cf3..237ff6884a22 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -131,7 +131,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
- CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
+ CMD( MI_LOAD_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
@@ -151,8 +151,8 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
- CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
+ CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
@@ -564,7 +564,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
for (j = 0; j < table->count; j++) {
const struct drm_i915_cmd_descriptor *desc =
- &table->table[i];
+ &table->table[j];
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous) {
@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
}
@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* allowed mask/value pair given in the whitelist entry.
*/
if (reg->mask) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
reg_addr);
return false;
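
The one-character fix in validate_cmds_sorted(), from &table->table[i] to &table->table[j], matters because the inner loop previously fetched the same descriptor on every iteration, so curr never advanced and an out-of-order command table could not be detected. A simplified sketch of the corrected check, using a reduced descriptor type instead of the real struct drm_i915_cmd_descriptor:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct cmd_desc { uint32_t value; uint32_t mask; };

static bool cmds_sorted(const struct cmd_desc *table, size_t count)
{
	uint32_t previous = 0;

	for (size_t j = 0; j < count; j++) {
		/* Index with the inner loop variable j; indexing with an
		 * outer counter would compare the same entry every time and
		 * the check would never catch an unsorted table. */
		uint32_t curr = table[j].value & table[j].mask;

		if (curr < previous)
			return false;
		previous = curr;
	}
	return true;
}
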
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 82bbe3f2a7e1..33aabc79813b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,6 +117,20 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
+static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
+{
+ u64 size = 0;
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ drm_mm_node_allocated(&vma->node))
+ size += vma->node.size;
+ }
+
+ return size;
+}
+
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -156,13 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (!i915_is_ggtt(vma->vm))
- seq_puts(m, " (pp");
+ seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+ i915_is_ggtt(vma->vm) ? "g" : "pp",
+ vma->node.start, vma->node.size);
+ if (i915_is_ggtt(vma->vm))
+ seq_printf(m, ", type: %u)", vma->ggtt_view.type);
else
- seq_puts(m, " (g");
- seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
- vma->node.start, vma->node.size,
- vma->ggtt_view.type);
+ seq_puts(m, ")");
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -198,7 +212,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_vma *vma;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -231,7 +245,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
@@ -253,7 +267,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
int count, ret;
@@ -269,7 +283,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
list_add(&obj->obj_exec_link, &stolen);
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
@@ -292,14 +306,14 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += i915_gem_obj_ggtt_size(obj); \
+ size += i915_gem_obj_total_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
mappable_size += i915_gem_obj_ggtt_size(obj); \
@@ -310,10 +324,10 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
struct file_stats {
struct drm_i915_file_private *file_priv;
- int count;
- size_t total, unbound;
- size_t global, shared;
- size_t active, inactive;
+ unsigned long count;
+ u64 total, unbound;
+ u64 global, shared;
+ u64 active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
@@ -370,7 +384,7 @@ static int per_file_stats(int id, void *ptr, void *data)
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
name, \
stats.count, \
stats.total, \
@@ -405,7 +419,7 @@ static void print_batch_pool_stats(struct seq_file *m,
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
- size += i915_gem_obj_ggtt_size(vma->obj); \
+ size += i915_gem_obj_total_ggtt_size(vma->obj); \
++count; \
if (vma->obj->map_and_fenceable) { \
mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -420,7 +434,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 count, mappable_count, purgeable_count;
- size_t size, mappable_size, purgeable_size;
+ u64 size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
@@ -437,17 +451,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.bound_list, global_list);
- seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->active_list, mm_list);
- seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->inactive_list, mm_list);
- seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = purgeable_size = purgeable_count = 0;
@@ -456,7 +470,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
}
- seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+ seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -473,16 +487,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
++purgeable_count;
}
}
- seq_printf(m, "%u purgeable objects, %zu bytes\n",
+ seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
- seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
mappable_count, mappable_size);
- seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ seq_printf(m, "%u fault mappable objects, %llu bytes\n",
count, size);
- seq_printf(m, "%zu [%lu] gtt total\n",
+ seq_printf(m, "%llu [%llu] gtt total\n",
dev_priv->gtt.base.total,
- dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+ (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
@@ -519,7 +533,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -535,13 +549,13 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
describe_obj(m, obj);
seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
@@ -1132,9 +1146,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
IS_BROADWELL(dev) || IS_GEN9(dev)) {
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rp_state_limits;
+ u32 gt_perf_status;
+ u32 rp_state_cap;
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
@@ -1142,6 +1156,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
+ rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ if (IS_BROXTON(dev)) {
+ rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+ gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+ } else {
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ }
+
/* RPSTAT1 is in the GT power well */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -1229,7 +1252,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
- max_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+ rp_state_cap >> 16) & 0xff;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
@@ -1239,7 +1263,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
- max_freq = rp_state_cap & 0xff;
+ max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+ rp_state_cap >> 0) & 0xff;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
@@ -1581,6 +1606,21 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
return ironlake_drpc_info(m);
}
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+ dev_priv->fb_tracking.busy_bits);
+
+ seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+ dev_priv->fb_tracking.flip_bits);
+
+ return 0;
+}
+
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1593,51 +1633,20 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->fbc.lock);
- if (intel_fbc_enabled(dev)) {
+ if (intel_fbc_enabled(dev_priv))
seq_puts(m, "FBC enabled\n");
- } else {
- seq_puts(m, "FBC disabled: ");
- switch (dev_priv->fbc.no_fbc_reason) {
- case FBC_OK:
- seq_puts(m, "FBC actived, but currently disabled in hardware");
- break;
- case FBC_UNSUPPORTED:
- seq_puts(m, "unsupported by this chipset");
- break;
- case FBC_NO_OUTPUT:
- seq_puts(m, "no outputs");
- break;
- case FBC_STOLEN_TOO_SMALL:
- seq_puts(m, "not enough stolen memory");
- break;
- case FBC_UNSUPPORTED_MODE:
- seq_puts(m, "mode not supported");
- break;
- case FBC_MODE_TOO_LARGE:
- seq_puts(m, "mode too large");
- break;
- case FBC_BAD_PLANE:
- seq_puts(m, "FBC unsupported on plane");
- break;
- case FBC_NOT_TILED:
- seq_puts(m, "scanout buffer not tiled");
- break;
- case FBC_MULTIPLE_PIPES:
- seq_puts(m, "multiple pipes are enabled");
- break;
- case FBC_MODULE_PARAM:
- seq_puts(m, "disabled per module param (default off)");
- break;
- case FBC_CHIP_DEFAULT:
- seq_puts(m, "disabled per chip default");
- break;
- default:
- seq_puts(m, "unknown reason");
- }
- seq_putc(m, '\n');
- }
+ else
+ seq_printf(m, "FBC disabled: %s\n",
+ intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
+
+ if (INTEL_INFO(dev_priv)->gen >= 7)
+ seq_printf(m, "Compressing: %s\n",
+ yesno(I915_READ(FBC_STATUS2) &
+ FBC_COMPRESSION_MASK));
+ mutex_unlock(&dev_priv->fbc.lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1651,9 +1660,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
return -ENODEV;
- drm_modeset_lock_all(dev);
*val = dev_priv->fbc.false_color;
- drm_modeset_unlock_all(dev);
return 0;
}
@@ -1667,7 +1674,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
return -ENODEV;
- drm_modeset_lock_all(dev);
+ mutex_lock(&dev_priv->fbc.lock);
reg = I915_READ(ILK_DPFC_CONTROL);
dev_priv->fbc.false_color = val;
@@ -1676,7 +1683,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
(reg | FBC_CTL_FALSE_COLOR) :
(reg & ~FBC_CTL_FALSE_COLOR));
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&dev_priv->fbc.lock);
return 0;
}
@@ -1778,8 +1785,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
int gpu_freq, ia_freq;
+ unsigned int max_gpu_freq, min_gpu_freq;
- if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ if (!HAS_CORE_RING_FREQ(dev)) {
seq_puts(m, "unsupported on this chipset\n");
return 0;
}
@@ -1792,17 +1800,27 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
+ if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 MHz units */
+ min_gpu_freq =
+ dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
+ max_gpu_freq =
+ dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+ } else {
+ min_gpu_freq = dev_priv->rps.min_freq_softlimit;
+ max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+ }
+
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
- for (gpu_freq = dev_priv->rps.min_freq_softlimit;
- gpu_freq <= dev_priv->rps.max_freq_softlimit;
- gpu_freq++) {
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(dev_priv, gpu_freq),
+ intel_gpu_freq(dev_priv, (gpu_freq *
+ (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1848,8 +1866,9 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct intel_fbdev *ifbdev = NULL;
struct intel_framebuffer *fb;
+ struct drm_framebuffer *drm_fb;
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = dev->dev_private;
ifbdev = dev_priv->fbdev;
@@ -1867,7 +1886,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
#endif
mutex_lock(&dev->mode_config.fb_lock);
- list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+ drm_for_each_fb(drm_fb, dev) {
+ fb = to_intel_framebuffer(drm_fb);
if (ifbdev && &fb->base == ifbdev->helper.fb)
continue;
@@ -2248,7 +2268,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
seq_puts(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
ppgtt->debug_dump(ppgtt, m);
}
@@ -2479,13 +2499,13 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
return 0;
}
-static int i915_pc8_status(struct seq_file *m, void *unused)
+static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+ if (!HAS_RUNTIME_PM(dev)) {
seq_puts(m, "not supported\n");
return 0;
}
@@ -2493,6 +2513,12 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
+#ifdef CONFIG_PM
+ seq_printf(m, "Usage count: %d\n",
+ atomic_read(&dev->dev->power.usage_count));
+#else
+ seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
+#endif
return 0;
}
@@ -2780,13 +2806,16 @@ static int i915_display_info(struct seq_file *m, void *unused)
seq_printf(m, "---------\n");
for_each_intel_crtc(dev, crtc) {
bool active;
+ struct intel_crtc_state *pipe_config;
int x, y;
+ pipe_config = to_intel_crtc_state(crtc->base.state);
+
seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active), crtc->config->pipe_src_w,
- crtc->config->pipe_src_h);
- if (crtc->active) {
+ yesno(pipe_config->base.active),
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+ if (pipe_config->base.active) {
intel_crtc_info(m, crtc);
active = cursor_position(dev, crtc->pipe, &x, &y);
@@ -3027,7 +3056,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
seq_puts(m, "\n\n");
- if (intel_crtc->config->has_drrs) {
+ if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
struct intel_panel *panel;
mutex_lock(&drrs->mutex);
@@ -3079,7 +3108,7 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
for_each_intel_crtc(dev, intel_crtc) {
drm_modeset_lock(&intel_crtc->base.mutex, NULL);
- if (intel_crtc->active) {
+ if (intel_crtc->base.state->active) {
active_crtc_cnt++;
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
@@ -3616,53 +3645,40 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+ struct intel_crtc_state *pipe_config;
+ struct drm_atomic_state *state;
+ int ret = 0;
drm_modeset_lock_all(dev);
- /*
- * If we use the eDP transcoder we need to make sure that we don't
- * bypass the pfit, since otherwise the pipe CRC source won't work. Only
- * relevant on hsw with pipe A when using the always-on power well
- * routing.
- */
- if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
- !crtc->config->pch_pfit.enabled) {
- crtc->config->pch_pfit.force_thru = true;
-
- intel_display_power_get(dev_priv,
- POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-
- intel_crtc_reset(crtc);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
}
- drm_modeset_unlock_all(dev);
-}
-
-static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
- drm_modeset_lock_all(dev);
- /*
- * If we use the eDP transcoder we need to make sure that we don't
- * bypass the pfit, since otherwise the pipe CRC source won't work. Only
- * relevant on hsw with pipe A when using the always-on power well
- * routing.
- */
- if (crtc->config->pch_pfit.force_thru) {
- crtc->config->pch_pfit.force_thru = false;
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+ pipe_config = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto out;
+ }
- intel_crtc_reset(crtc);
+ pipe_config->pch_pfit.force_thru = enable;
+ if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+ pipe_config->pch_pfit.enabled != enable)
+ pipe_config->base.connectors_changed = true;
- intel_display_power_put(dev_priv,
- POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
- }
+ ret = drm_atomic_commit(state);
+out:
drm_modeset_unlock_all(dev);
+ WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+ if (ret)
+ drm_atomic_state_free(state);
}
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
@@ -3682,7 +3698,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
break;
case INTEL_PIPE_CRC_SOURCE_PF:
if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev);
+ hsw_trans_edp_pipe_A_crc_wa(dev, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
@@ -3776,7 +3792,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
pipe_name(pipe));
drm_modeset_lock(&crtc->base.mutex, NULL);
- if (crtc->active)
+ if (crtc->base.state->active)
intel_wait_for_vblank(dev, pipe);
drm_modeset_unlock(&crtc->base.mutex);
@@ -3794,7 +3810,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
else if (IS_VALLEYVIEW(dev))
vlv_undo_pipe_scramble_reset(dev, pipe);
else if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+ hsw_trans_edp_pipe_A_crc_wa(dev, false);
hsw_enable_ips(crtc);
}
@@ -3980,24 +3996,14 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
{
char *input_buffer;
int status = 0;
- struct seq_file *m;
struct drm_device *dev;
struct drm_connector *connector;
struct list_head *connector_list;
struct intel_dp *intel_dp;
int val = 0;
- m = file->private_data;
- if (!m) {
- status = -ENODEV;
- return status;
- }
- dev = m->private;
+ dev = ((struct seq_file *)file->private_data)->private;
- if (!dev) {
- status = -ENODEV;
- return status;
- }
connector_list = &dev->mode_config.connector_list;
if (len == 0)
@@ -4021,9 +4027,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->connector_type ==
- DRM_MODE_CONNECTOR_DisplayPort &&
- connector->status == connector_status_connected &&
+ if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
status = kstrtoint(input_buffer, 10, &val);
@@ -4055,9 +4059,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4102,9 +4103,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4144,9 +4142,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4183,8 +4178,15 @@ static const struct file_operations i915_displayport_test_type_fops = {
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
struct drm_device *dev = m->private;
- int num_levels = ilk_wm_max_level(dev) + 1;
int level;
+ int num_levels;
+
+ if (IS_CHERRYVIEW(dev))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev))
+ num_levels = 1;
+ else
+ num_levels = ilk_wm_max_level(dev) + 1;
drm_modeset_lock_all(dev);
@@ -4193,9 +4195,9 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
/*
* - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9
+ * - latencies are in us on gen9/vlv/chv
*/
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -4259,7 +4261,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev))
+ if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev);
@@ -4291,11 +4293,18 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
uint16_t new[8] = { 0 };
- int num_levels = ilk_wm_max_level(dev) + 1;
+ int num_levels;
int level;
int ret;
char tmp[32];
+ if (IS_CHERRYVIEW(dev))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev))
+ num_levels = 1;
+ else
+ num_levels = ilk_wm_max_level(dev) + 1;
+
if (len >= sizeof(tmp))
return -EINVAL;
@@ -5027,6 +5036,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
+ {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -5042,7 +5052,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
- {"i915_pc8_status", i915_pc8_status, 0},
+ {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d2df321ba634..ab37d1121be8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -163,6 +163,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
if (!value)
return -ENODEV;
break;
+ case I915_PARAM_HAS_GPU_RESET:
+ value = i915.enable_hangcheck &&
+ intel_has_gpu_reset(dev);
+ break;
+ case I915_PARAM_HAS_RESOURCE_STREAMER:
+ value = HAS_RESOURCE_STREAMER(dev);
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -719,11 +726,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info = (struct intel_device_info *)&dev_priv->info;
+ /*
+ * Skylake and Broxton currently don't expose the topmost plane as its
+ * use is exclusive with the legacy cursor and we only want to expose
+ * one of those, not both. Until we can safely expose the topmost plane
+ * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+ * we don't expose the topmost plane at all to prevent ABI breakage
+ * down the line.
+ */
if (IS_BROXTON(dev)) {
- info->num_sprites[PIPE_A] = 3;
- info->num_sprites[PIPE_B] = 3;
- info->num_sprites[PIPE_C] = 2;
- } else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+ info->num_sprites[PIPE_A] = 2;
+ info->num_sprites[PIPE_B] = 2;
+ info->num_sprites[PIPE_C] = 1;
+ } else if (IS_VALLEYVIEW(dev))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
@@ -933,8 +948,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
- dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->dp_wq == NULL) {
+ dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->hotplug.dp_wq == NULL) {
DRM_ERROR("Failed to create our dp workqueue.\n");
ret = -ENOMEM;
goto out_freewq;
@@ -1029,7 +1044,7 @@ out_gem_unload:
pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
- destroy_workqueue(dev_priv->dp_wq);
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
out_freewq:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
@@ -1116,6 +1131,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
+ intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev);
intel_csr_ucode_fini(dev);
@@ -1123,7 +1139,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
- destroy_workqueue(dev_priv->dp_wq);
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1258,13 +1274,3 @@ const struct drm_ioctl_desc i915_ioctls[] = {
};
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-
-/*
- * This is really ugly: Because old userspace abused the linux agp interface to
- * manage the gtt, we need to claim that all intel devices are agp. For
- * otherwise the drm core refuses to initialize the agp support code.
- */
-int i915_driver_device_is_agp(struct drm_device *dev)
-{
- return 1;
-}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 884b4f9b81c4..1d887459e37f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,7 +356,6 @@ static const struct intel_device_info intel_cherryview_info = {
};
static const struct intel_device_info intel_skylake_info = {
- .is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
@@ -369,7 +368,6 @@ static const struct intel_device_info intel_skylake_info = {
};
static const struct intel_device_info intel_skylake_gt3_info = {
- .is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
@@ -440,9 +438,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
{0, 0, 0}
};
-#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
void intel_detect_pch(struct drm_device *dev)
{
@@ -541,21 +537,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return true;
}
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
-
- dev_priv->long_hpd_port_mask = 0;
- dev_priv->short_hpd_port_mask = 0;
- dev_priv->hpd_event_bits = 0;
-
- spin_unlock_irq(&dev_priv->irq_lock);
-
- cancel_work_sync(&dev_priv->dig_port_work);
- cancel_work_sync(&dev_priv->hotplug_work);
- cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
-}
-
void i915_firmware_load_error_print(const char *fw_path, int err)
{
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
@@ -601,7 +582,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
pci_power_t opregion_target_state;
int error;
@@ -632,8 +612,7 @@ static int i915_drm_suspend(struct drm_device *dev)
* for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc)
- intel_crtc_control(crtc, false);
+ intel_display_suspend(dev);
drm_modeset_unlock_all(dev);
intel_dp_mst_suspend(dev);
@@ -760,7 +739,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
+ intel_display_resume(dev);
drm_modeset_unlock_all(dev);
intel_dp_mst_resume(dev);
@@ -865,9 +844,6 @@ int i915_reset(struct drm_device *dev)
bool simulated;
int ret;
- if (!i915.reset)
- return 0;
-
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
@@ -959,8 +935,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
- driver.driver_features &= ~(DRIVER_USE_AGP);
-
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -1515,7 +1489,15 @@ static int intel_runtime_suspend(struct device *device)
* FIXME: We really should find a document that references the arguments
* used below!
*/
- if (IS_HASWELL(dev)) {
+ if (IS_BROADWELL(dev)) {
+ /*
+ * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+ * being detected, and the call we do at intel_runtime_resume()
+ * won't be able to restore them. Since PCI_D3hot matches the
+ * actual specification and appears to be working, use it.
+ */
+ intel_opregion_notify_adapter(dev, PCI_D3hot);
+ } else {
/*
* current versions of firmware which depend on this opregion
* notification have repurposed the D1 definition to mean
@@ -1524,16 +1506,6 @@ static int intel_runtime_suspend(struct device *device)
* the suspend path.
*/
intel_opregion_notify_adapter(dev, PCI_D1);
- } else {
- /*
- * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
- * being detected, and the call we do at intel_runtime_resume()
- * won't be able to restore them. Since PCI_D3hot matches the
- * actual specification and appears to be working, use it. Let's
- * assume the other non-Haswell platforms will stay the same as
- * Broadwell.
- */
- intel_opregion_notify_adapter(dev, PCI_D3hot);
}
assert_forcewakes_inactive(dev_priv);
@@ -1673,7 +1645,6 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER,
.load = i915_driver_load,
@@ -1688,7 +1659,6 @@ static struct drm_driver driver = {
.suspend = i915_suspend_legacy,
.resume = i915_resume_legacy,
- .device_is_agp = i915_driver_device_is_agp,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1727,20 +1697,14 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
/*
- * If CONFIG_DRM_I915_KMS is set, default to KMS unless
- * explicitly disabled with the module pararmeter.
- *
- * Otherwise, just follow the parameter (defaulting to off).
- *
- * Allow optional vga_text_mode_force boot option to override
- * the default behavior.
+	 * Enable KMS by default, unless explicitly overridden by
+	 * either the i915.modeset parameter or the
+	 * vga_text_mode_force boot option.
*/
-#if defined(CONFIG_DRM_I915_KMS)
- if (i915.modeset != 0)
- driver.driver_features |= DRIVER_MODESET;
-#endif
- if (i915.modeset == 1)
- driver.driver_features |= DRIVER_MODESET;
+ driver.driver_features |= DRIVER_MODESET;
+
+ if (i915.modeset == 0)
+ driver.driver_features &= ~DRIVER_MODESET;
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && i915.modeset == -1)
@@ -1759,7 +1723,7 @@ static int __init i915_init(void)
* to the atomic ioctl and the atomic properties. Only plane operations on
* a single CRTC will actually work.
*/
- if (i915.nuclear_pageflip)
+ if (driver.driver_features & DRIVER_MODESET)
driver.driver_features |= DRIVER_ATOMIC;
return drm_pci_init(&driver, &i915_pci_driver);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 542fac628b28..599441beea17 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150522"
+#define DRIVER_DATE "20150731"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -206,17 +206,50 @@ enum intel_display_power_domain {
enum hpd_pin {
HPD_NONE = 0,
- HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
HPD_CRT,
HPD_SDVO_B,
HPD_SDVO_C,
+ HPD_PORT_A,
HPD_PORT_B,
HPD_PORT_C,
HPD_PORT_D,
HPD_NUM_PINS
};
+#define for_each_hpd_pin(__pin) \
+ for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
+struct i915_hotplug {
+ struct work_struct hotplug_work;
+
+ struct {
+ unsigned long last_jiffies;
+ int count;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } state;
+ } stats[HPD_NUM_PINS];
+ u32 event_bits;
+ struct delayed_work reenable_work;
+
+ struct intel_digital_port *irq_port[I915_MAX_PORTS];
+ u32 long_port_mask;
+ u32 short_port_mask;
+ struct work_struct dig_port_work;
+
+	/*
+	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
+	 * non-DP HPD could block the workqueue while waiting for the mode
+	 * config mutex, which userspace may already hold. However, userspace
+	 * is waiting on the DP workqueue to run, and that workqueue is
+	 * blocked behind the non-DP one.
+	 */
+ struct workqueue_struct *dp_wq;
+};
+
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
@@ -243,6 +276,12 @@ enum hpd_pin {
&dev->mode_config.plane_list, \
base.head)
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ if ((intel_plane)->pipe == (intel_crtc)->pipe)
+
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
@@ -333,7 +372,8 @@ struct intel_dpll_hw_state {
uint32_t cfgcr1, cfgcr2;
/* bxt */
- uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
+ uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+ pcsdw12;
};
struct intel_shared_dpll_config {
@@ -343,7 +383,6 @@ struct intel_shared_dpll_config {
struct intel_shared_dpll {
struct intel_shared_dpll_config config;
- struct intel_shared_dpll_config *new_config;
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
@@ -445,6 +484,7 @@ struct drm_i915_error_state {
struct timeval time;
char error_msg[128];
+ int iommu;
u32 reset_count;
u32 suspend_count;
@@ -559,9 +599,6 @@ struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
- bool (*fbc_enabled)(struct drm_device *dev);
- void (*enable_fbc)(struct drm_crtc *crtc);
- void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
/**
@@ -587,7 +624,8 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
- void (*modeset_global_resources)(struct drm_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+ void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -598,7 +636,6 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
- void (*off)(struct drm_crtc *crtc);
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode);
@@ -608,7 +645,7 @@ struct drm_i915_display_funcs {
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags);
void (*update_primary_plane)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -706,7 +743,7 @@ enum csr_state {
struct intel_csr {
const char *fw_path;
- __be32 *dmc_payload;
+ uint32_t *dmc_payload;
uint32_t dmc_fw_size;
uint32_t mmio_count;
uint32_t mmioaddr[8];
@@ -805,11 +842,15 @@ struct i915_ctx_hang_stats {
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
+
+#define CONTEXT_NO_ZEROMAP (1<<0)
/**
* struct intel_context - as the name implies, represents a context.
* @ref: reference count.
* @user_handle: userspace tracking identity for this context.
* @remap_slice: l3 row remapping information.
+ * @flags: context specific flags:
+ * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
* @file_priv: filp associated with this context (NULL for global default
* context).
* @hang_stats: information about the role of this context in possible GPU
@@ -826,6 +867,8 @@ struct intel_context {
struct kref ref;
int user_handle;
uint8_t remap_slice;
+ struct drm_i915_private *i915;
+ int flags;
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
@@ -852,9 +895,13 @@ enum fb_op_origin {
ORIGIN_CPU,
ORIGIN_CS,
ORIGIN_FLIP,
+ ORIGIN_DIRTYFB,
};
struct i915_fbc {
+ /* This is always the inner lock when overlapping with struct_mutex and
+ * it's the outer lock when overlapping with stolen_lock. */
+ struct mutex lock;
unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
@@ -874,7 +921,7 @@ struct i915_fbc {
struct intel_fbc_work {
struct delayed_work work;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
struct drm_framebuffer *fb;
} *fbc_work;
@@ -890,7 +937,13 @@ struct i915_fbc {
FBC_MULTIPLE_PIPES, /* more than one pipe active */
FBC_MODULE_PARAM,
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+ FBC_ROTATION, /* rotation is not supported */
+ FBC_IN_DBG_MASTER, /* kernel debugger is active */
} no_fbc_reason;
+
+ bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
+ void (*enable_fbc)(struct intel_crtc *crtc);
+ void (*disable_fbc)(struct drm_i915_private *dev_priv);
};
/**
@@ -1200,6 +1253,10 @@ struct intel_l3_parity {
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
+ /** Protects the usage of the GTT stolen memory allocator. This is
+ * always the inner lock when overlapping with struct_mutex. */
+ struct mutex stolen_lock;
+
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
@@ -1353,6 +1410,11 @@ enum modeset_restore {
MODESET_SUSPENDED,
};
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+
struct ddi_vbt_port_info {
/*
* This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1365,6 +1427,11 @@ struct ddi_vbt_port_info {
uint8_t supports_dvi:1;
uint8_t supports_hdmi:1;
uint8_t supports_dp:1;
+
+ uint8_t alternate_aux_channel;
+
+ uint8_t dp_boost_level;
+ uint8_t hdmi_boost_level;
};
enum psr_lines_to_wait {
@@ -1460,23 +1527,27 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
-struct vlv_wm_values {
- struct {
- uint16_t primary;
- uint16_t sprite[2];
- uint8_t cursor;
- } pipe[3];
+struct vlv_pipe_wm {
+ uint16_t primary;
+ uint16_t sprite[2];
+ uint8_t cursor;
+};
- struct {
- uint16_t plane;
- uint8_t cursor;
- } sr;
+struct vlv_sr_wm {
+ uint16_t plane;
+ uint8_t cursor;
+};
+struct vlv_wm_values {
+ struct vlv_pipe_wm pipe[3];
+ struct vlv_sr_wm sr;
struct {
uint8_t cursor;
uint8_t sprite[2];
uint8_t primary;
} ddl[3];
+ uint8_t level;
+ bool cxsr;
};
struct skl_ddb_entry {
@@ -1610,6 +1681,18 @@ struct i915_virtual_gpu {
bool active;
};
+struct i915_execbuffer_params {
+ struct drm_device *dev;
+ struct drm_file *file;
+ uint32_t dispatch_flags;
+ uint32_t args_batch_start_offset;
+ uint32_t batch_obj_vm_offset;
+ struct intel_engine_cs *ring;
+ struct drm_i915_gem_object *batch_obj;
+ struct intel_context *ctx;
+ struct drm_i915_gem_request *request;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *objects;
@@ -1679,19 +1762,7 @@ struct drm_i915_private {
u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
- struct work_struct hotplug_work;
- struct {
- unsigned long hpd_last_jiffies;
- int hpd_cnt;
- enum {
- HPD_ENABLED = 0,
- HPD_DISABLED = 1,
- HPD_MARK_DISABLED = 2
- } hpd_mark;
- } hpd_stats[HPD_NUM_PINS];
- u32 hpd_event_bits;
- struct delayed_work hotplug_reenable_work;
-
+ struct i915_hotplug hotplug;
struct i915_fbc fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
@@ -1717,7 +1788,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
- unsigned int cdclk_freq;
+ unsigned int cdclk_freq, max_cdclk_freq;
unsigned int hpll_freq;
/**
@@ -1768,9 +1839,6 @@ struct drm_i915_private {
/* Reclocking support */
bool render_reclock_avail;
- bool lvds_downclock_avail;
- /* indicates the reduced downclock for LVDS*/
- int lvds_downclock;
struct i915_frontbuffer_tracking fb_tracking;
@@ -1798,7 +1866,7 @@ struct drm_i915_private {
struct drm_i915_gem_object *vlv_pctx;
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
struct work_struct fbdev_suspend_work;
@@ -1857,29 +1925,11 @@ struct drm_i915_private {
struct i915_runtime_pm pm;
- struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
- u32 long_hpd_port_mask;
- u32 short_hpd_port_mask;
- struct work_struct dig_port_work;
-
- /*
- * if we get a HPD irq from DP and a HPD irq from non-DP
- * the non-DP HPD could block the workqueue on a mode config
- * mutex getting, that userspace may have taken. However
- * userspace is waiting on the DP workqueue to run which is
- * blocked behind the non-DP one.
- */
- struct workqueue_struct *dp_wq;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+ int (*execbuf_submit)(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags);
+ struct list_head *vmas);
int (*init_rings)(struct drm_device *dev);
void (*cleanup_ring)(struct intel_engine_cs *ring);
void (*stop_ring)(struct intel_engine_cs *ring);
@@ -2036,8 +2086,6 @@ struct drm_i915_gem_object {
unsigned int cache_level:3;
unsigned int cache_dirty:1;
- unsigned int has_dma_mapping:1;
-
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
unsigned int pin_display;
@@ -2149,7 +2197,8 @@ struct drm_i915_gem_request {
struct intel_context *ctx;
struct intel_ringbuffer *ringbuf;
- /** Batch buffer related to this request if any */
+ /** Batch buffer related to this request if any (used for
+ error state dump only) */
struct drm_i915_gem_object *batch_obj;
/** Time at which this request was emitted, in jiffies. */
@@ -2187,8 +2236,12 @@ struct drm_i915_gem_request {
};
int i915_gem_request_alloc(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+ struct intel_context *ctx,
+ struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+ struct drm_file *file);
static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
@@ -2392,6 +2445,9 @@ struct drm_i915_cmd_table {
((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xb || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
+/* ULX machines are also considered ULT. */
+#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
+ (INTEL_DEVID(dev) & 0xf) == 0xe)
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
@@ -2401,6 +2457,14 @@ struct drm_i915_cmd_table {
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
INTEL_DEVID(dev) == 0x0A1E)
+#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
+ INTEL_DEVID(dev) == 0x1913 || \
+ INTEL_DEVID(dev) == 0x1916 || \
+ INTEL_DEVID(dev) == 0x1921 || \
+ INTEL_DEVID(dev) == 0x1926)
+#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
+ INTEL_DEVID(dev) == 0x1915 || \
+ INTEL_DEVID(dev) == 0x191E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
#define SKL_REVID_A0 (0x0)
@@ -2467,9 +2531,6 @@ struct drm_i915_cmd_table {
*/
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
@@ -2495,6 +2556,12 @@ struct drm_i915_cmd_table {
#define HAS_CSR(dev) (IS_SKYLAKE(dev))
+#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
+ INTEL_INFO(dev)->gen >= 8)
+
+#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
+ !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -2534,7 +2601,6 @@ struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
- unsigned int lvds_downclock;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
@@ -2556,10 +2622,11 @@ struct i915_params {
bool reset;
bool disable_display;
bool disable_vtd_wa;
+ bool enable_guc_submission;
+ int guc_log_level;
int use_mmio_flip;
int mmio_debug;
bool verbose_state_checks;
- bool nuclear_pageflip;
int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
@@ -2573,21 +2640,27 @@ extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file);
-extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
extern int intel_gpu_reset(struct drm_device *dev);
+extern bool intel_has_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
void i915_firmware_load_error_print(const char *fw_path, int err);
+/* intel_hotplug.c */
+void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
@@ -2595,7 +2668,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_hpd_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
@@ -2662,19 +2734,11 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
- struct intel_engine_cs *ring);
-void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
- struct drm_file *file,
- struct intel_engine_cs *ring,
- struct drm_i915_gem_object *obj);
-int i915_gem_ringbuffer_submission(struct drm_device *dev,
- struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+ struct drm_i915_gem_request *req);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags);
+ struct list_head *vmas);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2707,6 +2771,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
+struct drm_i915_gem_object *i915_gem_object_create_from_data(
+ struct drm_device *dev, const void *data, size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
@@ -2781,9 +2847,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *to);
+ struct intel_engine_cs *to,
+ struct drm_i915_gem_request **to_req);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_engine_cs *ring);
+ struct drm_i915_gem_request *req);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2812,11 +2879,6 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);
@@ -2825,7 +2887,6 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
-int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
@@ -2860,16 +2921,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
- struct drm_file *file,
- struct drm_i915_gem_object *batch_obj);
-#define i915_add_request(ring) \
- __i915_add_request(ring, NULL, NULL)
+void __i915_add_request(struct drm_i915_gem_request *req,
+ struct drm_i915_gem_object *batch_obj,
+ bool flush_caches);
+#define i915_add_request(req) \
+ __i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+ __i915_add_request(req, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
@@ -2889,6 +2952,7 @@ int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_engine_cs *pipelined,
+ struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
@@ -2912,8 +2976,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-void i915_gem_restore_fences(struct drm_device *dev);
-
unsigned long
i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view);
@@ -3008,15 +3070,27 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
}
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_private *dev_priv);
+int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
- struct intel_context *to);
+int i915_switch_context(struct drm_i915_gem_request *req);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
@@ -3066,9 +3140,12 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
}
/* i915_gem_stolen.c */
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3098,10 +3175,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
obj->tiling_mode != I915_TILING_NONE;
}
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
-
/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
@@ -3116,7 +3189,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) {}
+static inline int i915_debugfs_connector_add(struct drm_connector *connector)
+{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
@@ -3222,8 +3296,7 @@ extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev,
- bool force_restore);
+extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -3303,15 +3376,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
- u32 upper = I915_READ(upper_reg); \
- u32 lower = I915_READ(lower_reg); \
- u32 tmp = I915_READ(upper_reg); \
- if (upper != tmp) { \
- upper = tmp; \
- lower = I915_READ(lower_reg); \
- WARN_ON(I915_READ(upper_reg) != upper); \
- } \
- (u64)upper << 32 | lower; })
+ u32 upper, lower, tmp; \
+ tmp = I915_READ(upper_reg); \
+ do { \
+ upper = tmp; \
+ lower = I915_READ(lower_reg); \
+ tmp = I915_READ(upper_reg); \
+ } while (upper != tmp); \
+ (u64)upper << 32 | lower; })
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
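The reworked I915_READ64_2x32 above samples the upper dword, then the lower, then re-reads the upper and retries until the two upper samples agree, so a counter that carries into its high 32 bits between reads cannot yield a torn value. A minimal stand-alone sketch of the same technique follows; read_upper32()/read_lower32() are hypothetical accessors standing in for the real MMIO reads.

#include <stdint.h>

/* Hypothetical accessors standing in for I915_READ(upper_reg/lower_reg). */
extern uint32_t read_upper32(void);
extern uint32_t read_lower32(void);

static inline uint64_t read64_2x32(void)
{
	uint32_t upper, lower, tmp;

	tmp = read_upper32();
	do {
		/* If the high dword changed while the low dword was sampled,
		 * the low sample may belong to either value: resample both. */
		upper = tmp;
		lower = read_lower32();
		tmp = read_upper32();
	} while (upper != tmp);

	return ((uint64_t)upper << 32) | lower;
}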
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 248fd1ac7b3a..84f91bcc12f7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,11 +46,6 @@ static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj);
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
- struct drm_i915_fence_reg *fence,
- bool enable);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -66,18 +61,6 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
return obj->pin_display;
}
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
-
- /* As we do not have an associated fence register, we will force
- * a tiling change if we ever need to acquire one.
- */
- obj->fence_dirty = false;
- obj->fence_reg = I915_FENCE_REG_NONE;
-}
-
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
@@ -149,14 +132,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
- struct drm_i915_gem_object *obj;
+ struct i915_gtt *ggtt = &dev_priv->gtt;
+ struct i915_vma *vma;
size_t pinned;
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (i915_gem_obj_is_pinned(obj))
- pinned += i915_gem_obj_ggtt_size(obj);
+ list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+ if (vma->pin_count)
+ pinned += vma->node.size;
+ list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+ if (vma->pin_count)
+ pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->gtt.base.total;
@@ -213,7 +200,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
obj->pages = st;
- obj->has_dma_mapping = true;
return 0;
}
@@ -265,8 +251,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
sg_free_table(obj->pages);
kfree(obj->pages);
-
- obj->has_dma_mapping = false;
}
static void
@@ -350,7 +334,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+ intel_fb_obj_invalidate(obj, ORIGIN_CPU);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -371,7 +355,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
i915_gem_chipset_flush(dev);
out:
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
return ret;
}
@@ -804,7 +788,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+ intel_fb_obj_invalidate(obj, ORIGIN_GTT);
while (remain > 0) {
/* Operation in this page
@@ -835,7 +819,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
}
out_flush:
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
@@ -948,7 +932,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
if (ret)
return ret;
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+ intel_fb_obj_invalidate(obj, ORIGIN_CPU);
i915_gem_object_pin_pages(obj);
@@ -1028,7 +1012,7 @@ out:
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
return ret;
}
@@ -1149,23 +1133,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
return 0;
}
-/*
- * Compare arbitrary request against outstanding lazy request. Emit on match.
- */
-int
-i915_gem_check_olr(struct drm_i915_gem_request *req)
-{
- int ret;
-
- WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-
- ret = 0;
- if (req == req->ring->outstanding_lazy_request)
- ret = i915_add_request(req->ring);
-
- return ret;
-}
-
static void fake_irq(unsigned long data)
{
wake_up_process((struct task_struct *)data);
@@ -1337,6 +1304,33 @@ out:
return ret;
}
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_private;
+ struct drm_i915_file_private *file_priv;
+
+ WARN_ON(!req || !file || req->file_priv);
+
+ if (!req || !file)
+ return -EINVAL;
+
+ if (req->file_priv)
+ return -EINVAL;
+
+ dev_private = req->ring->dev->dev_private;
+ file_priv = file->driver_priv;
+
+ spin_lock(&file_priv->mm.lock);
+ req->file_priv = file_priv;
+ list_add_tail(&req->client_list, &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
+
+ req->pid = get_pid(task_pid(current));
+
+ return 0;
+}
+
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
@@ -1349,6 +1343,9 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
+
+ put_pid(request->pid);
+ request->pid = NULL;
}
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
@@ -1368,8 +1365,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->list);
i915_gem_request_remove_from_client(request);
- put_pid(request->pid);
-
i915_gem_request_unreference(request);
}
@@ -1418,10 +1413,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
if (ret)
return ret;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
@@ -1521,10 +1512,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (req == NULL)
return 0;
- ret = i915_gem_check_olr(req);
- if (ret)
- goto err;
-
requests[n++] = i915_gem_request_reference(req);
} else {
for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -1534,10 +1521,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (req == NULL)
continue;
- ret = i915_gem_check_olr(req);
- if (ret)
- goto err;
-
requests[n++] = i915_gem_request_reference(req);
}
}
@@ -1548,7 +1531,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
NULL, rps);
mutex_lock(&dev->struct_mutex);
-err:
for (i = 0; i < n; i++) {
if (ret == 0)
i915_gem_object_retire_request(obj, requests[i]);
@@ -1616,6 +1598,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
else
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ if (write_domain != 0)
+ intel_fb_obj_invalidate(obj,
+ write_domain == I915_GEM_DOMAIN_GTT ?
+ ORIGIN_GTT : ORIGIN_CPU);
+
unref:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -2139,6 +2126,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
+ i915_gem_gtt_finish_object(obj);
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
@@ -2199,6 +2188,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sg_page_iter sg_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
+ int ret;
gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2236,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*/
i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
goto err_pages;
+ }
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -2276,6 +2268,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
sg_mark_end(sg);
obj->pages = st;
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret)
+ goto err_pages;
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
@@ -2300,10 +2296,10 @@ err_pages:
* space and so want to translate the error from shmemfs back to our
* usual understanding of ENOMEM.
*/
- if (PTR_ERR(page) == -ENOSPC)
- return -ENOMEM;
- else
- return PTR_ERR(page);
+ if (ret == -ENOSPC)
+ ret = -ENOMEM;
+
+ return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -2343,9 +2339,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
struct drm_i915_gem_object *obj = vma->obj;
+ struct intel_engine_cs *ring;
+
+ ring = i915_gem_request_get_ring(req);
/* Add a reference if we're newly entering the active list. */
if (obj->active == 0)
@@ -2353,8 +2352,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
obj->active |= intel_ring_flag(ring);
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
- i915_gem_request_assign(&obj->last_read_req[ring->id],
- intel_ring_get_request(ring));
+ i915_gem_request_assign(&obj->last_read_req[ring->id], req);
list_move_tail(&vma->mm_list, &vma->vm->active_list);
}
@@ -2366,7 +2364,7 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
i915_gem_request_assign(&obj->last_write_req, NULL);
- intel_fb_obj_flush(obj, true);
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
static void
@@ -2387,6 +2385,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
if (obj->active)
return;
+ /* Bump our place on the bound list to keep it roughly in LRU order
+ * so that we don't steal from recently used but inactive objects
+ * (unless we are forced to ofc!)
+ */
+ list_move_tail(&obj->global_list,
+ &to_i915(obj->base.dev)->mm.bound_list);
+
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
@@ -2466,24 +2471,34 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}
-int __i915_add_request(struct intel_engine_cs *ring,
- struct drm_file *file,
- struct drm_i915_gem_object *obj)
+/*
+ * NB: This function is not allowed to fail. Doing so would mean the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+ struct drm_i915_gem_object *obj,
+ bool flush_caches)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_gem_request *request;
+ struct intel_engine_cs *ring;
+ struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
int ret;
- request = ring->outstanding_lazy_request;
if (WARN_ON(request == NULL))
- return -ENOMEM;
+ return;
- if (i915.enable_execlists) {
- ringbuf = request->ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
+ ring = request->ring;
+ dev_priv = ring->dev->dev_private;
+ ringbuf = request->ringbuf;
+
+ /*
+ * To ensure that this call will not fail, space for its emissions
+ * should already have been reserved in the ring buffer. Let the ring
+ * know that it is time to use that space up.
+ */
+ intel_ring_reserved_space_use(ringbuf);
request_start = intel_ring_get_tail(ringbuf);
/*
@@ -2493,14 +2508,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
* is that the flush _must_ happen before the next request, no matter
* what.
*/
- if (i915.enable_execlists) {
- ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
- if (ret)
- return ret;
- } else {
- ret = intel_ring_flush_all_caches(ring);
- if (ret)
- return ret;
+ if (flush_caches) {
+ if (i915.enable_execlists)
+ ret = logical_ring_flush_all_caches(request);
+ else
+ ret = intel_ring_flush_all_caches(request);
+ /* Not allowed to fail! */
+ WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
}
/* Record the position of the start of the request so that
@@ -2510,17 +2524,15 @@ int __i915_add_request(struct intel_engine_cs *ring,
*/
request->postfix = intel_ring_get_tail(ringbuf);
- if (i915.enable_execlists) {
- ret = ring->emit_request(ringbuf, request);
- if (ret)
- return ret;
- } else {
- ret = ring->add_request(ring);
- if (ret)
- return ret;
+ if (i915.enable_execlists)
+ ret = ring->emit_request(request);
+ else {
+ ret = ring->add_request(request);
request->tail = intel_ring_get_tail(ringbuf);
}
+ /* Not allowed to fail! */
+ WARN(ret, "emit|add_request failed: %d!\n", ret);
request->head = request_start;
@@ -2532,33 +2544,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
*/
request->batch_obj = obj;
- if (!i915.enable_execlists) {
- /* Hold a reference to the current context so that we can inspect
- * it later in case a hangcheck error event fires.
- */
- request->ctx = ring->last_context;
- if (request->ctx)
- i915_gem_context_reference(request->ctx);
- }
-
request->emitted_jiffies = jiffies;
+ ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
- request->file_priv = NULL;
-
- if (file) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
-
- spin_lock(&file_priv->mm.lock);
- request->file_priv = file_priv;
- list_add_tail(&request->client_list,
- &file_priv->mm.request_list);
- spin_unlock(&file_priv->mm.lock);
-
- request->pid = get_pid(task_pid(current));
- }
trace_i915_gem_request_add(request);
- ring->outstanding_lazy_request = NULL;
i915_queue_hangcheck(ring->dev);
@@ -2567,7 +2557,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
- return 0;
+ /* Sanity check that the reserved size was large enough. */
+ intel_ring_reserved_space_end(ringbuf);
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2621,12 +2612,13 @@ void i915_gem_request_free(struct kref *req_ref)
typeof(*req), ref);
struct intel_context *ctx = req->ctx;
+ if (req->file_priv)
+ i915_gem_request_remove_from_client(req);
+
if (ctx) {
if (i915.enable_execlists) {
- struct intel_engine_cs *ring = req->ring;
-
- if (ctx != ring->default_context)
- intel_lr_context_unpin(ring, ctx);
+ if (ctx != req->ring->default_context)
+ intel_lr_context_unpin(req);
}
i915_gem_context_unreference(ctx);
@@ -2636,36 +2628,63 @@ void i915_gem_request_free(struct kref *req_ref)
}
int i915_gem_request_alloc(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+ struct intel_context *ctx,
+ struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *req;
int ret;
- if (ring->outstanding_lazy_request)
- return 0;
+ if (!req_out)
+ return -EINVAL;
+
+ *req_out = NULL;
req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
- kref_init(&req->ref);
- req->i915 = dev_priv;
-
ret = i915_gem_get_seqno(ring->dev, &req->seqno);
if (ret)
goto err;
+ kref_init(&req->ref);
+ req->i915 = dev_priv;
req->ring = ring;
+ req->ctx = ctx;
+ i915_gem_context_reference(req->ctx);
if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req, ctx);
+ ret = intel_logical_ring_alloc_request_extras(req);
else
ret = intel_ring_alloc_request_extras(req);
- if (ret)
+ if (ret) {
+ i915_gem_context_unreference(req->ctx);
goto err;
+ }
+
+ /*
+ * Reserve space in the ring buffer for all the commands required to
+ * eventually emit this request. This is to guarantee that the
+ * i915_add_request() call can't fail. Note that the reserve may need
+ * to be redone if the request is not actually submitted straight
+ * away, e.g. because a GPU scheduler has deferred it.
+ */
+ if (i915.enable_execlists)
+ ret = intel_logical_ring_reserve_space(req);
+ else
+ ret = intel_ring_reserve_space(req);
+ if (ret) {
+ /*
+ * At this point, the request is fully allocated even if not
+ * fully prepared. Thus it can be cleaned up using the proper
+ * free code.
+ */
+ i915_gem_request_cancel(req);
+ return ret;
+ }
- ring->outstanding_lazy_request = req;
+ *req_out = req;
return 0;
err:
@@ -2673,6 +2692,13 @@ err:
return ret;
}
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+ intel_ring_reserved_space_cancel(req->ringbuf);
+
+ i915_gem_request_unreference(req);
+}
+
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
@@ -2734,7 +2760,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
list_del(&submit_req->execlist_link);
if (submit_req->ctx != ring->default_context)
- intel_lr_context_unpin(ring, submit_req->ctx);
+ intel_lr_context_unpin(submit_req);
i915_gem_request_unreference(submit_req);
}
@@ -2755,30 +2781,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_request_retire(request);
}
-
- /* This may not have been flushed before the reset, so clean it now */
- i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
-}
-
-void i915_gem_restore_fences(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
- /*
- * Commit delayed tiling changes if we have an object still
- * attached to the fence, otherwise just clear the fence.
- */
- if (reg->obj) {
- i915_gem_object_update_fence(reg->obj, reg,
- reg->obj->tiling_mode);
- } else {
- i915_gem_write_fence(dev, i, NULL);
- }
- }
}
void i915_gem_reset(struct drm_device *dev)
@@ -2940,7 +2942,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
- int ret, i;
+ int i;
if (!obj->active)
return 0;
@@ -2955,10 +2957,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (list_empty(&req->list))
goto retire;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
if (i915_gem_request_completed(req, true)) {
__i915_gem_request_retire__upto(req);
retire:
@@ -3061,25 +3059,22 @@ out:
static int
__i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to,
- struct drm_i915_gem_request *req)
+ struct drm_i915_gem_request *from_req,
+ struct drm_i915_gem_request **to_req)
{
struct intel_engine_cs *from;
int ret;
- from = i915_gem_request_get_ring(req);
+ from = i915_gem_request_get_ring(from_req);
if (to == from)
return 0;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(from_req, true))
return 0;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- ret = __i915_wait_request(req,
+ ret = __i915_wait_request(from_req,
atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
@@ -3087,16 +3082,24 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- i915_gem_object_retire_request(obj, req);
+ i915_gem_object_retire_request(obj, from_req);
} else {
int idx = intel_ring_sync_index(from, to);
- u32 seqno = i915_gem_request_get_seqno(req);
+ u32 seqno = i915_gem_request_get_seqno(from_req);
+
+ WARN_ON(!to_req);
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
- trace_i915_gem_ring_sync_to(from, to, req);
- ret = to->semaphore.sync_to(to, from, seqno);
+ if (*to_req == NULL) {
+ ret = i915_gem_request_alloc(to, to->default_context, to_req);
+ if (ret)
+ return ret;
+ }
+
+ trace_i915_gem_ring_sync_to(*to_req, from, from_req);
+ ret = to->semaphore.sync_to(*to_req, from, seqno);
if (ret)
return ret;
@@ -3116,11 +3119,14 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
*
* @obj: object which may be in use on another ring.
* @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. See below.
+ * This will be allocated and returned if a request is
+ * required but not passed in.
*
* This code is meant to abstract object synchronization with the GPU.
* Calling with NULL implies synchronizing the object with the CPU
* rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow on engine to write
+ * between engines inside the GPU. We only allow one engine to write
* into a buffer at any time, but multiple readers. To ensure each has
* a coherent view of memory, we must:
*
@@ -3131,11 +3137,22 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
* - If we are a write request (pending_write_domain is set), the new
* request must wait for outstanding read requests to complete.
*
+ * For CPU synchronisation (NULL to) no request is required. For syncing with
+ * rings to_req must be non-NULL. However, a request does not have to be
+ * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
+ * request will be allocated automatically and returned through *to_req. Note
+ * that it is not guaranteed that commands will be emitted (because the system
+ * might already be idle). Hence there is no need to create a request that
+ * might never have any work submitted. Note further that if a request is
+ * returned in *to_req, it is the responsibility of the caller to submit
+ * that request (after potentially adding more work to it).
+ *
* Returns 0 if successful, else propagates up the lower layer error.
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *to)
+ struct intel_engine_cs *to,
+ struct drm_i915_gem_request **to_req)
{
const bool readonly = obj->base.pending_write_domain == 0;
struct drm_i915_gem_request *req[I915_NUM_RINGS];
@@ -3157,7 +3174,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
req[n++] = obj->last_read_req[i];
}
for (i = 0; i < n; i++) {
- ret = __i915_gem_object_sync(obj, to, req[i]);
+ ret = __i915_gem_object_sync(obj, to, req[i], to_req);
if (ret)
return ret;
}
@@ -3247,10 +3264,8 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
- if (list_empty(&obj->vma_list)) {
- i915_gem_gtt_finish_object(obj);
+ if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
- }
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3270,354 +3285,27 @@ int i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
if (!i915.enable_execlists) {
- ret = i915_switch_context(ring, ring->default_context);
+ struct drm_i915_gem_request *req;
+
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
- }
-
- ret = intel_ring_idle(ring);
- if (ret)
- return ret;
- }
-
- WARN_ON(i915_verify_lists(dev));
- return 0;
-}
-
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fence_reg;
- int fence_pitch_shift;
- if (INTEL_INFO(dev)->gen >= 6) {
- fence_reg = FENCE_REG_SANDYBRIDGE_0;
- fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
- } else {
- fence_reg = FENCE_REG_965_0;
- fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
- }
-
- fence_reg += reg * 8;
+ ret = i915_switch_context(req);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
- /* To w/a incoherency with non-atomic 64-bit register updates,
- * we split the 64-bit update into two 32-bit writes. In order
- * for a partial fence not to be evaluated between writes, we
- * precede the update with write to turn off the fence register,
- * and only enable the fence as the last step.
- *
- * For extra levels of paranoia, we make sure each step lands
- * before applying the next step.
- */
- I915_WRITE(fence_reg, 0);
- POSTING_READ(fence_reg);
-
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
- uint64_t val;
-
- /* Adjust fence size to match tiled area */
- if (obj->tiling_mode != I915_TILING_NONE) {
- uint32_t row_size = obj->stride *
- (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
- size = (size / row_size) * row_size;
+ i915_add_request_no_flush(req);
}
- val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
- 0xfffff000) << 32;
- val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
-
- I915_WRITE(fence_reg + 4, val >> 32);
- POSTING_READ(fence_reg + 4);
-
- I915_WRITE(fence_reg + 0, val);
- POSTING_READ(fence_reg);
- } else {
- I915_WRITE(fence_reg + 4, 0);
- POSTING_READ(fence_reg + 4);
- }
-}
-
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val;
-
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
- int pitch_val;
- int tile_width;
-
- WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
- (size & -size) != size ||
- (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
- "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
-
- if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_width = 128;
- else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = obj->stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
-
- val = i915_gem_obj_ggtt_offset(obj);
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
- } else
- val = 0;
-
- if (reg < 8)
- reg = FENCE_REG_830_0 + reg * 4;
- else
- reg = FENCE_REG_945_8 + (reg - 8) * 4;
-
- I915_WRITE(reg, val);
- POSTING_READ(reg);
-}
-
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val;
-
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
- uint32_t pitch_val;
-
- WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
- (size & -size) != size ||
- (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
- "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
- i915_gem_obj_ggtt_offset(obj), size);
-
- pitch_val = obj->stride / 128;
- pitch_val = ffs(pitch_val) - 1;
-
- val = i915_gem_obj_ggtt_offset(obj);
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I830_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
- } else
- val = 0;
-
- I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
- POSTING_READ(FENCE_REG_830_0 + reg * 4);
-}
-
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
- return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
-}
-
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* Ensure that all CPU reads are completed before installing a fence
- * and all writes before removing the fence.
- */
- if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
- mb();
-
- WARN(obj && (!obj->stride || !obj->tiling_mode),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- obj->stride, obj->tiling_mode);
-
- if (IS_GEN2(dev))
- i830_write_fence_reg(dev, reg, obj);
- else if (IS_GEN3(dev))
- i915_write_fence_reg(dev, reg, obj);
- else if (INTEL_INFO(dev)->gen >= 4)
- i965_write_fence_reg(dev, reg, obj);
-
- /* And similarly be paranoid that no direct access to this region
- * is reordered to before the fence is installed.
- */
- if (i915_gem_object_needs_mb(obj))
- mb();
-}
-
-static inline int fence_number(struct drm_i915_private *dev_priv,
- struct drm_i915_fence_reg *fence)
-{
- return fence - dev_priv->fence_regs;
-}
-
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
- struct drm_i915_fence_reg *fence,
- bool enable)
-{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- int reg = fence_number(dev_priv, fence);
-
- i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
-
- if (enable) {
- obj->fence_reg = reg;
- fence->obj = obj;
- list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
- } else {
- obj->fence_reg = I915_FENCE_REG_NONE;
- fence->obj = NULL;
- list_del_init(&fence->lru_list);
- }
- obj->fence_dirty = false;
-}
-
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->last_fenced_req) {
- int ret = i915_wait_request(obj->last_fenced_req);
- if (ret)
- return ret;
-
- i915_gem_request_assign(&obj->last_fenced_req, NULL);
- }
-
- return 0;
-}
-
-int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct drm_i915_fence_reg *fence;
- int ret;
-
- ret = i915_gem_object_wait_fence(obj);
- if (ret)
- return ret;
-
- if (obj->fence_reg == I915_FENCE_REG_NONE)
- return 0;
-
- fence = &dev_priv->fence_regs[obj->fence_reg];
-
- if (WARN_ON(fence->pin_count))
- return -EBUSY;
-
- i915_gem_object_fence_lost(obj);
- i915_gem_object_update_fence(obj, fence, false);
-
- return 0;
-}
-
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg, *avail;
- int i;
-
- /* First try to find a free reg */
- avail = NULL;
- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- return reg;
-
- if (!reg->pin_count)
- avail = reg;
- }
-
- if (avail == NULL)
- goto deadlock;
-
- /* None available, try to steal one or wait for a user to finish */
- list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
- if (reg->pin_count)
- continue;
-
- return reg;
- }
-
-deadlock:
- /* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(dev))
- return ERR_PTR(-EAGAIN);
-
- return ERR_PTR(-EDEADLK);
-}
-
-/**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- * This function walks the fence regs looking for a free one for @obj,
- * stealing one if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- *
- * For an untiled surface, this removes any existing fence.
- */
-int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool enable = obj->tiling_mode != I915_TILING_NONE;
- struct drm_i915_fence_reg *reg;
- int ret;
-
- /* Have we updated the tiling parameters upon the object and so
- * will need to serialise the write to the associated fence register?
- */
- if (obj->fence_dirty) {
- ret = i915_gem_object_wait_fence(obj);
+ ret = intel_ring_idle(ring);
if (ret)
return ret;
}
- /* Just update our place in the LRU if our fence is getting reused. */
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- reg = &dev_priv->fence_regs[obj->fence_reg];
- if (!obj->fence_dirty) {
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- return 0;
- }
- } else if (enable) {
- if (WARN_ON(!obj->map_and_fenceable))
- return -EINVAL;
-
- reg = i915_find_fence_reg(dev);
- if (IS_ERR(reg))
- return PTR_ERR(reg);
-
- if (reg->obj) {
- struct drm_i915_gem_object *old = reg->obj;
-
- ret = i915_gem_object_wait_fence(old);
- if (ret)
- return ret;
-
- i915_gem_object_fence_lost(old);
- }
- } else
- return 0;
-
- i915_gem_object_update_fence(obj, reg, enable);
-
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -3668,9 +3356,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
- unsigned long start =
+ u64 start =
flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
- unsigned long end =
+ u64 end =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@@ -3726,7 +3414,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
* attempt to find space.
*/
if (size > end) {
- DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+ DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
ggtt_view ? ggtt_view->type : 0,
size,
flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3768,22 +3456,16 @@ search_free:
goto err_remove_node;
}
- ret = i915_gem_gtt_prepare_object(obj);
- if (ret)
- goto err_remove_node;
-
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
- goto err_finish_gtt;
+ goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
return vma;
-err_finish_gtt:
- i915_gem_gtt_finish_object(obj);
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
@@ -3854,7 +3536,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -3876,7 +3558,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -3938,9 +3620,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->dirty = 1;
}
- if (write)
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
-
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -4095,12 +3774,13 @@ int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_engine_cs *pipelined,
+ struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
int ret;
- ret = i915_gem_object_sync(obj, pipelined);
+ ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
if (ret)
return ret;
@@ -4211,9 +3891,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
- if (write)
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
-
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -4254,6 +3931,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
+ /*
+ * Note that the request might not have been submitted yet.
+ * In which case emitted_jiffies will be zero.
+ */
+ if (!request->emitted_jiffies)
+ continue;
+
target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -4424,32 +4108,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
--vma->pin_count;
}
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
- WARN_ON(!ggtt_vma ||
- dev_priv->fence_regs[obj->fence_reg].pin_count >
- ggtt_vma->pin_count);
- dev_priv->fence_regs[obj->fence_reg].pin_count++;
- return true;
- } else
- return false;
-}
-
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
- dev_priv->fence_regs[obj->fence_reg].pin_count--;
- }
-}
-
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -4811,8 +4469,9 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
@@ -4822,7 +4481,7 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
if (!HAS_L3_DPF(dev) || !remap_info)
return 0;
- ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+ ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
if (ret)
return ret;
@@ -4968,7 +4627,7 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- int ret, i;
+ int ret, i, j;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
@@ -5005,27 +4664,55 @@ i915_gem_init_hw(struct drm_device *dev)
*/
init_unused_rings(dev);
+ BUG_ON(!dev_priv->ring[RCS].default_context);
+
+ ret = i915_ppgtt_init_hw(dev);
+ if (ret) {
+ DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+ goto out;
+ }
+
+ /* Need to do basic initialisation of all rings first: */
for_each_ring(ring, dev_priv, i) {
ret = ring->init_hw(ring);
if (ret)
goto out;
}
- for (i = 0; i < NUM_L3_SLICES(dev); i++)
- i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+ /* Now it is safe to go back round and do everything else: */
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_request *req;
- ret = i915_ppgtt_init_hw(dev);
- if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
- i915_gem_cleanup_ringbuffer(dev);
- }
+ WARN_ON(!ring->default_context);
- ret = i915_gem_context_enable(dev_priv);
- if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
- i915_gem_cleanup_ringbuffer(dev);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+ if (ret) {
+ i915_gem_cleanup_ringbuffer(dev);
+ goto out;
+ }
- goto out;
+ if (ring->id == RCS) {
+ for (j = 0; j < NUM_L3_SLICES(dev); j++)
+ i915_gem_l3_remap(req, j);
+ }
+
+ ret = i915_ppgtt_init_ring(req);
+ if (ret && ret != -EIO) {
+ DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+ i915_gem_request_cancel(req);
+ i915_gem_cleanup_ringbuffer(dev);
+ goto out;
+ }
+
+ ret = i915_gem_context_enable(req);
+ if (ret && ret != -EIO) {
+ DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+ i915_gem_request_cancel(req);
+ i915_gem_cleanup_ringbuffer(dev);
+ goto out;
+ }
+
+ i915_add_request_no_flush(req);
}
out:
@@ -5112,6 +4799,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);
+
+ if (i915.enable_execlists)
+ /*
+ * Neither the BIOS, ourselves or any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode.
+ */
+ intel_gpu_reset(dev);
}
static void
@@ -5389,3 +5084,42 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
return false;
}
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_device *dev,
+ const void *data, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ struct sg_table *sg;
+ size_t bytes;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
+ if (IS_ERR_OR_NULL(obj))
+ return obj;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret)
+ goto fail;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto fail;
+
+ i915_gem_object_pin_pages(obj);
+ sg = obj->pages;
+ bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+ i915_gem_object_unpin_pages(obj);
+
+ if (WARN_ON(bytes != size)) {
+ DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ return obj;
+
+fail:
+ drm_gem_object_unreference(&obj->base);
+ return ERR_PTR(ret);
+}
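i915_gem_object_create_from_data() gives callers a one-shot way to wrap an in-memory blob in a GEM object, rounding the allocation up to a page and copying the data through the CPU domain. A hedged usage sketch (the blob and its length are placeholders):

/* Sketch: turn an in-memory blob into a GEM object. */
static struct drm_i915_gem_object *
wrap_blob(struct drm_device *dev, const void *blob, size_t len)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_from_data(dev, blob, len);
	if (IS_ERR_OR_NULL(obj))
		return obj;	/* NULL or ERR_PTR() is propagated to the caller */

	/* The object now holds a page-padded copy of the blob and can be
	 * pinned and mapped like any other GEM object. */
	return obj;
}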
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8867818b1401..8e893b354bcc 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
void i915_gem_context_free(struct kref *ctx_ref)
{
- struct intel_context *ctx = container_of(ctx_ref,
- typeof(*ctx), ref);
+ struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
trace_i915_context_free(ctx);
@@ -157,9 +156,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj;
int ret;
- obj = i915_gem_object_create_stolen(dev, size);
- if (obj == NULL)
- obj = i915_gem_alloc_object(dev, size);
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -197,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
+ ctx->i915 = dev_priv;
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
@@ -289,6 +287,7 @@ err_unpin:
if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
@@ -409,32 +408,23 @@ void i915_gem_context_fini(struct drm_device *dev)
i915_gem_context_unreference(dctx);
}
-int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring;
- int ret, i;
-
- BUG_ON(!dev_priv->ring[RCS].default_context);
+ struct intel_engine_cs *ring = req->ring;
+ int ret;
if (i915.enable_execlists) {
- for_each_ring(ring, dev_priv, i) {
- if (ring->init_context) {
- ret = ring->init_context(ring,
- ring->default_context);
- if (ret) {
- DRM_ERROR("ring init context: %d\n",
- ret);
- return ret;
- }
- }
- }
+ if (ring->init_context == NULL)
+ return 0;
+ ret = ring->init_context(req);
} else
- for_each_ring(ring, dev_priv, i) {
- ret = i915_switch_context(ring, ring->default_context);
- if (ret)
- return ret;
- }
+ ret = i915_switch_context(req);
+
+ if (ret) {
+ DRM_ERROR("ring init context: %d\n", ret);
+ return ret;
+ }
return 0;
}
@@ -487,10 +477,9 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
}
static inline int
-mi_set_context(struct intel_engine_cs *ring,
- struct intel_context *new_context,
- u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
+ struct intel_engine_cs *ring = req->ring;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -505,13 +494,15 @@ mi_set_context(struct intel_engine_cs *ring,
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev)) {
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+ if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+ flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+ else if (INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
@@ -519,7 +510,7 @@ mi_set_context(struct intel_engine_cs *ring,
if (INTEL_INFO(ring->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
- ret = intel_ring_begin(ring, len);
+ ret = intel_ring_begin(req, len);
if (ret)
return ret;
@@ -542,7 +533,7 @@ mi_set_context(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -623,9 +614,10 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
return false;
}
-static int do_switch(struct intel_engine_cs *ring,
- struct intel_context *to)
+static int do_switch(struct drm_i915_gem_request *req)
{
+ struct intel_context *to = req->ctx;
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
@@ -661,7 +653,7 @@ static int do_switch(struct intel_engine_cs *ring,
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(ring, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
@@ -703,7 +695,7 @@ static int do_switch(struct intel_engine_cs *ring,
WARN_ON(needs_pd_load_pre(ring, to) &&
needs_pd_load_post(ring, to, hw_flags));
- ret = mi_set_context(ring, to, hw_flags);
+ ret = mi_set_context(req, hw_flags);
if (ret)
goto unpin_out;
@@ -712,7 +704,7 @@ static int do_switch(struct intel_engine_cs *ring,
*/
if (needs_pd_load_post(ring, to, hw_flags)) {
trace_switch_mm(ring, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
@@ -728,7 +720,7 @@ static int do_switch(struct intel_engine_cs *ring,
if (!(to->remap_slice & (1<<i)))
continue;
- ret = i915_gem_l3_remap(ring, i);
+ ret = i915_gem_l3_remap(req, i);
/* If it failed, try again next round */
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
@@ -744,7 +736,7 @@ static int do_switch(struct intel_engine_cs *ring,
*/
if (from != NULL) {
from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -768,7 +760,7 @@ done:
if (uninitialized) {
if (ring->init_context) {
- ret = ring->init_context(ring, to);
+ ret = ring->init_context(req);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
@@ -784,8 +776,7 @@ unpin_out:
/**
* i915_switch_context() - perform a GPU context switch.
- * @ring: ring for which we'll execute the context switch
- * @to: the context to switch to
+ * @req: request for which we'll execute the context switch
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -796,25 +787,25 @@ unpin_out:
* switched by writing to the ELSP and requests keep a reference to their
* context.
*/
-int i915_switch_context(struct intel_engine_cs *ring,
- struct intel_context *to)
+int i915_switch_context(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
- if (to != ring->last_context) {
- i915_gem_context_reference(to);
+ if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+ if (req->ctx != ring->last_context) {
+ i915_gem_context_reference(req->ctx);
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
- ring->last_context = to;
+ ring->last_context = req->ctx;
}
return 0;
}
- return do_switch(ring, to);
+ return do_switch(req);
}
static bool contexts_enabled(struct drm_device *dev)
@@ -900,6 +891,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_BAN_PERIOD:
args->value = ctx->hang_stats.ban_period_seconds;
break;
+ case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+ break;
default:
ret = -EINVAL;
break;
@@ -937,6 +931,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
ctx->hang_stats.ban_period_seconds = args->value;
break;
+ case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ if (args->size) {
+ ret = -EINVAL;
+ } else {
+ ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+ ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+ }
+ break;
default:
ret = -EINVAL;
break;
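The new I915_CONTEXT_PARAM_NO_ZEROMAP flag is toggled from userspace through the context setparam ioctl; the kernel rejects a non-zero size field with -EINVAL. A hedged userspace sketch, assuming libdrm's drmIoctl() and the i915 uapi header:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Sketch: ask the kernel to bias this context's objects away from offset 0. */
static int context_set_no_zeromap(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_NO_ZEROMAP;
	p.size = 0;		/* must be zero, or the kernel returns -EINVAL */
	p.value = 1;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}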
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7998da27c500..e9c2bfd85b52 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
return PTR_ERR(sg);
obj->pages = sg;
- obj->has_dma_mapping = true;
return 0;
}
@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
dma_buf_unmap_attachment(obj->base.import_attach,
obj->pages, DMA_BIDIRECTIONAL);
- obj->has_dma_mapping = false;
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a7fa14516cda..923a3c4bf0b7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -677,6 +677,7 @@ eb_vma_misplaced(struct i915_vma *vma)
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct list_head *vmas,
+ struct intel_context *ctx,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -699,6 +700,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
obj = vma->obj;
entry = vma->exec_entry;
+ if (ctx->flags & CONTEXT_NO_ZEROMAP)
+ entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
if (!has_fenced_gpu_access)
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
@@ -776,7 +780,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
struct eb_vmas *eb,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct intel_context *ctx)
{
struct drm_i915_gem_relocation_entry *reloc;
struct i915_address_space *vm;
@@ -862,7 +867,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
if (ret)
goto err;
@@ -887,10 +892,10 @@ err:
}
static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_ring_flag(ring);
+ const unsigned other_rings = ~intel_ring_flag(req->ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -900,7 +905,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring, &req);
if (ret)
return ret;
}
@@ -912,7 +917,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
}
if (flush_chipset)
- i915_gem_chipset_flush(ring->dev);
+ i915_gem_chipset_flush(req->ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -920,7 +925,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return intel_ring_invalidate_all_caches(ring);
+ return intel_ring_invalidate_all_caches(req);
}
static bool
@@ -953,6 +958,9 @@ validate_exec_list(struct drm_device *dev,
if (exec[i].flags & invalid_flags)
return -EINVAL;
+ if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+ return -EINVAL;
+
/* First check for malicious input causing overflow in
* the worst case where we need to allocate the entire
* relocation tree as a single array.
@@ -1013,9 +1021,9 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
- struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+ struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -1029,12 +1037,12 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
- i915_vma_move_to_active(vma, ring);
+ i915_vma_move_to_active(vma, req);
if (obj->base.write_domain) {
obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
- intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+ intel_fb_obj_invalidate(obj, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1053,22 +1061,20 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
}
void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
- struct drm_file *file,
- struct intel_engine_cs *ring,
- struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
- ring->gpu_caches_dirty = true;
+ params->ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- (void)__i915_add_request(ring, file, obj);
+ __i915_add_request(params->request, params->batch_obj, true);
}
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
@@ -1077,7 +1083,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return -EINVAL;
}
- ret = intel_ring_begin(ring, 4 * 3);
+ ret = intel_ring_begin(req, 4 * 3);
if (ret)
return ret;
@@ -1093,10 +1099,11 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
}
static int
-i915_emit_box(struct intel_engine_cs *ring,
+i915_emit_box(struct drm_i915_gem_request *req,
struct drm_clip_rect *box,
int DR1, int DR4)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
@@ -1107,7 +1114,7 @@ i915_emit_box(struct intel_engine_cs *ring,
}
if (INTEL_INFO(ring->dev)->gen >= 4) {
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -1116,7 +1123,7 @@ i915_emit_box(struct intel_engine_cs *ring,
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
} else {
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -1186,17 +1193,15 @@ err:
}
int
-i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 dispatch_flags)
+ struct list_head *vmas)
{
struct drm_clip_rect *cliprects = NULL;
+ struct drm_device *dev = params->dev;
+ struct intel_engine_cs *ring = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
- u64 exec_len;
+ u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
int i, ret = 0;
@@ -1244,15 +1249,15 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
}
}
- ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
if (ret)
goto error;
- ret = i915_switch_context(ring, ctx);
+ ret = i915_switch_context(params->request);
if (ret)
goto error;
- WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+ WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
"%s didn't clear reload\n", ring->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1294,7 +1299,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(params->request, 4);
if (ret)
goto error;
@@ -1308,37 +1313,40 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, ring);
+ ret = i915_reset_gen7_sol_offsets(dev, params->request);
if (ret)
goto error;
}
- exec_len = args->batch_len;
+ exec_len = args->batch_len;
+ exec_start = params->batch_obj_vm_offset +
+ params->args_batch_start_offset;
+
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(ring, &cliprects[i],
+ ret = i915_emit_box(params->request, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
- ret = ring->dispatch_execbuffer(ring,
+ ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
- dispatch_flags);
+ params->dispatch_flags);
if (ret)
goto error;
}
} else {
- ret = ring->dispatch_execbuffer(ring,
+ ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
- dispatch_flags);
+ params->dispatch_flags);
if (ret)
return ret;
}
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+ trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
- i915_gem_execbuffer_move_to_active(vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+ i915_gem_execbuffer_move_to_active(vmas, params->request);
+ i915_gem_execbuffer_retire_commands(params);
error:
kfree(cliprects);
@@ -1408,8 +1416,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
+ struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+ struct i915_execbuffer_params *params = &params_master;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u64 exec_start = args->batch_start_offset;
u32 dispatch_flags;
int ret;
bool need_relocs;
@@ -1482,6 +1491,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
+ if (!HAS_RESOURCE_STREAMER(dev)) {
+ DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
+ return -EINVAL;
+ }
+ if (ring->id != RCS) {
+ DRM_DEBUG("RS is not available on %s\n",
+ ring->name);
+ return -EINVAL;
+ }
+
+ dispatch_flags |= I915_DISPATCH_RS;
+ }
+
intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
@@ -1502,6 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else
vm = &dev_priv->gtt.base;
+ memset(&params_master, 0x00, sizeof(params_master));
+
eb = eb_create(args);
if (eb == NULL) {
i915_gem_context_unreference(ctx);
@@ -1520,7 +1545,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
if (ret)
goto err;
@@ -1530,7 +1555,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec);
+ eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1544,6 +1569,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
+ params->args_batch_start_offset = args->batch_start_offset;
if (i915_needs_cmd_parser(ring) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
@@ -1575,7 +1601,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* command parser has accepted.
*/
dispatch_flags |= I915_DISPATCH_SECURE;
- exec_start = 0;
+ params->args_batch_start_offset = 0;
batch_obj = parsed_batch_obj;
}
}
@@ -1600,15 +1626,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+ params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
} else
- exec_start += i915_gem_obj_offset(batch_obj, vm);
+ params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
- ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
- &eb->vmas, batch_obj, exec_start,
- dispatch_flags);
+ /* Allocate a request for this batch buffer nice and early. */
+ ret = i915_gem_request_alloc(ring, ctx, &params->request);
+ if (ret)
+ goto err_batch_unpin;
+
+ ret = i915_gem_request_add_to_client(params->request, file);
+ if (ret)
+ goto err_batch_unpin;
/*
+ * Save assorted stuff away to pass through to *_submission().
+ * NB: This data should be 'persistent' and not local as it will
+	 * NB: This data should be 'persistent' and not local as it will be
+ * scheduler arrives.
+ */
+ params->dev = dev;
+ params->file = file;
+ params->ring = ring;
+ params->dispatch_flags = dispatch_flags;
+ params->batch_obj = batch_obj;
+ params->ctx = ctx;
+
+ ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+
+err_batch_unpin:
+ /*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
* batch vma for correctness. For less ugly and less fragility this
* needs to be adjusted to also track the ggtt batch vma properly as
@@ -1616,11 +1663,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
if (dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);
+
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
eb_destroy(eb);
+ /*
+ * If the request was created but not successfully submitted then it
+ * must be freed again. If it was submitted then it is being tracked
+ * on the active request list and no clean up is required here.
+ */
+ if (ret && params->request)
+ i915_gem_request_cancel(params->request);
+
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
new file mode 100644
index 000000000000..af1f8c461060
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: fence register handling
+ *
+ * Important to avoid confusion: "fences" in the i915 driver are not execution
+ * fences used to track command completion but hardware detiler objects which
+ * wrap a given range of the global GTT. Each platform has only a fairly limited
+ * set of these objects.
+ *
+ * Fences are used to detile GTT memory mappings. They're also connected to the
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
+ * objects used by the display engine. They can also be used by the render
+ * engine - they're required for blitter commands and are optional for render
+ * commands. But on gen4+ both display (with the exception of fbc) and rendering
+ * have their own tiling state bits and don't need fences.
+ *
+ * Also note that fences only support X and Y tiling and hence can't be used for
+ * the fancier new tiling formats like W, Ys and Yf.
+ *
+ * Finally note that because fences are such a restricted resource they're
+ * dynamically associated with objects. Furthermore fence state is committed to
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
+ * for cpu access. Also note that some code wants an unfenced view; for those
+ * cases the fence can be removed forcefully with i915_gem_object_put_fence().
+ *
+ * Internally these functions will synchronize with userspace access by removing
+ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
+ */
+
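/*
 * Illustrative sketch, not part of the patch: the typical calling pattern for
 * kernel code that needs detiled CPU access through the GTT is to synchronize
 * the fence state first and then pin it for the duration of the access. The
 * wrapper name below is hypothetical; the three i915 calls are the ones
 * implemented in this file.
 */
static inline int example_fenced_access(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_fence(obj);	/* allocate or steal a fence reg */
	if (ret)
		return ret;

	if (i915_gem_object_pin_fence(obj)) {
		/* ... access the object through its fenced GTT mapping ... */
		i915_gem_object_unpin_fence(obj);
	}

	return 0;
}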
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fence_reg;
+ int fence_pitch_shift;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ fence_reg = FENCE_REG_SANDYBRIDGE_0;
+ fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ } else {
+ fence_reg = FENCE_REG_965_0;
+ fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+ }
+
+ fence_reg += reg * 8;
+
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ uint64_t val;
+
+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+ 0xfffff000) << 32;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE(fence_reg + 4, val >> 32);
+ POSTING_READ(fence_reg + 4);
+
+ I915_WRITE(fence_reg + 0, val);
+ POSTING_READ(fence_reg);
+ } else {
+ I915_WRITE(fence_reg + 4, 0);
+ POSTING_READ(fence_reg + 4);
+ }
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ int pitch_val;
+ int tile_width;
+
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = i915_gem_obj_ggtt_offset(obj);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ uint32_t pitch_val;
+
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+ i915_gem_obj_ggtt_offset(obj), size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = i915_gem_obj_ggtt_offset(obj);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+ return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+ mb();
+
+ WARN(obj && (!obj->stride || !obj->tiling_mode),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
+ if (IS_GEN2(dev))
+ i830_write_fence_reg(dev, reg, obj);
+ else if (IS_GEN3(dev))
+ i915_write_fence_reg(dev, reg, obj);
+ else if (INTEL_INFO(dev)->gen >= 4)
+ i965_write_fence_reg(dev, reg, obj);
+
+ /* And similarly be paranoid that no direct access to this region
+ * is reordered to before the fence is installed.
+ */
+ if (i915_gem_object_needs_mb(obj))
+ mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
+ obj->fence_dirty = false;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->last_fenced_req) {
+ int ret = i915_wait_request(obj->last_fenced_req);
+ if (ret)
+ return ret;
+
+ i915_gem_request_assign(&obj->last_fenced_req, NULL);
+ }
+
+ return 0;
+}
+
+/**
+ * i915_gem_object_put_fence - force-remove fence for an object
+ * @obj: object to map through a fence reg
+ *
+ * This function force-removes any fence from the given object, which is useful
+ * if the kernel wants to do untiled GTT access.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_fence_reg *fence;
+ int ret;
+
+ ret = i915_gem_object_wait_fence(obj);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
+
+ fence = &dev_priv->fence_regs[obj->fence_reg];
+
+ if (WARN_ON(fence->pin_count))
+ return -EBUSY;
+
+ i915_gem_object_fence_lost(obj);
+ i915_gem_object_update_fence(obj, fence, false);
+
+ return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_fence_reg *reg, *avail;
+ int i;
+
+ /* First try to find a free reg */
+ avail = NULL;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return reg;
+
+ if (!reg->pin_count)
+ avail = reg;
+ }
+
+ if (avail == NULL)
+ goto deadlock;
+
+ /* None available, try to steal one or wait for a user to finish */
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->pin_count)
+ continue;
+
+ return reg;
+ }
+
+deadlock:
+ /* Wait for completion of pending flips which consume fences */
+ if (intel_has_pending_fb_unpin(dev))
+ return ERR_PTR(-EAGAIN);
+
+ return ERR_PTR(-EDEADLK);
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
+ struct drm_i915_fence_reg *reg;
+ int ret;
+
+ /* Have we updated the tiling parameters upon the object and so
+ * will need to serialise the write to the associated fence register?
+ */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_wait_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ return 0;
+ }
+ } else if (enable) {
+ if (WARN_ON(!obj->map_and_fenceable))
+ return -EINVAL;
+
+ reg = i915_find_fence_reg(dev);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ ret = i915_gem_object_wait_fence(old);
+ if (ret)
+ return ret;
+
+ i915_gem_object_fence_lost(old);
+ }
+ } else
+ return 0;
+
+ i915_gem_object_update_fence(obj, reg, enable);
+
+ return 0;
+}
+
+/**
+ * i915_gem_object_pin_fence - pin fencing state
+ * @obj: object to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * object is ready to be used as a scanout target. Fencing status must be
+ * synchronized first by calling i915_gem_object_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_gem_object_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the object has a fence, false otherwise.
+ */
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+ WARN_ON(!ggtt_vma ||
+ dev_priv->fence_regs[obj->fence_reg].pin_count >
+ ggtt_vma->pin_count);
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+/**
+ * i915_gem_object_unpin_fence - unpin fencing state
+ * @obj: object to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_gem_object_pin_fence. It will handle both objects with and without an
+ * attached fence correctly; callers do not need to distinguish between the two.
+ */
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
+/**
+ * i915_gem_restore_fences - restore fence state
+ * @dev: DRM device
+ *
+ * Restore the hw fence state to match the software tracking again, to be called
+ * after a gpu reset and on resume.
+ */
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence, otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
+ }
+}
+
+/**
+ * DOC: tiling swizzling details
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what address
+ * swizzling it needs to do, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
+
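/*
 * Illustrative sketch, not part of the patch: this is roughly the address
 * correction userspace applies when the kernel reports I915_BIT_6_SWIZZLE_9_10:
 * bit 6 of a byte offset into the object is XORed with bits 9 and 10 before
 * touching linearly mapped data. The helper name is hypothetical.
 */
static inline unsigned long example_swizzle_9_10(unsigned long addr)
{
	unsigned long bit9 = (addr >> 9) & 1;
	unsigned long bit10 = (addr >> 10) & 1;

	/* flip bit 6 whenever bits 9 and 10 disagree */
	return addr ^ ((bit9 ^ bit10) << 6);
}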
+/**
+ * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @dev: DRM device
+ *
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+ if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+ /*
+ * On BDW+, swizzling is not used. We leave the CPU memory
+ * controller in charge of optimizing memory accesses without
+ * the extra address manipulation GPU side.
+ *
+ * VLV and CHV don't have GPU swizzling.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ if (dev_priv->preserve_bios_swizzle) {
+ if (I915_READ(DISP_ARB_CTL) &
+ DISP_TILE_SURFACE_SWIZZLING) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ } else {
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated
+ * with identically sized dimms. We don't need to check
+ * the 3rd channel because no cpu with gpu attached
+ * ships in that configuration. Also, swizzling only
+ * makes sense for 2 channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ }
+ } else if (IS_GEN5(dev)) {
+		/* On Ironlake, whatever the DRAM config, the GPU always does
+		 * the same swizzling setup.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_GEN2(dev)) {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ uint32_t dcc;
+
+ /* On 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+ * 9 for Y tiled. The CPU's interleave is independent, and
+ * can be based on either bit 11 (haven't seen this yet) or
+ * bit 17 (common).
+ */
+ dcc = I915_READ(DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /* This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Bit 11 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+ }
+ break;
+ }
+
+ /* check for L-shaped memory aka modified enhanced addressing */
+ if (IS_GEN4(dev)) {
+ uint32_t ddc2 = I915_READ(DCC2);
+
+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ }
+
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ } else {
+ /* The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ */
+ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ }
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/*
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(struct page *page)
+{
+ char temp[64];
+ char *vaddr;
+ int i;
+
+ vaddr = kmap(page);
+
+ for (i = 0; i < PAGE_SIZE; i += 128) {
+ memcpy(temp, &vaddr[i], 64);
+ memcpy(&vaddr[i], &vaddr[i + 64], 64);
+ memcpy(&vaddr[i + 64], temp, 64);
+ }
+
+ kunmap(page);
+}
+
+/**
+ * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function fixes up the swizzling in case any page frame number for this
+ * object has changed in bit 17 since that state has been saved with
+ * i915_gem_object_save_bit_17_swizzle().
+ *
+ * This is called when pinning backing storage again, since the kernel is free
+ * to move unpinned backing storage around (either by directly moving pages or
+ * by swapping them out and back in again).
+ */
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ struct sg_page_iter sg_iter;
+ int i;
+
+ if (obj->bit_17 == NULL)
+ return;
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ char new_bit_17 = page_to_phys(page) >> 17;
+ if ((new_bit_17 & 0x1) !=
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
+ }
+ i++;
+ }
+}
+
+/**
+ * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function saves the bit 17 of each page frame number so that swizzling
+ * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
+ * be called before the backing storage can be unpinned.
+ */
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ struct sg_page_iter sg_iter;
+ int page_count = obj->base.size >> PAGE_SHIFT;
+ int i;
+
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+ sizeof(long), GFP_KERNEL);
+ if (obj->bit_17 == NULL) {
+ DRM_ERROR("Failed to allocate memory for bit 17 "
+ "record\n");
+ return;
+ }
+ }
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+ __set_bit(i, obj->bit_17);
+ else
+ __clear_bit(i, obj->bit_17);
+ i++;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9daa2883ac18..96054a560f4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -192,9 +192,8 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
{
gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
@@ -301,75 +300,120 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-#define i915_dma_unmap_single(px, dev) \
- __i915_dma_unmap_single((px)->daddr, dev)
-
-static void __i915_dma_unmap_single(dma_addr_t daddr,
- struct drm_device *dev)
+static int __setup_page_dma(struct drm_device *dev,
+ struct i915_page_dma *p, gfp_t flags)
{
struct device *device = &dev->pdev->dev;
- dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ p->page = alloc_page(flags);
+ if (!p->page)
+ return -ENOMEM;
+
+ p->daddr = dma_map_page(device,
+ p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(device, p->daddr)) {
+ __free_page(p->page);
+ return -EINVAL;
+ }
+
+ return 0;
}
-/**
- * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
- * @px: Page table/dir/etc to get a DMA map for
- * @dev: drm device
- *
- * Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping. If we keep the structs
- * symmetric here, the simple macro covers us for every page table type.
- *
- * Return: 0 if success.
- */
-#define i915_dma_map_single(px, dev) \
- i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+ return __setup_page_dma(dev, p, GFP_KERNEL);
+}
-static int i915_dma_map_page_single(struct page *page,
- struct drm_device *dev,
- dma_addr_t *daddr)
+static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
- struct device *device = &dev->pdev->dev;
+ if (WARN_ON(!p->page))
+ return;
- *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device, *daddr))
- return -ENOMEM;
+ dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ __free_page(p->page);
+ memset(p, 0, sizeof(*p));
+}
- return 0;
+static void *kmap_page_dma(struct i915_page_dma *p)
+{
+ return kmap_atomic(p->page);
}
-static void unmap_and_free_pt(struct i915_page_table *pt,
- struct drm_device *dev)
+/* We use the flushing unmap only with ppgtt structures:
+ * page directories, page tables and scratch pages.
+ */
+static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
- if (WARN_ON(!pt->page))
- return;
+	/* There are only a few exceptions for gen >= 6: chv and bxt.
+	 * And we are not sure about the latter, so play safe for now.
+ */
+ if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
- i915_dma_unmap_single(pt, dev);
- __free_page(pt->page);
- kfree(pt->used_ptes);
- kfree(pt);
+ kunmap_atomic(vaddr);
}
-static void gen8_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
+#define kmap_px(px) kmap_page_dma(px_base(px))
+#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+
+#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
+#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
+#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+ const uint64_t val)
{
- gen8_pte_t *pt_vaddr, scratch_pte;
int i;
+ uint64_t * const vaddr = kmap_page_dma(p);
- pt_vaddr = kmap_atomic(pt->page);
- scratch_pte = gen8_pte_encode(vm->scratch.addr,
- I915_CACHE_LLC, true);
+ for (i = 0; i < 512; i++)
+ vaddr[i] = val;
+
+ kunmap_page_dma(dev, vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+ const uint32_t val32)
+{
+ uint64_t v = val32;
+
+ v = v << 32 | val32;
+
+ fill_page_dma(dev, p, v);
+}
+
+static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+{
+ struct i915_page_scratch *sp;
+ int ret;
+
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (sp == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+ if (ret) {
+ kfree(sp);
+ return ERR_PTR(ret);
+ }
+
+ set_pages_uc(px_page(sp), 1);
+
+ return sp;
+}
- for (i = 0; i < GEN8_PTES; i++)
- pt_vaddr[i] = scratch_pte;
+static void free_scratch_page(struct drm_device *dev,
+ struct i915_page_scratch *sp)
+{
+ set_pages_wb(px_page(sp), 1);
- if (!HAS_LLC(vm->dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
+ cleanup_px(dev, sp);
+ kfree(sp);
}
-static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
+static struct i915_page_table *alloc_pt(struct drm_device *dev)
{
struct i915_page_table *pt;
const size_t count = INTEL_INFO(dev)->gen >= 8 ?
@@ -386,19 +430,13 @@ static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
if (!pt->used_ptes)
goto fail_bitmap;
- pt->page = alloc_page(GFP_KERNEL);
- if (!pt->page)
- goto fail_page;
-
- ret = i915_dma_map_single(pt, dev);
+ ret = setup_px(dev, pt);
if (ret)
- goto fail_dma;
+ goto fail_page_m;
return pt;
-fail_dma:
- __free_page(pt->page);
-fail_page:
+fail_page_m:
kfree(pt->used_ptes);
fail_bitmap:
kfree(pt);
@@ -406,18 +444,38 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void unmap_and_free_pd(struct i915_page_directory *pd,
- struct drm_device *dev)
+static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
{
- if (pd->page) {
- i915_dma_unmap_single(pd, dev);
- __free_page(pd->page);
- kfree(pd->used_pdes);
- kfree(pd);
- }
+ cleanup_px(dev, pt);
+ kfree(pt->used_ptes);
+ kfree(pt);
+}
+
+static void gen8_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt)
+{
+ gen8_pte_t scratch_pte;
+
+ scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true);
+
+ fill_px(vm->dev, pt, scratch_pte);
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt)
+{
+ gen6_pte_t scratch_pte;
+
+ WARN_ON(px_dma(vm->scratch_page) == 0);
+
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true, 0);
+
+ fill32_px(vm->dev, pt, scratch_pte);
}
-static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
+static struct i915_page_directory *alloc_pd(struct drm_device *dev)
{
struct i915_page_directory *pd;
int ret = -ENOMEM;
@@ -429,38 +487,52 @@ static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
sizeof(*pd->used_pdes), GFP_KERNEL);
if (!pd->used_pdes)
- goto free_pd;
-
- pd->page = alloc_page(GFP_KERNEL);
- if (!pd->page)
- goto free_bitmap;
+ goto fail_bitmap;
- ret = i915_dma_map_single(pd, dev);
+ ret = setup_px(dev, pd);
if (ret)
- goto free_page;
+ goto fail_page_m;
return pd;
-free_page:
- __free_page(pd->page);
-free_bitmap:
+fail_page_m:
kfree(pd->used_pdes);
-free_pd:
+fail_bitmap:
kfree(pd);
return ERR_PTR(ret);
}
+static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+{
+ if (px_page(pd)) {
+ cleanup_px(dev, pd);
+ kfree(pd->used_pdes);
+ kfree(pd);
+ }
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd)
+{
+ gen8_pde_t scratch_pde;
+
+ scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+ fill_px(vm->dev, pd, scratch_pde);
+}
+
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring,
+static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
BUG_ON(entry >= 4);
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -476,16 +548,14 @@ static int gen8_write_pdp(struct intel_engine_cs *ring,
}
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
int i, ret;
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
- struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
- dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
- /* The page directory might be NULL, but we need to clear out
- * whatever the previous context might have used. */
- ret = gen8_write_pdp(ring, i, pd_daddr);
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ ret = gen8_write_pdp(req, i, pd_daddr);
if (ret)
return ret;
}
@@ -507,13 +577,12 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
- scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+ scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
I915_CACHE_LLC, use_scratch);
while (num_entries) {
struct i915_page_directory *pd;
struct i915_page_table *pt;
- struct page *page_table;
if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
break;
@@ -525,25 +594,21 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
pt = pd->page_table[pde];
- if (WARN_ON(!pt->page))
+ if (WARN_ON(!px_page(pt)))
break;
- page_table = pt->page;
-
last_pte = pte + num_entries;
if (last_pte > GEN8_PTES)
last_pte = GEN8_PTES;
- pt_vaddr = kmap_atomic(page_table);
+ pt_vaddr = kmap_px(pt);
for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
num_entries--;
}
- if (!HAS_LLC(ppgtt->base.dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt);
pte = 0;
if (++pde == I915_PDES) {
@@ -575,18 +640,14 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
if (pt_vaddr == NULL) {
struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
struct i915_page_table *pt = pd->page_table[pde];
- struct page *page_table = pt->page;
-
- pt_vaddr = kmap_atomic(page_table);
+ pt_vaddr = kmap_px(pt);
}
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++pte == GEN8_PTES) {
- if (!HAS_LLC(ppgtt->base.dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
pdpe++;
@@ -595,58 +656,64 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pte = 0;
}
}
- if (pt_vaddr) {
- if (!HAS_LLC(ppgtt->base.dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
- }
-}
-
-static void __gen8_do_map_pt(gen8_pde_t * const pde,
- struct i915_page_table *pt,
- struct drm_device *dev)
-{
- gen8_pde_t entry =
- gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
- *pde = entry;
-}
-
-static void gen8_initialize_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
- gen8_pde_t *page_directory;
- struct i915_page_table *pt;
- int i;
-
- page_directory = kmap_atomic(pd->page);
- pt = ppgtt->scratch_pt;
- for (i = 0; i < I915_PDES; i++)
- /* Map the PDE to the page table */
- __gen8_do_map_pt(page_directory + i, pt, vm->dev);
- if (!HAS_LLC(vm->dev))
- drm_clflush_virt_range(page_directory, PAGE_SIZE);
- kunmap_atomic(page_directory);
+ if (pt_vaddr)
+ kunmap_px(ppgtt, pt_vaddr);
}
-static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
+static void gen8_free_page_tables(struct drm_device *dev,
+ struct i915_page_directory *pd)
{
int i;
- if (!pd->page)
+ if (!px_page(pd))
return;
for_each_set_bit(i, pd->used_pdes, I915_PDES) {
if (WARN_ON(!pd->page_table[i]))
continue;
- unmap_and_free_pt(pd->page_table[i], dev);
+ free_pt(dev, pd->page_table[i]);
pd->page_table[i] = NULL;
}
}
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+ struct drm_device *dev = vm->dev;
+
+ vm->scratch_page = alloc_scratch_page(dev);
+ if (IS_ERR(vm->scratch_page))
+ return PTR_ERR(vm->scratch_page);
+
+ vm->scratch_pt = alloc_pt(dev);
+ if (IS_ERR(vm->scratch_pt)) {
+ free_scratch_page(dev, vm->scratch_page);
+ return PTR_ERR(vm->scratch_pt);
+ }
+
+ vm->scratch_pd = alloc_pd(dev);
+ if (IS_ERR(vm->scratch_pd)) {
+ free_pt(dev, vm->scratch_pt);
+ free_scratch_page(dev, vm->scratch_page);
+ return PTR_ERR(vm->scratch_pd);
+ }
+
+ gen8_initialize_pt(vm, vm->scratch_pt);
+ gen8_initialize_pd(vm, vm->scratch_pd);
+
+ return 0;
+}
+
+static void gen8_free_scratch(struct i915_address_space *vm)
+{
+ struct drm_device *dev = vm->dev;
+
+ free_pd(dev, vm->scratch_pd);
+ free_pt(dev, vm->scratch_pt);
+ free_scratch_page(dev, vm->scratch_page);
+}
+
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
@@ -657,12 +724,12 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (WARN_ON(!ppgtt->pdp.page_directory[i]))
continue;
- gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
- unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+ gen8_free_page_tables(ppgtt->base.dev,
+ ppgtt->pdp.page_directory[i]);
+ free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
}
- unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev);
- unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ gen8_free_scratch(vm);
}
/**
@@ -698,24 +765,24 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
/* Don't reallocate page tables */
if (pt) {
/* Scratch is never allocated this way */
- WARN_ON(pt == ppgtt->scratch_pt);
+ WARN_ON(pt == ppgtt->base.scratch_pt);
continue;
}
- pt = alloc_pt_single(dev);
+ pt = alloc_pt(dev);
if (IS_ERR(pt))
goto unwind_out;
gen8_initialize_pt(&ppgtt->base, pt);
pd->page_table[pde] = pt;
- set_bit(pde, new_pts);
+ __set_bit(pde, new_pts);
}
return 0;
unwind_out:
for_each_set_bit(pde, new_pts, I915_PDES)
- unmap_and_free_pt(pd->page_table[pde], dev);
+ free_pt(dev, pd->page_table[pde]);
return -ENOMEM;
}
@@ -756,27 +823,24 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
- /* FIXME: upper bound must not overflow 32 bits */
- WARN_ON((start + length) > (1ULL << 32));
-
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
if (pd)
continue;
- pd = alloc_pd_single(dev);
+ pd = alloc_pd(dev);
if (IS_ERR(pd))
goto unwind_out;
gen8_initialize_pd(&ppgtt->base, pd);
pdp->page_directory[pdpe] = pd;
- set_bit(pdpe, new_pds);
+ __set_bit(pdpe, new_pds);
}
return 0;
unwind_out:
for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
- unmap_and_free_pd(pdp->page_directory[pdpe], dev);
+ free_pd(dev, pdp->page_directory[pdpe]);
return -ENOMEM;
}
@@ -830,6 +894,16 @@ err_out:
return -ENOMEM;
}
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
static int gen8_alloc_va_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length)
@@ -848,7 +922,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
* actually use the other side of the canonical address space.
*/
if (WARN_ON(start + length < start))
- return -ERANGE;
+ return -ENODEV;
+
+ if (WARN_ON(start + length > ppgtt->base.total))
+ return -ENODEV;
ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
if (ret)
@@ -876,7 +953,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
/* Allocations have completed successfully, so set the bitmaps, and do
* the mappings. */
gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
- gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+ gen8_pde_t *const page_directory = kmap_px(pd);
struct i915_page_table *pt;
uint64_t pd_len = gen8_clamp_pd(start, length);
uint64_t pd_start = start;
@@ -897,36 +974,36 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
gen8_pte_count(pd_start, pd_len));
/* Our pde is now pointing to the pagetable, pt */
- set_bit(pde, pd->used_pdes);
+ __set_bit(pde, pd->used_pdes);
/* Map the PDE to the page table */
- __gen8_do_map_pt(page_directory + pde, pt, vm->dev);
+ page_directory[pde] = gen8_pde_encode(px_dma(pt),
+ I915_CACHE_LLC);
/* NB: We haven't yet mapped ptes to pages. At this
* point we're still relying on insert_entries() */
}
- if (!HAS_LLC(vm->dev))
- drm_clflush_virt_range(page_directory, PAGE_SIZE);
-
- kunmap_atomic(page_directory);
+ kunmap_px(ppgtt, page_directory);
- set_bit(pdpe, ppgtt->pdp.used_pdpes);
+ __set_bit(pdpe, ppgtt->pdp.used_pdpes);
}
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ mark_tlbs_dirty(ppgtt);
return 0;
err_out:
while (pdpe--) {
for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
- unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev);
+ free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]);
}
for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
- unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+ free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]);
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ mark_tlbs_dirty(ppgtt);
return ret;
}
@@ -939,16 +1016,11 @@ err_out:
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
- if (IS_ERR(ppgtt->scratch_pt))
- return PTR_ERR(ppgtt->scratch_pt);
-
- ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev);
- if (IS_ERR(ppgtt->scratch_pd))
- return PTR_ERR(ppgtt->scratch_pd);
+ int ret;
- gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
- gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
+ ret = gen8_init_scratch(&ppgtt->base);
+ if (ret)
+ return ret;
ppgtt->base.start = 0;
ppgtt->base.total = 1ULL << 32;
@@ -980,12 +1052,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pte, pde, temp;
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
u32 expected;
gen6_pte_t *pt_vaddr;
- dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
+ const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
pd_entry = readl(ppgtt->pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -996,7 +1069,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+ pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+
for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
(pde * PAGE_SIZE * GEN6_PTES) +
@@ -1018,7 +1092,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
seq_puts(m, "\n");
}
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
}
}
@@ -1031,7 +1105,7 @@ static void gen6_write_pde(struct i915_page_directory *pd,
container_of(pd, struct i915_hw_ppgtt, pd);
u32 pd_entry;
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+ pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, ppgtt->pd_addr + pde);
@@ -1056,22 +1130,23 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
- BUG_ON(ppgtt->pd.pd_offset & 0x3f);
+ BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
- return (ppgtt->pd.pd_offset / 64) << 16;
+ return (ppgtt->pd.base.ggtt_offset / 64) << 16;
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -1087,8 +1162,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
@@ -1097,16 +1173,17 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -1120,7 +1197,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (ring->id != RCS) {
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
@@ -1129,8 +1206,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1214,19 +1292,20 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % GEN6_PTES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
if (last_pte > GEN6_PTES)
last_pte = GEN6_PTES;
- pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+ pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
num_entries -= last_pte - first_pte;
first_pte = 0;
@@ -1250,54 +1329,25 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+ pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);
if (++act_pte == GEN6_PTES) {
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
act_pt++;
act_pte = 0;
}
}
if (pt_vaddr)
- kunmap_atomic(pt_vaddr);
-}
-
-/* PDE TLBs are a pain invalidate pre GEN8. It requires a context reload. If we
- * are switching between contexts with the same LRCA, we also must do a force
- * restore.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
- /* If current vm != vm, */
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
-{
- gen6_pte_t *pt_vaddr, scratch_pte;
- int i;
-
- WARN_ON(vm->scratch.addr == 0);
-
- scratch_pte = vm->pte_encode(vm->scratch.addr,
- I915_CACHE_LLC, true, 0);
-
- pt_vaddr = kmap_atomic(pt->page);
-
- for (i = 0; i < GEN6_PTES; i++)
- pt_vaddr[i] = scratch_pte;
-
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
}
static int gen6_alloc_va_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+ uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
struct drm_device *dev = vm->dev;
@@ -1305,11 +1355,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct i915_page_table *pt;
- const uint32_t start_save = start, length_save = length;
+ uint32_t start, length, start_save, length_save;
uint32_t pde, temp;
int ret;
- WARN_ON(upper_32_bits(start));
+ if (WARN_ON(start_in + length_in > ppgtt->base.total))
+ return -ENODEV;
+
+ start = start_save = start_in;
+ length = length_save = length_in;
bitmap_zero(new_page_tables, I915_PDES);
@@ -1319,7 +1373,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
* tables.
*/
gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
- if (pt != ppgtt->scratch_pt) {
+ if (pt != vm->scratch_pt) {
WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
continue;
}
@@ -1327,7 +1381,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* We've already allocated a page table */
WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
- pt = alloc_pt_single(dev);
+ pt = alloc_pt(dev);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto unwind_out;
@@ -1336,7 +1390,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
gen6_initialize_pt(vm, pt);
ppgtt->pd.page_table[pde] = pt;
- set_bit(pde, new_page_tables);
+ __set_bit(pde, new_page_tables);
trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
}
@@ -1350,7 +1404,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
bitmap_set(tmp_bitmap, gen6_pte_index(start),
gen6_pte_count(start, length));
- if (test_and_clear_bit(pde, new_page_tables))
+ if (__test_and_clear_bit(pde, new_page_tables))
gen6_write_pde(&ppgtt->pd, pde, pt);
trace_i915_page_table_entry_map(vm, pde, pt,
@@ -1374,14 +1428,41 @@ unwind_out:
for_each_set_bit(pde, new_page_tables, I915_PDES) {
struct i915_page_table *pt = ppgtt->pd.page_table[pde];
- ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
- unmap_and_free_pt(pt, vm->dev);
+ ppgtt->pd.page_table[pde] = vm->scratch_pt;
+ free_pt(vm->dev, pt);
}
mark_tlbs_dirty(ppgtt);
return ret;
}
+static int gen6_init_scratch(struct i915_address_space *vm)
+{
+ struct drm_device *dev = vm->dev;
+
+ vm->scratch_page = alloc_scratch_page(dev);
+ if (IS_ERR(vm->scratch_page))
+ return PTR_ERR(vm->scratch_page);
+
+ vm->scratch_pt = alloc_pt(dev);
+ if (IS_ERR(vm->scratch_pt)) {
+ free_scratch_page(dev, vm->scratch_page);
+ return PTR_ERR(vm->scratch_pt);
+ }
+
+ gen6_initialize_pt(vm, vm->scratch_pt);
+
+ return 0;
+}
+
+static void gen6_free_scratch(struct i915_address_space *vm)
+{
+ struct drm_device *dev = vm->dev;
+
+ free_pt(dev, vm->scratch_pt);
+ free_scratch_page(dev, vm->scratch_page);
+}
+
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
@@ -1389,20 +1470,19 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
struct i915_page_table *pt;
uint32_t pde;
-
drm_mm_remove_node(&ppgtt->node);
gen6_for_all_pdes(pt, ppgtt, pde) {
- if (pt != ppgtt->scratch_pt)
- unmap_and_free_pt(pt, ppgtt->base.dev);
+ if (pt != vm->scratch_pt)
+ free_pt(ppgtt->base.dev, pt);
}
- unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
- unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev);
+ gen6_free_scratch(vm);
}
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
+ struct i915_address_space *vm = &ppgtt->base;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool retried = false;
@@ -1413,11 +1493,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
- ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
- if (IS_ERR(ppgtt->scratch_pt))
- return PTR_ERR(ppgtt->scratch_pt);
- gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+ ret = gen6_init_scratch(vm);
+ if (ret)
+ return ret;
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1448,7 +1527,7 @@ alloc:
return 0;
err_out:
- unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ gen6_free_scratch(vm);
return ret;
}
@@ -1464,7 +1543,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint32_t pde, temp;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
- ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+ ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
@@ -1500,11 +1579,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
- ppgtt->pd.pd_offset =
+ ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
- ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+ ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -1515,23 +1594,21 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->node.start / PAGE_SIZE);
DRM_DEBUG("Adding PPGTT at offset %x\n",
- ppgtt->pd.pd_offset << 10);
+ ppgtt->pd.base.ggtt_offset << 10);
return 0;
}
static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
ppgtt->base.dev = dev;
- ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
else
return gen8_ppgtt_init(ppgtt);
}
+
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1550,11 +1627,6 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
int i915_ppgtt_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i, ret = 0;
-
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
@@ -1573,16 +1645,23 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
else
MISSING_CASE(INTEL_INFO(dev)->gen);
- if (ppgtt) {
- for_each_ring(ring, dev_priv, i) {
- ret = ppgtt->switch_mm(ppgtt, ring);
- if (ret != 0)
- return ret;
- }
- }
+ return 0;
+}
- return ret;
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
+{
+ struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ if (i915.enable_execlists)
+ return 0;
+
+ if (!ppgtt)
+ return 0;
+
+ return ppgtt->switch_mm(ppgtt, req);
}
+
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
@@ -1723,9 +1802,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- if (obj->has_dma_mapping)
- return 0;
-
if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL))
@@ -1846,7 +1922,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = gen8_pte_encode(vm->scratch.addr,
+ scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC,
use_scratch);
for (i = 0; i < num_entries; i++)
@@ -1872,7 +1948,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -1926,6 +2003,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, pte_flags);
+
+ /* Note that the inconsistency here is due to the absence of the
+ * aliasing ppgtt on gen4 and earlier. Though we always
+ * request PIN_USER for execbuffer (translated to LOCAL_BIND),
+ * without the aliasing ppgtt we cannot honour that request and
+ * so must substitute it with a global binding. Since we do this
+ * behind the upper layers' back, we need to explicitly set
+ * the bound flag ourselves.
+ */
+ vma->bound |= GLOBAL_BIND;
+
}
if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -1972,10 +2060,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (!obj->has_dma_mapping)
- dma_unmap_sg(&dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
@@ -2099,7 +2185,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
void i915_gem_init_global_gtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long gtt_size, mappable_size;
+ u64 gtt_size, mappable_size;
gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
@@ -2129,42 +2215,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
vm->cleanup(vm);
}
-static int setup_scratch_page(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct page *page;
- dma_addr_t dma_addr;
-
- page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
- if (page == NULL)
- return -ENOMEM;
- set_pages_uc(page, 1);
-
-#ifdef CONFIG_INTEL_IOMMU
- dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, dma_addr))
- return -EINVAL;
-#else
- dma_addr = page_to_phys(page);
-#endif
- dev_priv->gtt.base.scratch.page = page;
- dev_priv->gtt.base.scratch.addr = dma_addr;
-
- return 0;
-}
-
-static void teardown_scratch_page(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct page *page = dev_priv->gtt.base.scratch.page;
-
- set_pages_wb(page, 1);
- pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(page);
-}
-
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -2247,8 +2297,8 @@ static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_page_scratch *scratch_page;
phys_addr_t gtt_phys_addr;
- int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
@@ -2270,14 +2320,17 @@ static int ggtt_probe_common(struct drm_device *dev,
return -ENOMEM;
}
- ret = setup_scratch_page(dev);
- if (ret) {
+ scratch_page = alloc_scratch_page(dev);
+ if (IS_ERR(scratch_page)) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(dev_priv->gtt.gsm);
+ return PTR_ERR(scratch_page);
}
- return ret;
+ dev_priv->gtt.base.scratch_page = scratch_page;
+
+ return 0;
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
@@ -2354,13 +2407,13 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
}
static int gen8_gmch_probe(struct drm_device *dev,
- size_t *gtt_total,
+ u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
- unsigned long *mappable_end)
+ u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int gtt_size;
+ u64 gtt_size;
u16 snb_gmch_ctl;
int ret;
@@ -2402,10 +2455,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
}
static int gen6_gmch_probe(struct drm_device *dev,
- size_t *gtt_total,
+ u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
- unsigned long *mappable_end)
+ u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int gtt_size;
@@ -2419,7 +2472,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
* a coarse sanity check.
*/
if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
- DRM_ERROR("Unknown GMADR size (%lx)\n",
+ DRM_ERROR("Unknown GMADR size (%llx)\n",
dev_priv->gtt.mappable_end);
return -ENXIO;
}
@@ -2449,14 +2502,14 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
iounmap(gtt->gsm);
- teardown_scratch_page(vm->dev);
+ free_scratch_page(vm->dev, vm->scratch_page);
}
static int i915_gmch_probe(struct drm_device *dev,
- size_t *gtt_total,
+ u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
- unsigned long *mappable_end)
+ u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -2513,17 +2566,17 @@ int i915_gem_gtt_init(struct drm_device *dev)
dev_priv->gtt.base.cleanup = gen6_gmch_remove;
}
+ gtt->base.dev = dev;
+
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
- gtt->base.dev = dev;
-
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_INFO("Memory usable by graphics device = %zdM\n",
+ DRM_INFO("Memory usable by graphics device = %lluM\n",
gtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
@@ -2546,6 +2599,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ struct i915_vma *vma;
+ bool flush;
i915_check_and_clear_faults(dev);
@@ -2555,16 +2610,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.total,
true);
+ /* Cache flush objects bound into GGTT and rebind them. */
+ vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj,
- &dev_priv->gtt.base);
- if (!vma)
- continue;
+ flush = false;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (vma->vm != vm)
+ continue;
- i915_gem_clflush_object(obj, obj->pin_display);
- WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
- }
+ WARN_ON(i915_vma_bind(vma, obj->cache_level,
+ PIN_UPDATE));
+
+ flush = true;
+ }
+ if (flush)
+ i915_gem_clflush_object(obj, obj->pin_display);
+ }
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
@@ -2691,30 +2753,17 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
- unsigned long size, pages, rot_pages;
+ unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
struct sg_page_iter sg_iter;
unsigned long i;
dma_addr_t *page_addr_list;
struct sg_table *st;
- unsigned int tile_pitch, tile_height;
- unsigned int width_pages, height_pages;
int ret = -ENOMEM;
- pages = obj->base.size / PAGE_SIZE;
-
- /* Calculate tiling geometry. */
- tile_height = intel_tile_height(dev, rot_info->pixel_format,
- rot_info->fb_modifier);
- tile_pitch = PAGE_SIZE / tile_height;
- width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
- height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
- rot_pages = width_pages * height_pages;
- size = rot_pages * PAGE_SIZE;
-
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+ page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
+ sizeof(dma_addr_t));
if (!page_addr_list)
return ERR_PTR(ret);
@@ -2723,7 +2772,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
if (!st)
goto err_st_alloc;
- ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+ ret = sg_alloc_table(st, size_pages, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
@@ -2735,13 +2784,15 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
}
/* Rotate the pages. */
- rotate_pages(page_addr_list, width_pages, height_pages, st);
+ rotate_pages(page_addr_list,
+ rot_info->width_pages, rot_info->height_pages,
+ st);
DRM_DEBUG_KMS(
- "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
- size, rot_info->pitch, rot_info->height,
- rot_info->pixel_format, width_pages, height_pages,
- rot_pages);
+ "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n",
+ obj->base.size, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, rot_info->width_pages,
+ rot_info->height_pages, size_pages);
drm_free_large(page_addr_list);
@@ -2753,10 +2804,10 @@ err_st_alloc:
drm_free_large(page_addr_list);
DRM_DEBUG_KMS(
- "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
- size, ret, rot_info->pitch, rot_info->height,
- rot_info->pixel_format, width_pages, height_pages,
- rot_pages);
+ "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n",
+ obj->base.size, ret, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, rot_info->width_pages,
+ rot_info->height_pages, size_pages);
return ERR_PTR(ret);
}
@@ -2874,9 +2925,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
vma->node.size,
VM_TO_TRACE_NAME(vma->vm));
+ /* XXX: i915_vma_pin() will fix this +- hack */
+ vma->pin_count++;
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
vma->node.size);
+ vma->pin_count--;
if (ret)
return ret;
}
@@ -2901,9 +2955,10 @@ size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- if (view->type == I915_GGTT_VIEW_NORMAL ||
- view->type == I915_GGTT_VIEW_ROTATED) {
+ if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
+ } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ return view->rotation_info.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0d46dd20bf71..e1cfa292f9ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -126,6 +126,8 @@ struct intel_rotation_info {
unsigned int pitch;
uint32_t pixel_format;
uint64_t fb_modifier;
+ unsigned int width_pages, height_pages;
+ uint64_t size;
};
struct i915_ggtt_view {
@@ -205,19 +207,34 @@ struct i915_vma {
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};
-struct i915_page_table {
+struct i915_page_dma {
struct page *page;
- dma_addr_t daddr;
+ union {
+ dma_addr_t daddr;
+
+ /* For gen6/gen7 only. This is the offset in the GGTT
+ * where the page directory entries for PPGTT begin
+ */
+ uint32_t ggtt_offset;
+ };
+};
+
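+/* Accessors for the i915_page_dma embedded as the "base" member of the
+ * page table structures defined below.
+ */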
+#define px_base(px) (&(px)->base)
+#define px_page(px) (px_base(px)->page)
+#define px_dma(px) (px_base(px)->daddr)
+
+struct i915_page_scratch {
+ struct i915_page_dma base;
+};
+
+struct i915_page_table {
+ struct i915_page_dma base;
unsigned long *used_ptes;
};
struct i915_page_directory {
- struct page *page; /* NULL for GEN6-GEN7 */
- union {
- uint32_t pd_offset;
- dma_addr_t daddr;
- };
+ struct i915_page_dma base;
unsigned long *used_pdes;
struct i915_page_table *page_table[I915_PDES]; /* PDEs */
@@ -233,13 +250,12 @@ struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
- unsigned long start; /* Start offset always 0 for dri2 */
- size_t total; /* size addr space maps (ex. 2GB for ggtt) */
+ u64 start; /* Start offset always 0 for dri2 */
+ u64 total; /* size addr space maps (ex. 2GB for ggtt) */
- struct {
- dma_addr_t addr;
- struct page *page;
- } scratch;
+ struct i915_page_scratch *scratch_page;
+ struct i915_page_table *scratch_pt;
+ struct i915_page_directory *scratch_pd;
/**
* List of objects currently involved in rendering.
@@ -300,9 +316,9 @@ struct i915_address_space {
*/
struct i915_gtt {
struct i915_address_space base;
- size_t stolen_size; /* Total size of stolen memory */
- unsigned long mappable_end; /* End offset that we can CPU map */
+ size_t stolen_size; /* Total size of stolen memory */
+ u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
@@ -314,9 +330,9 @@ struct i915_gtt {
int mtrr;
/* global gtt ops */
- int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+ int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
- unsigned long *mappable_end);
+ u64 *mappable_end);
};
struct i915_hw_ppgtt {
@@ -329,16 +345,13 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd;
};
- struct i915_page_table *scratch_pt;
- struct i915_page_directory *scratch_pd;
-
struct drm_i915_file_private *file_priv;
gen6_pte_t __iomem *pd_addr;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring);
+ struct drm_i915_gem_request *req);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
@@ -468,6 +481,14 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
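+/* Returns the DMA address of the page directory backing PDP entry @n,
+ * falling back to the scratch page directory when that entry has not
+ * been allocated yet.
+ */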
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+{
+ return test_bit(n, ppgtt->pdp.used_pdpes) ?
+ px_dma(ppgtt->pdp.page_directory[n]) :
+ px_dma(ppgtt->base.scratch_pd);
+}
+
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -475,6 +496,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 521548a08578..5026a6267a88 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -73,6 +73,24 @@ free_gem:
return ret;
}
+/*
+ * Macro to add commands to auxiliary batch.
+ * This macro only checks for page overflow before inserting the commands;
+ * this is sufficient as the null state generator makes the final batch
+ * with two passes to build command and state separately. At this point
+ * the size of both is known and it compacts them by relocating the state
+ * right after the commands, taking care of alignment, so we should have
+ * sufficient space below them for adding new commands.
+ */
+#define OUT_BATCH(batch, i, val) \
+ do { \
+ if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \
+ ret = -ENOSPC; \
+ goto err_out; \
+ } \
+ (batch)[(i)++] = (val); \
+ } while (0)
+
static int render_state_setup(struct render_state *so)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -96,8 +114,10 @@ static int render_state_setup(struct render_state *so)
s = lower_32_bits(r);
if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
- rodata->batch[i + 1] != 0)
- return -EINVAL;
+ rodata->batch[i + 1] != 0) {
+ ret = -EINVAL;
+ goto err_out;
+ }
d[i++] = s;
s = upper_32_bits(r);
@@ -108,6 +128,21 @@ static int render_state_setup(struct render_state *so)
d[i++] = s;
}
+
+ while (i % CACHELINE_DWORDS)
+ OUT_BATCH(d, i, MI_NOOP);
+
+ so->aux_batch_offset = i * sizeof(u32);
+
+ OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
+ so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
+
+ /*
+ * Since we are sending length, we need to strictly conform to
+ * all requirements. For Gen2 this must be a multiple of 8.
+ */
+ so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
+
kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
@@ -120,8 +155,14 @@ static int render_state_setup(struct render_state *so)
}
return 0;
+
+err_out:
+ kunmap(page);
+ return ret;
}
+#undef OUT_BATCH
+
void i915_gem_render_state_fini(struct render_state *so)
{
i915_gem_object_ggtt_unpin(so->obj);
@@ -152,29 +193,36 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
return 0;
}
-int i915_gem_render_state_init(struct intel_engine_cs *ring)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
struct render_state so;
int ret;
- ret = i915_gem_render_state_prepare(ring, &so);
+ ret = i915_gem_render_state_prepare(req->ring, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
- ret = ring->dispatch_execbuffer(ring,
- so.ggtt_offset,
- so.rodata->batch_items * 4,
- I915_DISPATCH_SECURE);
+ ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+ so.rodata->batch_items * 4,
+ I915_DISPATCH_SECURE);
if (ret)
goto out;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+ if (so.aux_batch_size > 8) {
+ ret = req->ring->dispatch_execbuffer(req,
+ (so.ggtt_offset +
+ so.aux_batch_offset),
+ so.aux_batch_size,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+ }
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
- ret = __i915_add_request(ring, NULL, so.obj);
- /* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index c44961ed3fad..e641bb093a90 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -37,9 +37,11 @@ struct render_state {
struct drm_i915_gem_object *obj;
u64 ggtt_offset;
int gen;
+ u32 aux_batch_size;
+ u32 aux_batch_offset;
};
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
+int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 348ed5abcdbf..a36cb95ec798 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,6 +42,31 @@
* for is a boon.
*/
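+/* All insertions into and removals from the stolen drm_mm range manager
+ * go through these helpers so that they are serialised by
+ * dev_priv->mm.stolen_lock.
+ */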
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment)
+{
+ int ret;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return -ENODEV;
+
+ mutex_lock(&dev_priv->mm.stolen_lock);
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
+ DRM_MM_SEARCH_DEFAULT);
+ mutex_unlock(&dev_priv->mm.stolen_lock);
+
+ return ret;
+}
+
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node)
+{
+ mutex_lock(&dev_priv->mm.stolen_lock);
+ drm_mm_remove_node(node);
+ mutex_unlock(&dev_priv->mm.stolen_lock);
+}
+
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -151,150 +176,115 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base;
}
-static int find_compression_threshold(struct drm_device *dev,
- struct drm_mm_node *node,
- int size,
- int fb_cpp)
+void i915_gem_cleanup_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int compression_threshold = 1;
- int ret;
-
- /* HACK: This code depends on what we will do in *_enable_fbc. If that
- * code changes, this code needs to change as well.
- *
- * The enable_fbc code will attempt to use one of our 2 compression
- * thresholds, therefore, in that case, we only have 1 resort.
- */
- /* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
- size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
- if (ret == 0)
- return compression_threshold;
-
-again:
- /* HW's ability to limit the CFB is 1:4 */
- if (compression_threshold > 4 ||
- (fb_cpp == 2 && compression_threshold == 2))
- return 0;
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return;
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
- size >>= 1, 4096,
- DRM_MM_SEARCH_DEFAULT);
- if (ret && INTEL_INFO(dev)->gen <= 4) {
- return 0;
- } else if (ret) {
- compression_threshold <<= 1;
- goto again;
- } else {
- return compression_threshold;
- }
+ drm_mm_takedown(&dev_priv->mm.stolen);
}
-static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
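+/* The *_get_stolen_reserved() helpers below decode the GEN6_STOLEN_RESERVED
+ * register into the base and size of the reserved portion of stolen memory.
+ */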
+static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ unsigned long *base, unsigned long *size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *uninitialized_var(compressed_llb);
- int ret;
-
- ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
- size, fb_cpp);
- if (!ret)
- goto err_llb;
- else if (ret > 1) {
- DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
- }
-
- dev_priv->fbc.threshold = ret;
-
- if (INTEL_INFO(dev_priv)->gen >= 5)
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
- } else {
- compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
- if (!compressed_llb)
- goto err_fb;
-
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
- 4096, 4096, DRM_MM_SEARCH_DEFAULT);
- if (ret)
- goto err_fb;
-
- dev_priv->fbc.compressed_llb = compressed_llb;
-
- I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
- I915_WRITE(FBC_LL_BASE,
- dev_priv->mm.stolen_base + compressed_llb->start);
+ uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+ *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN6_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN6_STOLEN_RESERVED_512K:
+ *size = 512 * 1024;
+ break;
+ case GEN6_STOLEN_RESERVED_256K:
+ *size = 256 * 1024;
+ break;
+ case GEN6_STOLEN_RESERVED_128K:
+ *size = 128 * 1024;
+ break;
+ default:
+ *size = 1024 * 1024;
+ MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
}
-
- dev_priv->fbc.uncompressed_size = size;
-
- DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
- size);
-
- return 0;
-
-err_fb:
- kfree(compressed_llb);
- drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-err_llb:
- pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
- return -ENOSPC;
}
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ unsigned long *base, unsigned long *size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return -ENODEV;
-
- if (size <= dev_priv->fbc.uncompressed_size)
- return 0;
-
- /* Release any current block */
- i915_gem_stolen_cleanup_compression(dev);
-
- return i915_setup_compression(dev, size, fb_cpp);
+ uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+ *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN7_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN7_STOLEN_RESERVED_256K:
+ *size = 256 * 1024;
+ break;
+ default:
+ *size = 1024 * 1024;
+ MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+ }
}
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
+static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ unsigned long *base, unsigned long *size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->fbc.uncompressed_size == 0)
- return;
-
- drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-
- if (dev_priv->fbc.compressed_llb) {
- drm_mm_remove_node(dev_priv->fbc.compressed_llb);
- kfree(dev_priv->fbc.compressed_llb);
+ uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+ *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN8_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_2M:
+ *size = 2 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_4M:
+ *size = 4 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_8M:
+ *size = 8 * 1024 * 1024;
+ break;
+ default:
+ *size = 8 * 1024 * 1024;
+ MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
}
-
- dev_priv->fbc.uncompressed_size = 0;
}
-void i915_gem_cleanup_stolen(struct drm_device *dev)
+static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ unsigned long *base, unsigned long *size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ unsigned long stolen_top;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return;
+ stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
- i915_gem_stolen_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->mm.stolen);
+ *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+ /* On these platforms, the register doesn't have a size field, so the
+ * size is the distance between the base and the top of the stolen
+ * memory. We also have the genuine case where base is zero and there's
+ * nothing reserved. */
+ if (*base == 0)
+ *size = 0;
+ else
+ *size = stolen_top - *base;
}
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 tmp;
- int bios_reserved = 0;
+ unsigned long reserved_total, reserved_base, reserved_size;
+ unsigned long stolen_top;
+
+ mutex_init(&dev_priv->mm.stolen_lock);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
@@ -310,26 +300,61 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (dev_priv->mm.stolen_base == 0)
return 0;
- DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
- dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
-
- if (INTEL_INFO(dev)->gen >= 8) {
- tmp = I915_READ(GEN7_BIOS_RESERVED);
- tmp >>= GEN8_BIOS_RESERVED_SHIFT;
- tmp &= GEN8_BIOS_RESERVED_MASK;
- bios_reserved = (1024*1024) << tmp;
- } else if (IS_GEN7(dev)) {
- tmp = I915_READ(GEN7_BIOS_RESERVED);
- bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
- 256*1024 : 1024*1024;
+ stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+
+ switch (INTEL_INFO(dev_priv)->gen) {
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ /* Assume the gen6 maximum for the older platforms. */
+ reserved_size = 1024 * 1024;
+ reserved_base = stolen_top - reserved_size;
+ break;
+ case 6:
+ gen6_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
+ case 7:
+ gen7_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
+ default:
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ bdw_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ else
+ gen8_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
+ }
+
+ /* It is possible for the reserved base to be zero, but the register
+ * field for size doesn't have a zero option. */
+ if (reserved_base == 0) {
+ reserved_size = 0;
+ reserved_base = stolen_top;
}
- if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+ if (reserved_base < dev_priv->mm.stolen_base ||
+ reserved_base + reserved_size > stolen_top) {
+ DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
+ reserved_base, reserved_base + reserved_size,
+ dev_priv->mm.stolen_base, stolen_top);
return 0;
+ }
+
+ /* It is possible for the reserved area to end before the end of stolen
+ * memory, so just consider the start. */
+ reserved_total = stolen_top - reserved_base;
+
+ DRM_DEBUG_KMS("Memory reserved for graphics device: %luK, usable: %luK\n",
+ dev_priv->gtt.stolen_size >> 10,
+ (dev_priv->gtt.stolen_size - reserved_total) >> 10);
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
- bios_reserved);
+ reserved_total);
return 0;
}
@@ -386,8 +411,10 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
if (obj->stolen) {
- drm_mm_remove_node(obj->stolen);
+ i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
@@ -416,7 +443,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj->pages == NULL)
goto cleanup;
- obj->has_dma_mapping = true;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
@@ -449,8 +475,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!stolen)
return NULL;
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
- 4096, DRM_MM_SEARCH_DEFAULT);
+ ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
kfree(stolen);
return NULL;
@@ -460,7 +485,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (obj)
return obj;
- drm_mm_remove_node(stolen);
+ i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}
@@ -495,7 +520,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
stolen->start = stolen_offset;
stolen->size = size;
+ mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+ mutex_unlock(&dev_priv->mm.stolen_lock);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);
@@ -505,7 +532,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
- drm_mm_remove_node(stolen);
+ i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}
@@ -546,7 +573,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
err_vma:
i915_gem_vma_destroy(vma);
err_out:
- drm_mm_remove_node(stolen);
+ i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
drm_gem_object_unreference(&obj->base);
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..8a6717cc265c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -31,201 +31,32 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-/** @file i915_gem_tiling.c
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses are in the same cacheline.
- * Performance improvement from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled. However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y. So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip -- Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU also sometimes XORs in higher bits as well, to improve
- * bandwidth doing strided access like we do so frequently in graphics. This
- * is called "Channel XOR Randomization" in the MCH documentation. The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
+/**
+ * DOC: buffer object tiling
*
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
+ * i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface to
+ * declare fence register requirements.
*
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
+ * In principle GEM doesn't care at all about the internal data layout of an
+ * object, and hence it also doesn't care about tiling or swizzling. There are two
+ * exceptions:
*
- * When bit 17 is XORed in, we simply refuse to tile at all. Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
+ * - For X and Y tiling the hardware provides detilers for CPU access, so-called
+ *   fences. Since there is only a limited number of them, the kernel must manage
+ * these, and therefore userspace must tell the kernel the object tiling if it
+ * wants to use fences for detiling.
+ * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
+ * depends upon the physical page frame number. When swapping such objects the
+ * page frame number might change and the kernel must be able to fix this up
+ *   and hence know the tiling. Note that on a subset of platforms with
+ * asymmetric memory channel population the swizzling pattern changes in an
+ * unknown way, and for those the kernel simply forbids swapping completely.
*
- * Otherwise, if interleaved, we have to tell the 3d driver what the address
- * swizzling it needs to do is, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
-
-/**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
+ * Since neither of these applies to new tiling layouts on modern platforms like
+ * W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y tiled.
+ * Anything else can be handled in userspace entirely without the kernel's
+ * involvement.
*/
-void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-
- if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
- /*
- * On BDW+, swizzling is not used. We leave the CPU memory
- * controller in charge of optimizing memory accesses without
- * the extra address manipulation GPU side.
- *
- * VLV and CHV don't have GPU swizzling.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_INFO(dev)->gen >= 6) {
- if (dev_priv->preserve_bios_swizzle) {
- if (I915_READ(DISP_ARB_CTL) &
- DISP_TILE_SURFACE_SWIZZLING) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- }
- } else {
- uint32_t dimm_c0, dimm_c1;
- dimm_c0 = I915_READ(MAD_DIMM_C0);
- dimm_c1 = I915_READ(MAD_DIMM_C1);
- dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- /* Enable swizzling when the channels are populated
- * with identically sized dimms. We don't need to check
- * the 3rd channel because no cpu with gpu attached
- * ships in that configuration. Also, swizzling only
- * makes sense for 2 channels anyway. */
- if (dimm_c0 == dimm_c1) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- }
- }
- } else if (IS_GEN5(dev)) {
- /* On Ironlake whatever DRAM config, GPU always do
- * same swizzling setup.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN2(dev)) {
- /* As far as we know, the 865 doesn't have these bit 6
- * swizzling issues.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
- uint32_t dcc;
-
- /* On 9xx chipsets, channel interleave by the CPU is
- * determined by DCC. For single-channel, neither the CPU
- * nor the GPU do swizzling. For dual channel interleaved,
- * the GPU's interleave is bit 9 and 10 for X tiled, and bit
- * 9 for Y tiled. The CPU's interleave is independent, and
- * can be based on either bit 11 (haven't seen this yet) or
- * bit 17 (common).
- */
- dcc = I915_READ(DCC);
- switch (dcc & DCC_ADDRESSING_MODE_MASK) {
- case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- break;
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
- if (dcc & DCC_CHANNEL_XOR_DISABLE) {
- /* This is the base swizzling by the GPU for
- * tiled buffers.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
- /* Bit 11 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
- swizzle_y = I915_BIT_6_SWIZZLE_9_11;
- } else {
- /* Bit 17 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
- swizzle_y = I915_BIT_6_SWIZZLE_9_17;
- }
- break;
- }
-
- /* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev)) {
- uint32_t ddc2 = I915_READ(DCC2);
-
- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
- dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
- }
-
- if (dcc == 0xffffffff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
- "Disabling tiling.\n");
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
- } else {
- /* The 965, G33, and newer, have a very flexible memory
- * configuration. It will enable dual-channel mode
- * (interleaving) on as much memory as it can, and the GPU
- * will additionally sometimes enable different bit 6
- * swizzling for tiled objects from the CPU.
- *
- * Here's what I found on the G965:
- * slot fill memory size swizzling
- * 0A 0B 1A 1B 1-ch 2-ch
- * 512 0 0 0 512 0 O
- * 512 0 512 0 16 1008 X
- * 512 0 0 512 16 1008 X
- * 0 512 0 512 16 1008 X
- * 1024 1024 1024 0 2048 1024 O
- *
- * We could probably detect this based on either the DRB
- * matching, which was the case for the swizzling required in
- * the table above, or from the 1-ch value being less than
- * the minimum size of a rank.
- */
- if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
- }
-
- dev_priv->mm.bit_6_swizzle_x = swizzle_x;
- dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
/* Check pitch constraints for all chips & tiling formats */
static bool
@@ -313,8 +144,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
}
/**
+ * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
@@ -432,7 +273,17 @@ err:
}
/**
+ * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
* Returns the current tiling mode and required bit 6 swizzling for the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -464,7 +315,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- args->phys_swizzle_mode = args->swizzle_mode;
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+ else
+ args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
@@ -475,75 +329,3 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
return 0;
}
-
-/**
- * Swap every 64 bytes of this page around, to account for it having a new
- * bit 17 of its physical address and therefore being interpreted differently
- * by the GPU.
- */
-static void
-i915_gem_swizzle_page(struct page *page)
-{
- char temp[64];
- char *vaddr;
- int i;
-
- vaddr = kmap(page);
-
- for (i = 0; i < PAGE_SIZE; i += 128) {
- memcpy(temp, &vaddr[i], 64);
- memcpy(&vaddr[i], &vaddr[i + 64], 64);
- memcpy(&vaddr[i + 64], temp, 64);
- }
-
- kunmap(page);
-}
-
-void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
- struct sg_page_iter sg_iter;
- int i;
-
- if (obj->bit_17 == NULL)
- return;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
- char new_bit_17 = page_to_phys(page) >> 17;
- if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(page);
- set_page_dirty(page);
- }
- i++;
- }
-}
-
-void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
- struct sg_page_iter sg_iter;
- int page_count = obj->base.size >> PAGE_SHIFT;
- int i;
-
- if (obj->bit_17 == NULL) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
- sizeof(long), GFP_KERNEL);
- if (obj->bit_17 == NULL) {
- DRM_ERROR("Failed to allocate memory for bit 17 "
- "record\n");
- return;
- }
- }
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
- __set_bit(i, obj->bit_17);
- else
- __clear_bit(i, obj->bit_17);
- i++;
- }
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1f4e5a32a16e..8fd431bcdfd3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -545,6 +545,26 @@ err:
return ret;
}
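+/* Builds an sg table from the supplied user pages and creates the DMA
+ * mapping for the object; on failure the partially constructed sg table
+ * is released again.
+ */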
+static int
+__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
+ struct page **pvec, int num_pages)
+{
+ int ret;
+
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret) {
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+ obj->pages = NULL;
+ }
+
+ return ret;
+}
+
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
@@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (obj->userptr.work != &work->work) {
ret = 0;
} else if (pinned == num_pages) {
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (ret == 0) {
list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+
pinned = 0;
}
}
@@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
}
}
} else {
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (ret == 0) {
obj->userptr.work = NULL;
pinned = 0;
@@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
if (obj->madv != I915_MADV_WILLNEED)
obj->dirty = 0;
+ i915_gem_gtt_finish_object(obj);
+
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
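
The new __i915_gem_userptr_set_pages() helper above pairs st_set_pages() with i915_gem_gtt_prepare_object() and releases the page table again if the second step fails, so the object is left untouched on error. The sketch below restates that acquire/attempt/unwind pattern with hypothetical names; it is not the driver's API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	int *pages;	/* stands in for obj->pages (the sg_table) */
	int mapped;	/* stands in for the DMA/GTT mapping */
};

static int prepare_mapping(struct object *obj, int simulate_failure)
{
	if (simulate_failure)
		return -EIO;	/* pretend the mapping step failed */
	obj->mapped = 1;
	return 0;
}

static int object_set_pages(struct object *obj, int count, int simulate_failure)
{
	int ret;

	obj->pages = calloc(count, sizeof(*obj->pages));	/* step A */
	if (!obj->pages)
		return -ENOMEM;

	ret = prepare_mapping(obj, simulate_failure);		/* step B */
	if (ret) {
		free(obj->pages);	/* unwind A so the caller sees a clean object */
		obj->pages = NULL;
	}
	return ret;
}

int main(void)
{
	struct object obj = { 0 };

	printf("fail path: %d, pages left: %p\n",
	       object_set_pages(&obj, 8, 1), (void *)obj.pages);
	printf("ok path:   %d\n", object_set_pages(&obj, 8, 0));
	free(obj.pages);
	return 0;
}
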
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6f4256918f76..41d0739e6fdf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -369,6 +369,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+ err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
if (INTEL_INFO(dev)->gen >= 8) {
@@ -1266,6 +1267,10 @@ static void i915_error_capture_msg(struct drm_device *dev,
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
+ error->iommu = -1;
+#ifdef CONFIG_INTEL_IOMMU
+ error->iommu = intel_iommu_gfx_mapped;
+#endif
error->reset_count = i915_reset_count(&dev_priv->gpu_error);
error->suspend_count = dev_priv->suspend_count;
}
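
A short note on the error-capture change above: -1 is recorded when the kernel cannot know the IOMMU state (CONFIG_INTEL_IOMMU not built in), while 0/1 report the actual state. The sketch below restates that tri-state sentinel pattern in plain C; the config symbol and variable are made-up stand-ins for the real ones.

#include <stdio.h>

static int capture_iommu_state(void)
{
	int iommu = -1;		/* unknown unless support is compiled in */
#ifdef EXAMPLE_INTEL_IOMMU
	iommu = example_iommu_gfx_mapped;	/* hypothetical state variable */
#endif
	return iommu;
}

int main(void)
{
	printf("IOMMU enabled?: %d\n", capture_iommu_state());
	return 0;
}
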
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
new file mode 100644
index 000000000000..ccdc6c8ac20b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_GUC_REG_H_
+#define _I915_GUC_REG_H_
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define GUC_STATUS 0xc000
+#define GS_BOOTROM_SHIFT 1
+#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
+#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
+#define GS_UKERNEL_SHIFT 8
+#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
+#define GS_MIA_SHIFT 16
+#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
+
+#define GUC_WOPCM_SIZE 0xc050
+#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
+#define GUC_WOPCM_OFFSET 0x80000 /* 512KB */
+
+#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4))
+
+#define UOS_RSA_SCRATCH_0 0xc200
+#define DMA_ADDR_0_LOW 0xc300
+#define DMA_ADDR_0_HIGH 0xc304
+#define DMA_ADDR_1_LOW 0xc308
+#define DMA_ADDR_1_HIGH 0xc30c
+#define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
+#define DMA_ADDRESS_SPACE_GTT (8 << 16)
+#define DMA_COPY_SIZE 0xc310
+#define DMA_CTRL 0xc314
+#define UOS_MOVE (1<<4)
+#define START_DMA (1<<0)
+#define DMA_GUC_WOPCM_OFFSET 0xc340
+
+#define GEN8_GT_PM_CONFIG 0x138140
+#define GEN9_GT_PM_CONFIG 0x13816c
+#define GEN8_GT_DOORBELL_ENABLE (1<<0)
+
+#define GEN8_GTCR 0x4274
+#define GEN8_GTCR_INVALIDATE (1<<0)
+
+#define GUC_ARAT_C6DIS 0xA178
+
+#define GUC_SHIM_CONTROL 0xc064
+#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
+#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
+#define GUC_ENABLE_MIA_CACHING (1<<2)
+#define GUC_GEN10_MSGCH_ENABLE (1<<4)
+#define GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA (1<<9)
+#define GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA (1<<10)
+#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
+#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
+
+#define GUC_SHIM_CONTROL_VALUE (GUC_DISABLE_SRAM_INIT_TO_ZEROES | \
+ GUC_ENABLE_READ_CACHE_LOGIC | \
+ GUC_ENABLE_MIA_CACHING | \
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA)
+
+#define HOST2GUC_INTERRUPT 0xc4c8
+#define HOST2GUC_TRIGGER (1<<0)
+
+#define DRBMISC1 0x1984
+#define DOORBELL_ENABLE (1<<0)
+
+#define GEN8_DRBREGL(x) (0x1000 + (x) * 8)
+#define GEN8_DRB_VALID (1<<0)
+#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4)
+
+#define DE_GUCRMR 0x44054
+
+#define GUC_BCS_RCS_IER 0xC550
+#define GUC_VCS2_VCS1_IER 0xC554
+#define GUC_WD_VECS_IER 0xC558
+#define GUC_PM_P24C_IER 0xC55C
+
+#endif
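
As a quick sanity check of the parameterized register macros in this new header, the standalone program below expands a few doorbell and scratch offsets using the same macro definitions; it only does arithmetic and touches no hardware.

#include <stdio.h>

#define SOFT_SCRATCH(n)		(0xc180 + ((n) * 4))
#define GEN8_DRBREGL(x)		(0x1000 + (x) * 8)	/* low dword of doorbell x */
#define GEN8_DRBREGU(x)		(GEN8_DRBREGL(x) + 4)	/* high dword of doorbell x */

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("doorbell %d: low 0x%04x high 0x%04x scratch 0x%04x\n",
		       i, GEN8_DRBREGL(i), GEN8_DRBREGU(i), SOFT_SCRATCH(i));
	return 0;
}
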
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 176de6322e4d..97f3a5640289 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -35,107 +35,20 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-typedef struct _drm_i915_batchbuffer32 {
- int start; /* agp offset */
- int used; /* nr bytes in use */
- int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
- int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
- int num_cliprects; /* mulitpass with multiple cliprects? */
- u32 cliprects; /* pointer to userspace cliprects */
-} drm_i915_batchbuffer32_t;
-
-static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_batchbuffer32_t batchbuffer32;
- drm_i915_batchbuffer_t __user *batchbuffer;
-
- if (copy_from_user
- (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
- return -EFAULT;
-
- batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
- if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
- || __put_user(batchbuffer32.start, &batchbuffer->start)
- || __put_user(batchbuffer32.used, &batchbuffer->used)
- || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
- || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
- || __put_user(batchbuffer32.num_cliprects,
- &batchbuffer->num_cliprects)
- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
- &batchbuffer->cliprects))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
- (unsigned long)batchbuffer);
-}
-
-typedef struct _drm_i915_cmdbuffer32 {
- u32 buf; /* pointer to userspace command buffer */
- int sz; /* nr bytes in buf */
- int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
- int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
- int num_cliprects; /* mulitpass with multiple cliprects? */
- u32 cliprects; /* pointer to userspace cliprects */
-} drm_i915_cmdbuffer32_t;
-
-static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_cmdbuffer32_t cmdbuffer32;
- drm_i915_cmdbuffer_t __user *cmdbuffer;
-
- if (copy_from_user
- (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
- return -EFAULT;
-
- cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
- if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
- &cmdbuffer->buf)
- || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
- || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
- || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
- || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
- &cmdbuffer->cliprects))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
- (unsigned long)cmdbuffer);
-}
-
-typedef struct drm_i915_irq_emit32 {
- u32 irq_seq;
-} drm_i915_irq_emit32_t;
-
-static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_irq_emit32_t req32;
- drm_i915_irq_emit_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((int __user *)(unsigned long)req32.irq_seq,
- &request->irq_seq))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
- (unsigned long)request);
-}
-typedef struct drm_i915_getparam32 {
- int param;
+struct drm_i915_getparam32 {
+ s32 param;
+ /*
+ * We screwed up the generic ioctl struct here and used a variable-sized
+ * pointer. Use u32 in the compat struct to match the 32bit pointer
+ * userspace expects.
+ */
u32 value;
-} drm_i915_getparam32_t;
+};
static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_i915_getparam32_t req32;
+ struct drm_i915_getparam32 req32;
drm_i915_getparam_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
@@ -152,41 +65,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
(unsigned long)request);
}
-typedef struct drm_i915_mem_alloc32 {
- int region;
- int alignment;
- int size;
- u32 region_offset; /* offset from start of fb or agp */
-} drm_i915_mem_alloc32_t;
-
-static int compat_i915_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_mem_alloc32_t req32;
- drm_i915_mem_alloc_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.region, &request->region)
- || __put_user(req32.alignment, &request->alignment)
- || __put_user(req32.size, &request->size)
- || __put_user((void __user *)(unsigned long)req32.region_offset,
- &request->region_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
- (unsigned long)request);
-}
-
static drm_ioctl_compat_t *i915_compat_ioctls[] = {
- [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
- [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
- [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
- [DRM_I915_ALLOC] = compat_i915_alloc
};
/**
@@ -204,7 +84,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
drm_ioctl_compat_t *fn = NULL;
int ret;
- if (nr < DRM_COMMAND_BASE)
+ if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
return drm_compat_ioctl(filp, cmd, arg);
if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
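
With the legacy compat wrappers gone, the only remaining shim translates the getparam ioctl, whose value member is a kernel pointer in the native struct but must stay a u32 for 32-bit userspace. The userspace-only sketch below (hypothetical struct names) shows why the two layouts differ in size and how a 32-bit handle is widened back into a pointer, in the spirit of compat_ptr().

#include <stdint.h>
#include <stdio.h>

struct getparam64 {
	int32_t param;
	int *value;		/* native pointer: 8 bytes on a 64-bit kernel */
};

struct getparam32 {
	int32_t param;
	uint32_t value;		/* fixed-size stand-in for the user pointer */
};

int main(void)
{
	struct getparam32 req32 = { .param = 1, .value = 0x1000 };
	struct getparam64 req = {
		.param = req32.param,
		/* widen the 32-bit handle back into a pointer */
		.value = (int *)(uintptr_t)req32.value,
	};

	printf("32-bit form: %zu bytes, 64-bit form: %zu bytes, value %p\n",
	       sizeof(req32), sizeof(req), (void *)req.value);
	return 0;
}
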
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6bb72dca3ff..1118c39281f9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -564,8 +564,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- const struct drm_display_mode *mode =
- &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
@@ -620,7 +619,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->base.hwmode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
@@ -647,14 +646,14 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
unsigned long irqflags;
- if (!intel_crtc->active) {
+ if (WARN_ON(!mode->crtc_clock)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
@@ -796,7 +795,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return -EINVAL;
}
- if (!crtc->state->enable) {
+ if (!crtc->hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
return -EBUSY;
}
@@ -805,151 +804,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
crtc,
- &to_intel_crtc(crtc)->config->base.adjusted_mode);
-}
-
-static bool intel_hpd_irq_event(struct drm_device *dev,
- struct drm_connector *connector)
-{
- enum drm_connector_status old_status;
-
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- old_status = connector->status;
-
- connector->status = connector->funcs->detect(connector, false);
- if (old_status == connector->status)
- return false;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
-
- return true;
-}
-
-static void i915_digport_work_func(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, dig_port_work);
- u32 long_port_mask, short_port_mask;
- struct intel_digital_port *intel_dig_port;
- int i;
- u32 old_bits = 0;
-
- spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->long_hpd_port_mask;
- dev_priv->long_hpd_port_mask = 0;
- short_port_mask = dev_priv->short_hpd_port_mask;
- dev_priv->short_hpd_port_mask = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- for (i = 0; i < I915_MAX_PORTS; i++) {
- bool valid = false;
- bool long_hpd = false;
- intel_dig_port = dev_priv->hpd_irq_port[i];
- if (!intel_dig_port || !intel_dig_port->hpd_pulse)
- continue;
-
- if (long_port_mask & (1 << i)) {
- valid = true;
- long_hpd = true;
- } else if (short_port_mask & (1 << i))
- valid = true;
-
- if (valid) {
- enum irqreturn ret;
-
- ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
- if (ret == IRQ_NONE) {
- /* fall back to old school hpd */
- old_bits |= (1 << intel_dig_port->base.hpd_pin);
- }
- }
- }
-
- if (old_bits) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hpd_event_bits |= old_bits;
- spin_unlock_irq(&dev_priv->irq_lock);
- schedule_work(&dev_priv->hotplug_work);
- }
-}
-
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
-
-static void i915_hotplug_work_func(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, hotplug_work);
- struct drm_device *dev = dev_priv->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_connector *intel_connector;
- struct intel_encoder *intel_encoder;
- struct drm_connector *connector;
- bool hpd_disabled = false;
- bool changed = false;
- u32 hpd_event_bits;
-
- mutex_lock(&mode_config->mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
- spin_lock_irq(&dev_priv->irq_lock);
-
- hpd_event_bits = dev_priv->hpd_event_bits;
- dev_priv->hpd_event_bits = 0;
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- intel_connector = to_intel_connector(connector);
- if (!intel_connector->encoder)
- continue;
- intel_encoder = intel_connector->encoder;
- if (intel_encoder->hpd_pin > HPD_NONE &&
- dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
- connector->polled == DRM_CONNECTOR_POLL_HPD) {
- DRM_INFO("HPD interrupt storm detected on connector %s: "
- "switching from hotplug detection to polling\n",
- connector->name);
- dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT
- | DRM_CONNECTOR_POLL_DISCONNECT;
- hpd_disabled = true;
- }
- if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
- DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- connector->name, intel_encoder->hpd_pin);
- }
- }
- /* if there were no outputs to poll, poll was disabled,
- * therefore make sure it's enabled when disabling HPD on
- * some connectors */
- if (hpd_disabled) {
- drm_kms_helper_poll_enable(dev);
- mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
- msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
- }
-
- spin_unlock_irq(&dev_priv->irq_lock);
-
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- intel_connector = to_intel_connector(connector);
- if (!intel_connector->encoder)
- continue;
- intel_encoder = intel_connector->encoder;
- if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
- if (intel_encoder->hot_plug)
- intel_encoder->hot_plug(intel_encoder);
- if (intel_hpd_irq_event(dev, connector))
- changed = true;
- }
- }
- mutex_unlock(&mode_config->mutex);
-
- if (changed)
- drm_kms_helper_hotplug_event(dev);
+ &crtc->hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
@@ -1372,165 +1227,78 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
return ret;
}
-#define HPD_STORM_DETECT_PERIOD 1000
-#define HPD_STORM_THRESHOLD 5
-
-static int pch_port_to_hotplug_shift(enum port port)
+static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
case PORT_A:
- case PORT_E:
- default:
- return -1;
+ return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
case PORT_B:
- return 0;
+ return val & PORTB_HOTPLUG_LONG_DETECT;
case PORT_C:
- return 8;
+ return val & PORTC_HOTPLUG_LONG_DETECT;
case PORT_D:
- return 16;
+ return val & PORTD_HOTPLUG_LONG_DETECT;
+ default:
+ return false;
}
}
-static int i915_port_to_hotplug_shift(enum port port)
+static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
- case PORT_A:
- case PORT_E:
- default:
- return -1;
case PORT_B:
- return 17;
+ return val & PORTB_HOTPLUG_LONG_DETECT;
case PORT_C:
- return 19;
+ return val & PORTC_HOTPLUG_LONG_DETECT;
case PORT_D:
- return 21;
+ return val & PORTD_HOTPLUG_LONG_DETECT;
+ default:
+ return false;
}
}
-static enum port get_port_from_pin(enum hpd_pin pin)
+static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
- switch (pin) {
- case HPD_PORT_B:
- return PORT_B;
- case HPD_PORT_C:
- return PORT_C;
- case HPD_PORT_D:
- return PORT_D;
+ switch (port) {
+ case PORT_B:
+ return val & PORTB_HOTPLUG_INT_LONG_PULSE;
+ case PORT_C:
+ return val & PORTC_HOTPLUG_INT_LONG_PULSE;
+ case PORT_D:
+ return val & PORTD_HOTPLUG_INT_LONG_PULSE;
default:
- return PORT_A; /* no hpd */
+ return false;
}
}
-static void intel_hpd_irq_handler(struct drm_device *dev,
- u32 hotplug_trigger,
- u32 dig_hotplug_reg,
- const u32 hpd[HPD_NUM_PINS])
+/* Get a bit mask of pins that have triggered, and which ones may be long. */
+static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
+ u32 hotplug_trigger, u32 dig_hotplug_reg,
+ const u32 hpd[HPD_NUM_PINS],
+ bool long_pulse_detect(enum port port, u32 val))
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
enum port port;
- bool storm_detected = false;
- bool queue_dig = false, queue_hp = false;
- u32 dig_shift;
- u32 dig_port_mask = 0;
-
- if (!hotplug_trigger)
- return;
+ int i;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg);
+ *pin_mask = 0;
+ *long_mask = 0;
- spin_lock(&dev_priv->irq_lock);
- for (i = 1; i < HPD_NUM_PINS; i++) {
- if (!(hpd[i] & hotplug_trigger))
+ for_each_hpd_pin(i) {
+ if ((hpd[i] & hotplug_trigger) == 0)
continue;
- port = get_port_from_pin(i);
- if (port && dev_priv->hpd_irq_port[port]) {
- bool long_hpd;
-
- if (!HAS_GMCH_DISPLAY(dev_priv)) {
- dig_shift = pch_port_to_hotplug_shift(port);
- long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
- } else {
- dig_shift = i915_port_to_hotplug_shift(port);
- long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
- }
+ *pin_mask |= BIT(i);
- DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
- port_name(port),
- long_hpd ? "long" : "short");
- /* for long HPD pulses we want to have the digital queue happen,
- but we still want HPD storm detection to function. */
- if (long_hpd) {
- dev_priv->long_hpd_port_mask |= (1 << port);
- dig_port_mask |= hpd[i];
- } else {
- /* for short HPD just trigger the digital queue */
- dev_priv->short_hpd_port_mask |= (1 << port);
- hotplug_trigger &= ~hpd[i];
- }
- queue_dig = true;
- }
- }
-
- for (i = 1; i < HPD_NUM_PINS; i++) {
- if (hpd[i] & hotplug_trigger &&
- dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
- /*
- * On GMCH platforms the interrupt mask bits only
- * prevent irq generation, not the setting of the
- * hotplug bits itself. So only WARN about unexpected
- * interrupts on saner platforms.
- */
- WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
- "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
- hotplug_trigger, i, hpd[i]);
-
- continue;
- }
-
- if (!(hpd[i] & hotplug_trigger) ||
- dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
+ if (!intel_hpd_pin_to_port(i, &port))
continue;
- if (!(dig_port_mask & hpd[i])) {
- dev_priv->hpd_event_bits |= (1 << i);
- queue_hp = true;
- }
-
- if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
- dev_priv->hpd_stats[i].hpd_last_jiffies
- + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
- dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
- dev_priv->hpd_stats[i].hpd_cnt = 0;
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
- } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
- dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
- dev_priv->hpd_event_bits &= ~(1 << i);
- DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
- storm_detected = true;
- } else {
- dev_priv->hpd_stats[i].hpd_cnt++;
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
- dev_priv->hpd_stats[i].hpd_cnt);
- }
+ if (long_pulse_detect(port, dig_hotplug_reg))
+ *long_mask |= BIT(i);
}
- if (storm_detected)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock(&dev_priv->irq_lock);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask);
- /*
- * Our hotplug handler can grab modeset locks (by calling down into the
- * fb helpers). Hence it must not be run on our own dev-priv->wq work
- * queue for otherwise the flush_work in the pageflip code will
- * deadlock.
- */
- if (queue_dig)
- queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
- if (queue_hp)
- schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
@@ -1755,28 +1523,35 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 pin_mask, long_mask;
- if (hotplug_status) {
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- /*
- * Make sure hotplug status is cleared before we clear IIR, or else we
- * may miss hotplug events.
- */
- POSTING_READ(PORT_HOTPLUG_STAT);
+ if (!hotplug_status)
+ return;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ /*
+ * Make sure hotplug status is cleared before we clear IIR, or else we
+ * may miss hotplug events.
+ */
+ POSTING_READ(PORT_HOTPLUG_STAT);
- intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
- } else {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
- intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
- }
+ intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ hotplug_trigger, hpd_status_g4x,
+ i9xx_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
- hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
dp_aux_irq_handler(dev);
+ } else {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+ intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ hotplug_trigger, hpd_status_i915,
+ i9xx_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
}
@@ -1875,12 +1650,18 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ if (hotplug_trigger) {
+ u32 dig_hotplug_reg, pin_mask, long_mask;
- intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+ intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ dig_hotplug_reg, hpd_ibx,
+ pch_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ }
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1972,12 +1753,18 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ if (hotplug_trigger) {
+ u32 dig_hotplug_reg, pin_mask, long_mask;
- intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+ intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ dig_hotplug_reg, hpd_cpt,
+ pch_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ }
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2176,8 +1963,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t hp_control;
- uint32_t hp_trigger;
+ u32 hp_control, hp_trigger;
+ u32 pin_mask, long_mask;
/* Get the status */
hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
@@ -2189,20 +1976,12 @@ static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
return;
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hp_control & BXT_HOTPLUG_CTL_MASK);
-
- /* Check for HPD storm and schedule bottom half */
- intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
-
- /*
- * FIXME: Save the hot plug status for bottom half before
- * clearing the sticky status bits, else the status will be
- * lost.
- */
-
/* Clear sticky bits in hpd status */
I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+
+ intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
+ hpd_bxt, bxt_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
static irqreturn_t gen8_irq_handler(int irq, void *arg)
@@ -2706,18 +2485,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static struct drm_i915_gem_request *
-ring_last_request(struct intel_engine_cs *ring)
-{
- return list_entry(ring->request_list.prev,
- struct drm_i915_gem_request, list);
-}
-
static bool
-ring_idle(struct intel_engine_cs *ring)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
- i915_gem_request_completed(ring_last_request(ring), false));
+ i915_seqno_passed(seqno, ring->last_submitted_seqno));
}
static bool
@@ -2939,7 +2711,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring)) {
+ if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
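
ring_idle() now compares the sampled seqno with ring->last_submitted_seqno through i915_seqno_passed(). Sequence-number helpers of this kind are usually written with a signed 32-bit difference so the comparison stays correct across counter wraparound; the sketch below shows that idiom as a simplified restatement, not the driver's exact helper.

#include <stdint.h>
#include <stdio.h>

/* true if seq1 is at or after seq2, even across a 32-bit wrap */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));		/* 1 */
	printf("%d\n", seqno_passed(5, 10));		/* 0 */
	printf("%d\n", seqno_passed(3, 0xfffffff0u));	/* 1: counter wrapped */
	return 0;
}
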
@@ -3210,12 +2982,12 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
if (HAS_PCH_IBX(dev)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
for_each_intel_encoder(dev, intel_encoder)
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
for_each_intel_encoder(dev, intel_encoder)
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
@@ -3244,7 +3016,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
/* Now, enable HPD */
for_each_intel_encoder(dev, intel_encoder) {
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
== HPD_ENABLED)
hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
}
@@ -4137,7 +3909,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
for_each_intel_encoder(dev, intel_encoder)
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
@@ -4277,46 +4049,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void intel_hpd_irq_reenable_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- hotplug_reenable_work.work);
- struct drm_device *dev = dev_priv->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- int i;
-
- intel_runtime_pm_get(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
- struct drm_connector *connector;
-
- if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
- continue;
-
- dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- if (intel_connector->encoder->hpd_pin == i) {
- if (connector->polled != intel_connector->polled)
- DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- connector->name);
- connector->polled = intel_connector->polled;
- if (!connector->polled)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
- }
- }
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- intel_runtime_pm_put(dev_priv);
-}
-
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
@@ -4328,8 +4060,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
+ intel_hpd_init_work(dev_priv);
+
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
@@ -4342,8 +4074,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
- INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
- intel_hpd_irq_reenable_work);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
@@ -4429,46 +4159,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
}
/**
- * intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
- *
- * This function enables the hotplug support. It requires that interrupts have
- * already been enabled with intel_irq_init_hw(). From this point on hotplug and
- * poll request can run concurrently to other code, so locking rules must be
- * obeyed.
- *
- * This is a separate step from interrupt enabling to simplify the locking rules
- * in the driver load and resume code.
- */
-void intel_hpd_init(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
- int i;
-
- for (i = 1; i < HPD_NUM_PINS; i++) {
- dev_priv->hpd_stats[i].hpd_cnt = 0;
- dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
- }
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
- connector->polled = intel_connector->polled;
- if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- if (intel_connector->mst_port)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
-
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked checks happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-/**
* intel_irq_install - enables the hardware interrupt
* @dev_priv: i915 device instance
*
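
The rewritten hotplug path in this file separates "which pins fired" from "which pulses were long": intel_get_hpd_pins() walks a per-pin trigger table and asks a platform-specific callback about pulse length, leaving storm handling and bottom halves to intel_hpd_irq_handler(). The standalone sketch below mirrors that structure with invented status-bit positions; none of the bit layouts here are the hardware's.

#include <stdint.h>
#include <stdio.h>

enum hpd_pin { HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, HPD_NUM_PINS };

/* hypothetical status-register layout for the example */
static const uint32_t hpd_bits[HPD_NUM_PINS] = {
	[HPD_PORT_B] = 1u << 4,
	[HPD_PORT_C] = 1u << 5,
	[HPD_PORT_D] = 1u << 6,
};

static int example_long_detect(enum hpd_pin pin, uint32_t dig_reg)
{
	/* pretend the "long pulse" flags live at bits 16..18, one per pin */
	return (dig_reg >> (16 + pin)) & 1;
}

static void get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
			 uint32_t trigger, uint32_t dig_reg,
			 int (*long_detect)(enum hpd_pin, uint32_t))
{
	int pin;

	*pin_mask = 0;
	*long_mask = 0;
	for (pin = 0; pin < HPD_NUM_PINS; pin++) {
		if ((hpd_bits[pin] & trigger) == 0)
			continue;
		*pin_mask |= 1u << pin;
		if (long_detect(pin, dig_reg))
			*long_mask |= 1u << pin;
	}
}

int main(void)
{
	uint32_t pins, longs;

	/* ports B and D triggered; only D reports a long pulse */
	get_hpd_pins(&pins, &longs, (1u << 4) | (1u << 6), 1u << 18,
		     example_long_detect);
	printf("pins 0x%x long 0x%x\n", pins, longs);
	return 0;
}
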
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ac5a1b29ac0..5ae4b0aba564 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -28,7 +28,6 @@ struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
.semaphores = -1,
- .lvds_downclock = 0,
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
@@ -52,13 +51,14 @@ struct i915_params i915 __read_mostly = {
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
- .nuclear_pageflip = 0,
.edp_vswing = 0,
+ .enable_guc_submission = false,
+ .guc_log_level = -1,
};
module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
- "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+ "Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
@@ -84,11 +84,6 @@ MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
-module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
- "Use panel (LVDS/eDP) downclocking for power savings "
- "(default: false)");
-
module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
@@ -104,7 +99,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-module_param_named(reset, i915.reset, bool, 0600);
+module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
@@ -182,13 +177,16 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
-MODULE_PARM_DESC(nuclear_pageflip,
- "Force atomic modeset functionality; only planes work for now (default: false).");
-
/* WA to get away with the default setting in VBT for early platforms.Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,
"Ignore/Override vswing pre-emph table selection from VBT "
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
+MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+
+module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
+MODULE_PARM_DESC(guc_log_level,
+ "GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2030f602cbf8..8e46c348366b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -50,12 +50,17 @@
/* PCI config space */
-#define HPLLCC 0xc0 /* 855 only */
-#define GC_CLOCK_CONTROL_MASK (0xf << 0)
+#define HPLLCC 0xc0 /* 85x only */
+#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
#define GC_CLOCK_133_200 (0 << 0)
#define GC_CLOCK_100_200 (1 << 0)
#define GC_CLOCK_100_133 (2 << 0)
-#define GC_CLOCK_166_250 (3 << 0)
+#define GC_CLOCK_133_266 (3 << 0)
+#define GC_CLOCK_133_200_2 (4 << 0)
+#define GC_CLOCK_133_266_2 (5 << 0)
+#define GC_CLOCK_166_266 (6 << 0)
+#define GC_CLOCK_166_250 (7 << 0)
+
#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
@@ -155,6 +160,7 @@
#define GAM_ECOCHK 0x4090
#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
#define ECOCHK_SNB_BIT (1<<10)
+#define ECOCHK_DIS_TLB (1<<8)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
@@ -172,13 +178,22 @@
#define GAB_CTL 0x24000
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
-#define GEN7_BIOS_RESERVED 0x1082C0
-#define GEN7_BIOS_RESERVED_1M (0 << 5)
-#define GEN7_BIOS_RESERVED_256K (1 << 5)
-#define GEN8_BIOS_RESERVED_SHIFT 7
-#define GEN7_BIOS_RESERVED_MASK 0x1
-#define GEN8_BIOS_RESERVED_MASK 0x3
-
+#define GEN6_STOLEN_RESERVED 0x1082C0
+#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
+#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
+#define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4)
+#define GEN6_STOLEN_RESERVED_1M (0 << 4)
+#define GEN6_STOLEN_RESERVED_512K (1 << 4)
+#define GEN6_STOLEN_RESERVED_256K (2 << 4)
+#define GEN6_STOLEN_RESERVED_128K (3 << 4)
+#define GEN7_STOLEN_RESERVED_SIZE_MASK (1 << 5)
+#define GEN7_STOLEN_RESERVED_1M (0 << 5)
+#define GEN7_STOLEN_RESERVED_256K (1 << 5)
+#define GEN8_STOLEN_RESERVED_SIZE_MASK (3 << 7)
+#define GEN8_STOLEN_RESERVED_1M (0 << 7)
+#define GEN8_STOLEN_RESERVED_2M (1 << 7)
+#define GEN8_STOLEN_RESERVED_4M (2 << 7)
+#define GEN8_STOLEN_RESERVED_8M (3 << 7)
/* VGA stuff */
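
To make the new GEN8 stolen-reserved encoding concrete, the small program below decodes the two-bit size field using the masks just defined; it is a plain userspace illustration of the bit layout, not driver code.

#include <stdint.h>
#include <stdio.h>

#define GEN8_STOLEN_RESERVED_SIZE_MASK	(3 << 7)
#define GEN8_STOLEN_RESERVED_1M		(0 << 7)
#define GEN8_STOLEN_RESERVED_2M		(1 << 7)
#define GEN8_STOLEN_RESERVED_4M		(2 << 7)
#define GEN8_STOLEN_RESERVED_8M		(3 << 7)

/* return the reserved stolen size in KiB encoded in the register value */
static unsigned int gen8_reserved_kib(uint32_t reg)
{
	switch (reg & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M: return 1024;
	case GEN8_STOLEN_RESERVED_2M: return 2048;
	case GEN8_STOLEN_RESERVED_4M: return 4096;
	default:                      return 8192;
	}
}

int main(void)
{
	printf("%u KiB\n", gen8_reserved_kib(GEN8_STOLEN_RESERVED_4M));
	return 0;
}
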
@@ -316,6 +331,8 @@
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
+#define HSW_MI_RS_SAVE_STATE_EN (1<<3)
+#define HSW_MI_RS_RESTORE_STATE_EN (1<<2)
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
@@ -347,6 +364,8 @@
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
+#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
+#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -356,6 +375,7 @@
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
+#define MI_BATCH_RESOURCE_STREAMER (1<<10)
#define MI_PREDICATE_SRC0 (0x2400)
#define MI_PREDICATE_SRC1 (0x2408)
@@ -410,6 +430,7 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_FLUSH_L3 (1<<27)
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
@@ -426,6 +447,7 @@
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
+#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
@@ -449,7 +471,6 @@
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
@@ -1163,10 +1184,12 @@ enum skl_disp_power_wells {
#define _PORT_PLL_EBB_0_A 0x162034
#define _PORT_PLL_EBB_0_B 0x6C034
#define _PORT_PLL_EBB_0_C 0x6C340
-#define PORT_PLL_P1_MASK (0x07 << 13)
-#define PORT_PLL_P1(x) ((x) << 13)
-#define PORT_PLL_P2_MASK (0x1f << 8)
-#define PORT_PLL_P2(x) ((x) << 8)
+#define PORT_PLL_P1_SHIFT 13
+#define PORT_PLL_P1_MASK (0x07 << PORT_PLL_P1_SHIFT)
+#define PORT_PLL_P1(x) ((x) << PORT_PLL_P1_SHIFT)
+#define PORT_PLL_P2_SHIFT 8
+#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
+#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \
_PORT_PLL_EBB_0_B, \
_PORT_PLL_EBB_0_C)
@@ -1186,8 +1209,9 @@ enum skl_disp_power_wells {
/* PORT_PLL_0_A */
#define PORT_PLL_M2_MASK 0xFF
/* PORT_PLL_1_A */
-#define PORT_PLL_N_MASK (0x0F << 8)
-#define PORT_PLL_N(x) ((x) << 8)
+#define PORT_PLL_N_SHIFT 8
+#define PORT_PLL_N_MASK (0x0F << PORT_PLL_N_SHIFT)
+#define PORT_PLL_N(x) ((x) << PORT_PLL_N_SHIFT)
/* PORT_PLL_2_A */
#define PORT_PLL_M2_FRAC_MASK 0x3FFFFF
/* PORT_PLL_3_A */
@@ -1201,9 +1225,11 @@ enum skl_disp_power_wells {
/* PORT_PLL_8_A */
#define PORT_PLL_TARGET_CNT_MASK 0x3FF
/* PORT_PLL_9_A */
-#define PORT_PLL_LOCK_THRESHOLD_MASK 0xe
+#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
+#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
/* PORT_PLL_10_A */
#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
+#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
#define PORT_PLL_DCO_AMP(x) (x<<10)
#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
@@ -1377,6 +1403,18 @@ enum skl_disp_power_wells {
_PORT_TX_DW14_LN0_C) + \
_BXT_LANE_OFFSET(lane))
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1 0x4F074
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK 0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0 0x6C00C
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port) (8+3*(port))
+#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT 23
+
/*
* Fence registers
*/
@@ -1456,6 +1494,9 @@ enum skl_disp_power_wells {
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define RING_RESET_CTL(base) ((base)+0xd0)
+#define RESET_CTL_REQUEST_RESET (1 << 0)
+#define RESET_CTL_READY_TO_RESET (1 << 1)
#define HSW_GTT_CACHE_EN 0x4024
#define GTT_CACHE_EN_ALL 0xF0007FFF
@@ -1946,6 +1987,9 @@ enum skl_disp_power_wells {
#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG 0x03300
+#define FBC_STATUS2 0x43214
+#define FBC_COMPRESSION_MASK 0x7ff
+
#define FBC_LL_SIZE (1536)
/* Framebuffer compression for GM45+ */
@@ -2116,7 +2160,7 @@ enum skl_disp_power_wells {
#define DPLL_DVO_2X_MODE (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
-#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
+#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -2130,8 +2174,8 @@ enum skl_disp_power_wells {
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
-#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
-#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13)
+#define DPLL_SSC_REF_CLK_CHV (1<<13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
@@ -2488,6 +2532,9 @@ enum skl_disp_power_wells {
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
+#define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38)
+#define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f)
+
#define TSC1 0x11001
#define TSE (1<<0)
#define TR1 0x11006
@@ -2718,8 +2765,10 @@ enum skl_disp_power_wells {
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define BXT_RP_STATE_CAP 0x138170
#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
@@ -2767,7 +2816,8 @@ enum skl_disp_power_wells {
* valid. Now, docs explain in dwords what is in the context object. The full
* size is 70720 bytes, however, the power context and execlist context will
* never be saved (power context is stored elsewhere, and execlists don't work
- * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
@@ -4398,9 +4448,32 @@ enum skl_disp_power_wells {
#define DSPARB_BSTART_SHIFT 0
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
-
+#define DSPARB_SPRITEA_SHIFT_VLV 0
+#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEB_SHIFT_VLV 8
+#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
+#define DSPARB_SPRITEC_SHIFT_VLV 16
+#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
+#define DSPARB_SPRITED_SHIFT_VLV 24
+#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
+#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
+#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
+#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
+#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
+#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
+#define DSPARB_SPRITED_HI_SHIFT_VLV 12
+#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
+#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
+#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
+#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
+#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define DSPARB_SPRITEE_SHIFT_VLV 0
+#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEF_SHIFT_VLV 8
+#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
@@ -5754,6 +5827,13 @@ enum skl_disp_power_wells {
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define SKL_DFSM 0x51000
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+
#define FF_SLICE_CS_CHICKEN2 0x20e4
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
@@ -5791,6 +5871,7 @@ enum skl_disp_power_wells {
#define GEN8_L3SQCREG4 0xb118
#define GEN8_LQSC_RO_PERF_DIS (1<<27)
+#define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21)
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
@@ -5913,6 +5994,11 @@ enum skl_disp_power_wells {
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
+#define BXT_PORTA_HOTPLUG_ENABLE (1 << 28)
+#define BXT_PORTA_HOTPLUG_STATUS_MASK (0x3 << 24)
+#define BXT_PORTA_HOTPLUG_NO_DETECT (0 << 24)
+#define BXT_PORTA_HOTPLUG_SHORT_DETECT (1 << 24)
+#define BXT_PORTA_HOTPLUG_LONG_DETECT (2 << 24)
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
@@ -6047,6 +6133,9 @@ enum skl_disp_power_wells {
#define _VIDEO_DIP_CTL_A 0xe0200
#define _VIDEO_DIP_DATA_A 0xe0208
#define _VIDEO_DIP_GCP_A 0xe0210
+#define GCP_COLOR_INDICATION (1 << 2)
+#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
+#define GCP_AV_MUTE (1 << 0)
#define _VIDEO_DIP_CTL_B 0xe1200
#define _VIDEO_DIP_DATA_B 0xe1208
@@ -6186,6 +6275,7 @@ enum skl_disp_power_wells {
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
@@ -6370,6 +6460,8 @@ enum skl_disp_power_wells {
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define PANEL_UNLOCK_MASK (0xffff << 16)
+#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0)
+#define BXT_POWER_CYCLE_DELAY_SHIFT 4
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
@@ -6398,6 +6490,17 @@ enum skl_disp_power_wells {
#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
+/* BXT PPS changes - 2nd set of PPS registers */
+#define _BXT_PP_STATUS2 0xc7300
+#define _BXT_PP_CONTROL2 0xc7304
+#define _BXT_PP_ON_DELAYS2 0xc7308
+#define _BXT_PP_OFF_DELAYS2 0xc730c
+
+#define BXT_PP_STATUS(n) ((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2)
+#define BXT_PP_CONTROL(n) ((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2)
+#define BXT_PP_ON_DELAYS(n) ((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2)
+#define BXT_PP_OFF_DELAYS(n) ((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2)
+
#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110
#define PCH_DPB_AUX_CH_DATA1 0xe4114
@@ -6698,6 +6801,7 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
@@ -6756,6 +6860,9 @@ enum skl_disp_power_wells {
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+#define GEN8_GARBCNTL 0xB004
+#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
+
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
@@ -7163,6 +7270,7 @@ enum skl_disp_power_wells {
#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
#define LCPLL_CLK_FREQ_675_BDW (3<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
#define LCPLL_POWER_DOWN_ALLOW (1<<22)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
@@ -7265,12 +7373,6 @@ enum skl_disp_power_wells {
#define DC_STATE_EN 0x45504
#define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_DC9 (1<<3)
-
-/*
-* SKL DC
-*/
-#define DC_STATE_EN 0x45504
-#define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_UPTO_DC6 (2<<0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
@@ -7822,4 +7924,13 @@ enum skl_disp_power_wells {
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */
+
+#define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register*/
+#define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register*/
+#define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register*/
+#define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register*/
+#define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register*/
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index cf67f82f7b7f..1ccac618468e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
}
/* only restore FBC info on the platform that supports FBC*/
- intel_fbc_disable(dev);
+ intel_fbc_disable(dev_priv);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 247626885f49..55bd04c6b939 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -64,24 +64,16 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
goto out;
}
- units = 0;
- div = 1000000ULL;
-
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
/* Special case for 320Mhz */
- if (czcount_30ns == 1) {
- div = 10000000ULL;
- units = 3125ULL;
- } else {
- /* chv counts are one less */
- czcount_30ns += 1;
- }
+ div = 10000000ULL;
+ units = 3125ULL;
+ } else {
+ czcount_30ns += 1;
+ div = 1000000ULL;
+ units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
}
- if (units == 0)
- units = DIV_ROUND_UP_ULL(30ULL * bias,
- (u64)czcount_30ns);
-
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
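
The reworked calc_residency() above first picks a units/divisor pair, with a special case for the Cherryview 320 MHz czclk, before scaling the raw counter. The sketch below restates only that selection step with made-up inputs; the bias derivation and the final time conversion of the real function are omitted.

#include <inttypes.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(x, y)	(((x) + (y) - 1) / (y))

static uint64_t residency_units(int is_chv_320mhz, uint64_t czcount_30ns,
				uint64_t bias, uint64_t *div)
{
	if (is_chv_320mhz) {		/* special case for the 320 MHz czclk */
		*div = 10000000ULL;
		return 3125ULL;
	}
	czcount_30ns += 1;		/* mirror the +1 adjustment in the hunk above */
	*div = 1000000ULL;
	return DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
}

int main(void)
{
	uint64_t div, units = residency_units(0, 37, 1000, &div);

	printf("units=%" PRIu64 " div=%" PRIu64 "\n", units, div);
	return 0;
}
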
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 497cba5deb1e..2f34c47bd4bf 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -424,10 +424,10 @@ TRACE_EVENT(i915_gem_evict_vm,
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct intel_engine_cs *from,
- struct intel_engine_cs *to,
+ TP_PROTO(struct drm_i915_gem_request *to_req,
+ struct intel_engine_cs *from,
struct drm_i915_gem_request *req),
- TP_ARGS(from, to, req),
+ TP_ARGS(to_req, from, req),
TP_STRUCT__entry(
__field(u32, dev)
@@ -439,7 +439,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
- __entry->sync_to = to->id;
+ __entry->sync_to = to_req->ring->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
@@ -475,8 +475,8 @@ TRACE_EVENT(i915_gem_ring_dispatch,
);
TRACE_EVENT(i915_gem_ring_flush,
- TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
- TP_ARGS(ring, invalidate, flush),
+ TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
+ TP_ARGS(req, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
@@ -486,8 +486,8 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
+ __entry->dev = req->ring->dev->primary->index;
+ __entry->ring = req->ring->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
@@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context,
TP_fast_assign(
__entry->ctx = ctx;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
- __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+ __entry->dev = ctx->i915->dev->primary->index;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7ed8033aae60..e2531cf59266 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -35,162 +35,6 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
-
-/**
- * intel_atomic_check - validate state object
- * @dev: drm device
- * @state: state to validate
- */
-int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state)
-{
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
- int nconnectors = dev->mode_config.num_connector;
- enum pipe nuclear_pipe = INVALID_PIPE;
- struct intel_crtc *nuclear_crtc = NULL;
- struct intel_crtc_state *crtc_state = NULL;
- int ret;
- int i;
- bool not_nuclear = false;
-
- /*
- * FIXME: At the moment, we only support "nuclear pageflip" on a
- * single CRTC. Cross-crtc updates will be added later.
- */
- for (i = 0; i < nplanes; i++) {
- struct intel_plane *plane = to_intel_plane(state->planes[i]);
- if (!plane)
- continue;
-
- if (nuclear_pipe == INVALID_PIPE) {
- nuclear_pipe = plane->pipe;
- } else if (nuclear_pipe != plane->pipe) {
- DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
- return -EINVAL;
- }
- }
-
- /*
- * FIXME: We only handle planes for now; make sure there are no CRTC's
- * or connectors involved.
- */
- state->allow_modeset = false;
- for (i = 0; i < ncrtcs; i++) {
- struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
- if (crtc)
- memset(&crtc->atomic, 0, sizeof(crtc->atomic));
- if (crtc && crtc->pipe != nuclear_pipe)
- not_nuclear = true;
- if (crtc && crtc->pipe == nuclear_pipe) {
- nuclear_crtc = crtc;
- crtc_state = to_intel_crtc_state(state->crtc_states[i]);
- }
- }
- for (i = 0; i < nconnectors; i++)
- if (state->connectors[i] != NULL)
- not_nuclear = true;
-
- if (not_nuclear) {
- DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
- return -EINVAL;
- }
-
- ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
- return ret;
-
- /* FIXME: move to crtc atomic check function once it is ready */
- ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
- if (ret)
- return ret;
-
- return ret;
-}
-
-
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @async: asynchronous commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool async)
-{
- int ret;
- int i;
-
- if (async) {
- DRM_DEBUG_KMS("i915 does not yet support async commit\n");
- return -EINVAL;
- }
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* Point of no return */
-
- /*
- * FIXME: The proper sequence here will eventually be:
- *
- * drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_wait_for_vblanks(dev, state);
- * drm_atomic_helper_cleanup_planes(dev, state);
- * drm_atomic_state_free(state);
- *
- * once we have full atomic modeset. For now, just manually update
- * plane states to avoid clobbering good states with dummy states
- * while nuclear pageflipping.
- */
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane)
- continue;
-
- plane->state->state = state;
- swap(state->plane_states[i], plane->state);
- plane->state->state = NULL;
- }
-
- /* swap crtc_scaler_state */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc) {
- continue;
- }
-
- to_intel_crtc(crtc)->config->scaler_state =
- to_intel_crtc_state(state->crtc_states[i])->scaler_state;
-
- if (INTEL_INFO(dev)->gen >= 9)
- skl_detach_scalers(to_intel_crtc(crtc));
- }
-
- drm_atomic_helper_commit_planes(dev, state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
-
- return 0;
-}
-
/**
* intel_connector_atomic_get_property - fetch connector property value
* @connector: connector to fetch property for
@@ -298,17 +142,12 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
struct intel_plane_state *plane_state = NULL;
- struct intel_crtc_scaler_state *scaler_state;
- struct drm_atomic_state *drm_state;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_atomic_state *drm_state = crtc_state->base.state;
int num_scalers_need;
int i, j;
- if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
- return 0;
-
- scaler_state = &crtc_state->scaler_state;
- drm_state = crtc_state->base.state;
-
num_scalers_need = hweight32(scaler_state->scaler_users);
DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
crtc_state, num_scalers_need, intel_crtc->num_scalers,
@@ -336,17 +175,21 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
	/* walk through scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
int *scaler_id;
+ const char *name;
+ int idx;
/* skip if scaler not required */
if (!(scaler_state->scaler_users & (1 << i)))
continue;
if (i == SKL_CRTC_INDEX) {
+ name = "CRTC";
+ idx = intel_crtc->base.base.id;
+
/* panel fitter case: assign as a crtc scaler */
scaler_id = &scaler_state->scaler_id;
} else {
- if (!drm_state)
- continue;
+ name = "PLANE";
/* plane scaler case: assign as a plane scaler */
/* find the plane that set the bit as scaler_user */
@@ -365,9 +208,19 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
plane->base.id);
return PTR_ERR(state);
}
+
+ /*
+ * the plane is added after plane checks are run,
+ * but since this plane is unchanged just do the
+ * minimum required validation.
+ */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ intel_crtc->atomic.wait_for_flips = true;
+ crtc_state->base.planes_changed = true;
}
intel_plane = to_intel_plane(plane);
+ idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
@@ -383,23 +236,16 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
for (j = 0; j < intel_crtc->num_scalers; j++) {
if (!scaler_state->scalers[j].in_use) {
scaler_state->scalers[j].in_use = 1;
- *scaler_id = scaler_state->scalers[j].id;
+ *scaler_id = j;
DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe,
- i == SKL_CRTC_INDEX ? scaler_state->scaler_id :
- plane_state->scaler_id,
- i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
- i == SKL_CRTC_INDEX ? intel_crtc->base.base.id :
- plane->base.id);
+ intel_crtc->pipe, *scaler_id, name, idx);
break;
}
}
}
if (WARN_ON(*scaler_id < 0)) {
- DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n",
- i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
- i == SKL_CRTC_INDEX ? intel_crtc->base.base.id:plane->base.id);
+ DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
continue;
}
@@ -421,3 +267,54 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
return 0;
}
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll_config *shared_dpll)
+{
+ enum intel_dpll_id i;
+
+ /* Copy shared dpll state */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ shared_dpll[i] = pll->config;
+ }
+}
+
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+ WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+ if (!state->dpll_set) {
+ state->dpll_set = true;
+
+ intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+ state->shared_dpll);
+ }
+
+ return state->shared_dpll;
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+ struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+ kfree(state);
+ return NULL;
+ }
+
+ return &state->base;
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(s);
+ drm_atomic_state_default_clear(&state->base);
+ state->dpll_set = false;
+}
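Note: the alloc/clear hooks added above let i915 subclass drm_atomic_state so per-modeset data (here the shared DPLL configs) travels with the state object. A minimal sketch of how such hooks are typically wired up, assuming the DRM core's drm_mode_config_funcs exposes the atomic_state_alloc/clear callbacks for drivers that wrap drm_atomic_state; the funcs table name is illustrative, not taken from this patch:

static const struct drm_mode_config_funcs intel_mode_funcs_sketch = {
	/* .fb_create, .atomic_check, .atomic_commit etc. elided */
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};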
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 86ba4b2c3a65..f1ab8e4b9c11 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -56,6 +56,7 @@ intel_create_plane_state(struct drm_plane *plane)
state->base.plane = plane;
state->base.rotation = BIT(DRM_ROTATE_0);
+ state->ckey.flags = I915_SET_COLORKEY_NONE;
return state;
}
@@ -114,8 +115,10 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state = to_intel_plane_state(state);
+ struct drm_crtc_state *drm_crtc_state;
+ int ret;
- crtc = crtc ? crtc : plane->crtc;
+ crtc = crtc ? crtc : plane->state->crtc;
intel_crtc = to_intel_crtc(crtc);
/*
@@ -127,16 +130,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- /* FIXME: temporary hack necessary while we still use the plane update
- * helper. */
- if (state->state) {
- crtc_state =
- intel_atomic_get_crtc_state(state->state, intel_crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- } else {
- crtc_state = intel_crtc->config;
- }
+ drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!drm_crtc_state))
+ return -EINVAL;
+
+ crtc_state = to_intel_crtc_state(drm_crtc_state);
/*
* The original src/dest coordinates are stored in state->base, but
@@ -160,20 +158,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.y2 =
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
- /*
- * Disabling a plane is always okay; we just need to update
- * fb tracking in a special way since cleanup_fb() won't
- * get called by the plane helpers.
- */
- if (state->fb == NULL && plane->state->fb != NULL) {
- /*
- * 'prepare' is never called when plane is being disabled, so
- * we need to handle frontbuffer tracking as a special case
- */
- intel_crtc->atomic.disabled_planes |=
- (1 << drm_plane_index(plane));
- }
-
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
@@ -198,7 +182,12 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
}
}
- return intel_plane->check_plane(plane, intel_state);
+ intel_state->visible = false;
+ ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+ if (ret)
+ return ret;
+
+ return intel_plane_atomic_calc_changes(&crtc_state->base, state);
}
static void intel_plane_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 3da9b8409f20..dc32cf4585f8 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -41,7 +41,8 @@
*
* The disable sequences must be performed before disabling the transcoder or
* port. The enable sequences may only be performed after enabling the
- * transcoder and port, and after completed link training.
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
*
* The codec and controller sequences could be done either parallel or serial,
* but generally the ELDV/PD change in the codec sequence indicates to the audio
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 198fc3c3291b..8e46149bafdd 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -122,42 +122,6 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
drm_mode_set_name(panel_fixed_mode);
}
-static bool
-lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
- const struct lvds_dvo_timing *b)
-{
- if (a->hactive_hi != b->hactive_hi ||
- a->hactive_lo != b->hactive_lo)
- return false;
-
- if (a->hsync_off_hi != b->hsync_off_hi ||
- a->hsync_off_lo != b->hsync_off_lo)
- return false;
-
- if (a->hsync_pulse_width != b->hsync_pulse_width)
- return false;
-
- if (a->hblank_hi != b->hblank_hi ||
- a->hblank_lo != b->hblank_lo)
- return false;
-
- if (a->vactive_hi != b->vactive_hi ||
- a->vactive_lo != b->vactive_lo)
- return false;
-
- if (a->vsync_off != b->vsync_off)
- return false;
-
- if (a->vsync_pulse_width != b->vsync_pulse_width)
- return false;
-
- if (a->vblank_hi != b->vblank_hi ||
- a->vblank_lo != b->vblank_lo)
- return false;
-
- return true;
-}
-
static const struct lvds_dvo_timing *
get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
@@ -213,7 +177,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
- int i, downclock, drrs_mode;
+ int drrs_mode;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
@@ -272,30 +236,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
- /*
- * Iterate over the LVDS panel timing info to find the lowest clock
- * for the native resolution.
- */
- downclock = panel_dvo_timing->clock;
- for (i = 0; i < 16; i++) {
- const struct lvds_dvo_timing *dvo_timing;
-
- dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
- lvds_lfp_data_ptrs,
- i);
- if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
- dvo_timing->clock < downclock)
- downclock = dvo_timing->clock;
- }
-
- if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
- dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = downclock * 10;
- DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
- "Normal Clock %dKHz, downclock %dKHz\n",
- panel_fixed_mode->clock, 10*downclock);
- }
-
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
@@ -946,6 +886,17 @@ err:
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
+static u8 translate_iboost(u8 val)
+{
+ static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
+
+ if (val >= ARRAY_SIZE(mapping)) {
+ DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+ return 0;
+ }
+ return mapping[val];
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
@@ -1028,13 +979,28 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
if (is_dp) {
- if (aux_channel == 0x40 && port != PORT_A)
+ if (port == PORT_E) {
+ info->alternate_aux_channel = aux_channel;
+			/* If DDI E shares an aux channel with another port, DP
+			 * can't exist on that other port, since the two would
+			 * be using the same aux channel and the system couldn't
+			 * communicate with them separately. */
+ if (aux_channel == DP_AUX_A)
+ dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
+ else if (aux_channel == DP_AUX_B)
+ dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
+ else if (aux_channel == DP_AUX_C)
+ dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
+ else if (aux_channel == DP_AUX_D)
+ dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
+ }
+ else if (aux_channel == DP_AUX_A && port != PORT_A)
DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
- if (aux_channel == 0x10 && port != PORT_B)
+ else if (aux_channel == DP_AUX_B && port != PORT_B)
DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
- if (aux_channel == 0x20 && port != PORT_C)
+ else if (aux_channel == DP_AUX_C && port != PORT_C)
DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
- if (aux_channel == 0x30 && port != PORT_D)
+ else if (aux_channel == DP_AUX_D && port != PORT_D)
DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
}
@@ -1046,6 +1012,16 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
}
+
+ /* Parse the I_boost config for SKL and above */
+ if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+ info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+ DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
+ port_name(port), info->dp_boost_level);
+ info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+ DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
+ port_name(port), info->hdmi_boost_level);
+ }
}
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
@@ -1075,15 +1051,34 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
const union child_device_config *p_child;
union child_device_config *child_dev_ptr;
int i, child_device_num, count;
- u16 block_size;
+ u8 expected_size;
+ u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
- if (p_defs->child_dev_size < sizeof(*p_child)) {
- DRM_ERROR("General definiton block child device size is too small.\n");
+ if (bdb->version < 195) {
+ expected_size = 33;
+ } else if (bdb->version == 195) {
+ expected_size = 37;
+ } else if (bdb->version <= 197) {
+ expected_size = 38;
+ } else {
+ expected_size = 38;
+ DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
+				 bdb->version, expected_size);
+ }
+
+ if (expected_size > sizeof(*p_child)) {
+ DRM_ERROR("child_device_config cannot fit in p_child\n");
+ return;
+ }
+
+ if (p_defs->child_dev_size != expected_size) {
+ DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
+ p_defs->child_dev_size, expected_size, bdb->version);
return;
}
/* get the block size of general definitions */
@@ -1130,7 +1125,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
- memcpy(child_dev_ptr, p_child, sizeof(*p_child));
+ memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
}
return;
}
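Worked example for the I_boost parsing above: the VBT packs the DP level in the low nibble of iboost_level and the HDMI level in the high nibble, and translate_iboost() maps values { 0, 1, 2 } onto { 1, 3, 7 }. An iboost_level of 0x21 therefore yields dp_boost_level = 3 and hdmi_boost_level = 7; any nibble of 3 or more is logged and treated as no boost (0).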
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index af0b47652752..6d909efbf43f 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -231,6 +231,10 @@ struct old_child_dev_config {
/* This one contains field offsets that are known to be common for all BDB
 * versions. Notice that the meaning of the contents may still change,
* but at least the offsets are consistent. */
+
+/* Definitions for flags_1 */
+#define IBOOST_ENABLE (1<<3)
+
struct common_child_dev_config {
u16 handle;
u16 device_type;
@@ -239,8 +243,13 @@ struct common_child_dev_config {
u8 not_common2[2];
u8 ddc_pin;
u16 edid_ptr;
+ u8 obsolete;
+ u8 flags_1;
+ u8 not_common3[13];
+ u8 iboost_level;
} __packed;
+
/* This field changes depending on the BDB version, so the most reliable way to
* read it is by checking the BDB version and reading the raw pointer. */
union child_device_config {
@@ -778,6 +787,13 @@ int intel_parse_bios(struct drm_device *dev);
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1
+/*
+ * PMIC vs SoC Backlight support specified in pwm_blc
+ * field in mipi_config block below.
+ */
+#define PPS_BLC_PMIC 0
+#define PPS_BLC_SOC 1
+
struct mipi_config {
u16 panel_id;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 521af2c069cb..af5e43bef4a4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -236,53 +236,6 @@ static void intel_enable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
}
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_crt_dpms(struct drm_connector *connector, int mode)
-{
- struct drm_device *dev = connector->dev;
- struct intel_encoder *encoder = intel_attached_encoder(connector);
- struct drm_crtc *crtc;
- int old_dpms;
-
- /* PCH platforms and VLV only support on/off. */
- if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- old_dpms = connector->dpms;
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- crtc = encoder->base.crtc;
- if (!crtc) {
- encoder->connectors_active = false;
- return;
- }
-
- /* We need the pipe to run for anything but OFF. */
- if (mode == DRM_MODE_DPMS_OFF)
- encoder->connectors_active = false;
- else
- encoder->connectors_active = true;
-
- /* We call connector dpms manually below in case pipe dpms doesn't
- * change due to cloning. */
- if (mode < old_dpms) {
- /* From off to on, enable the pipe first. */
- intel_crtc_update_dpms(crtc);
-
- intel_crt_set_dpms(encoder, mode);
- } else {
- intel_crt_set_dpms(encoder, mode);
-
- intel_crtc_update_dpms(crtc);
- }
-
- intel_modeset_check_state(connector->dev);
-}
-
static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -798,7 +751,7 @@ static void intel_crt_reset(struct drm_connector *connector)
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
- .dpms = intel_crt_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index bcb41e61877d..ba1ae031e6fd 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -244,7 +244,7 @@ void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
void intel_csr_load_program(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- __be32 *payload = dev_priv->csr.dmc_payload;
+ u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
if (!IS_GEN9(dev)) {
@@ -256,7 +256,7 @@ void intel_csr_load_program(struct drm_device *dev)
fw_size = dev_priv->csr.dmc_fw_size;
for (i = 0; i < fw_size; i++)
I915_WRITE(CSR_PROGRAM_BASE + i * 4,
- (u32 __force)payload[i]);
+ payload[i]);
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -279,7 +279,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
char substepping = intel_get_substepping(dev);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
- __be32 *dmc_payload;
+ uint32_t *dmc_payload;
bool fw_loaded = false;
if (!fw) {
@@ -375,20 +375,13 @@ static void finish_csr_load(const struct firmware *fw, void *context)
}
dmc_payload = csr->dmc_payload;
- for (i = 0; i < dmc_header->fw_size; i++) {
- uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
- /*
- * The firmware payload is an array of 32 bit words stored in
- * little-endian format in the firmware image and programmed
- * as 32 bit big-endian format to memory.
- */
- dmc_payload[i] = cpu_to_be32(*tmp);
- }
+ memcpy(dmc_payload, &fw->data[readcount], nbytes);
/* load csr program during system boot, as needed for DC states */
intel_csr_load_program(dev);
fw_loaded = true;
+ DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
out:
if (fw_loaded)
intel_runtime_pm_put(dev_priv);
@@ -422,6 +415,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
return;
}
+ DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+
/*
* Obtain a runtime pm reference, until CSR is loaded,
* to avoid entering runtime-suspend.
@@ -459,7 +454,8 @@ void intel_csr_ucode_fini(struct drm_device *dev)
void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
- WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
+ WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED,
+ "CSR is not loaded.\n");
WARN(!I915_READ(CSR_PROGRAM_BASE),
"CSR program storage start is NULL\n");
WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cacb07b7a8f1..6cfe65d6a8cf 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -31,6 +31,7 @@
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
u32 trans2; /* vref sel, vswing */
+ u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
};
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
@@ -38,134 +39,213 @@ struct ddi_buf_trans {
* automatically adapt to HDMI connections as well
*/
static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0006000E },
- { 0x00D75FFF, 0x0005000A },
- { 0x00C30FFF, 0x00040006 },
- { 0x80AAAFFF, 0x000B0000 },
- { 0x00FFFFFF, 0x0005000A },
- { 0x00D75FFF, 0x000C0004 },
- { 0x80C30FFF, 0x000B0000 },
- { 0x00FFFFFF, 0x00040006 },
- { 0x80D75FFF, 0x000B0000 },
+ { 0x00FFFFFF, 0x0006000E, 0x0 },
+ { 0x00D75FFF, 0x0005000A, 0x0 },
+ { 0x00C30FFF, 0x00040006, 0x0 },
+ { 0x80AAAFFF, 0x000B0000, 0x0 },
+ { 0x00FFFFFF, 0x0005000A, 0x0 },
+ { 0x00D75FFF, 0x000C0004, 0x0 },
+ { 0x80C30FFF, 0x000B0000, 0x0 },
+ { 0x00FFFFFF, 0x00040006, 0x0 },
+ { 0x80D75FFF, 0x000B0000, 0x0 },
};
static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0007000E },
- { 0x00D75FFF, 0x000F000A },
- { 0x00C30FFF, 0x00060006 },
- { 0x00AAAFFF, 0x001E0000 },
- { 0x00FFFFFF, 0x000F000A },
- { 0x00D75FFF, 0x00160004 },
- { 0x00C30FFF, 0x001E0000 },
- { 0x00FFFFFF, 0x00060006 },
- { 0x00D75FFF, 0x001E0000 },
+ { 0x00FFFFFF, 0x0007000E, 0x0 },
+ { 0x00D75FFF, 0x000F000A, 0x0 },
+ { 0x00C30FFF, 0x00060006, 0x0 },
+ { 0x00AAAFFF, 0x001E0000, 0x0 },
+ { 0x00FFFFFF, 0x000F000A, 0x0 },
+ { 0x00D75FFF, 0x00160004, 0x0 },
+ { 0x00C30FFF, 0x001E0000, 0x0 },
+ { 0x00FFFFFF, 0x00060006, 0x0 },
+ { 0x00D75FFF, 0x001E0000, 0x0 },
};
static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
/* Idx NT mV d T mV d db */
- { 0x00FFFFFF, 0x0006000E }, /* 0: 400 400 0 */
- { 0x00E79FFF, 0x000E000C }, /* 1: 400 500 2 */
- { 0x00D75FFF, 0x0005000A }, /* 2: 400 600 3.5 */
- { 0x00FFFFFF, 0x0005000A }, /* 3: 600 600 0 */
- { 0x00E79FFF, 0x001D0007 }, /* 4: 600 750 2 */
- { 0x00D75FFF, 0x000C0004 }, /* 5: 600 900 3.5 */
- { 0x00FFFFFF, 0x00040006 }, /* 6: 800 800 0 */
- { 0x80E79FFF, 0x00030002 }, /* 7: 800 1000 2 */
- { 0x00FFFFFF, 0x00140005 }, /* 8: 850 850 0 */
- { 0x00FFFFFF, 0x000C0004 }, /* 9: 900 900 0 */
- { 0x00FFFFFF, 0x001C0003 }, /* 10: 950 950 0 */
- { 0x80FFFFFF, 0x00030002 }, /* 11: 1000 1000 0 */
+ { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */
+ { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */
+ { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */
+ { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */
+ { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */
+ { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */
+ { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */
+ { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */
+ { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */
+ { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */
+ { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */
+ { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */
};
static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
- { 0x00FFFFFF, 0x00000012 },
- { 0x00EBAFFF, 0x00020011 },
- { 0x00C71FFF, 0x0006000F },
- { 0x00AAAFFF, 0x000E000A },
- { 0x00FFFFFF, 0x00020011 },
- { 0x00DB6FFF, 0x0005000F },
- { 0x00BEEFFF, 0x000A000C },
- { 0x00FFFFFF, 0x0005000F },
- { 0x00DB6FFF, 0x000A000C },
+ { 0x00FFFFFF, 0x00000012, 0x0 },
+ { 0x00EBAFFF, 0x00020011, 0x0 },
+ { 0x00C71FFF, 0x0006000F, 0x0 },
+ { 0x00AAAFFF, 0x000E000A, 0x0 },
+ { 0x00FFFFFF, 0x00020011, 0x0 },
+ { 0x00DB6FFF, 0x0005000F, 0x0 },
+ { 0x00BEEFFF, 0x000A000C, 0x0 },
+ { 0x00FFFFFF, 0x0005000F, 0x0 },
+ { 0x00DB6FFF, 0x000A000C, 0x0 },
};
static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0007000E },
- { 0x00D75FFF, 0x000E000A },
- { 0x00BEFFFF, 0x00140006 },
- { 0x80B2CFFF, 0x001B0002 },
- { 0x00FFFFFF, 0x000E000A },
- { 0x00DB6FFF, 0x00160005 },
- { 0x80C71FFF, 0x001A0002 },
- { 0x00F7DFFF, 0x00180004 },
- { 0x80D75FFF, 0x001B0002 },
+ { 0x00FFFFFF, 0x0007000E, 0x0 },
+ { 0x00D75FFF, 0x000E000A, 0x0 },
+ { 0x00BEFFFF, 0x00140006, 0x0 },
+ { 0x80B2CFFF, 0x001B0002, 0x0 },
+ { 0x00FFFFFF, 0x000E000A, 0x0 },
+ { 0x00DB6FFF, 0x00160005, 0x0 },
+ { 0x80C71FFF, 0x001A0002, 0x0 },
+ { 0x00F7DFFF, 0x00180004, 0x0 },
+ { 0x80D75FFF, 0x001B0002, 0x0 },
};
static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0001000E },
- { 0x00D75FFF, 0x0004000A },
- { 0x00C30FFF, 0x00070006 },
- { 0x00AAAFFF, 0x000C0000 },
- { 0x00FFFFFF, 0x0004000A },
- { 0x00D75FFF, 0x00090004 },
- { 0x00C30FFF, 0x000C0000 },
- { 0x00FFFFFF, 0x00070006 },
- { 0x00D75FFF, 0x000C0000 },
+ { 0x00FFFFFF, 0x0001000E, 0x0 },
+ { 0x00D75FFF, 0x0004000A, 0x0 },
+ { 0x00C30FFF, 0x00070006, 0x0 },
+ { 0x00AAAFFF, 0x000C0000, 0x0 },
+ { 0x00FFFFFF, 0x0004000A, 0x0 },
+ { 0x00D75FFF, 0x00090004, 0x0 },
+ { 0x00C30FFF, 0x000C0000, 0x0 },
+ { 0x00FFFFFF, 0x00070006, 0x0 },
+ { 0x00D75FFF, 0x000C0000, 0x0 },
};
static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
/* Idx NT mV d T mV df db */
- { 0x00FFFFFF, 0x0007000E }, /* 0: 400 400 0 */
- { 0x00D75FFF, 0x000E000A }, /* 1: 400 600 3.5 */
- { 0x00BEFFFF, 0x00140006 }, /* 2: 400 800 6 */
- { 0x00FFFFFF, 0x0009000D }, /* 3: 450 450 0 */
- { 0x00FFFFFF, 0x000E000A }, /* 4: 600 600 0 */
- { 0x00D7FFFF, 0x00140006 }, /* 5: 600 800 2.5 */
- { 0x80CB2FFF, 0x001B0002 }, /* 6: 600 1000 4.5 */
- { 0x00FFFFFF, 0x00140006 }, /* 7: 800 800 0 */
- { 0x80E79FFF, 0x001B0002 }, /* 8: 800 1000 2 */
- { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
+ { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */
+ { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */
+ { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */
+ { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */
+ { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */
+ { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */
+ { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */
+ { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */
+ { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */
+ { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
};
+/* Skylake H, S, and Skylake Y with 0.95V VccIO */
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
- { 0x00000018, 0x000000a2 },
- { 0x00004014, 0x0000009B },
- { 0x00006012, 0x00000088 },
- { 0x00008010, 0x00000087 },
- { 0x00000018, 0x0000009B },
- { 0x00004014, 0x00000088 },
- { 0x00006012, 0x00000087 },
- { 0x00000018, 0x00000088 },
- { 0x00004014, 0x00000087 },
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x00009010, 0x000000C7, 0x0 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x00007011, 0x000000C7, 0x0 },
+ { 0x00002016, 0x000000DF, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
};
-/* eDP 1.4 low vswing translation parameters */
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A2, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x00007011, 0x00000087, 0x0 },
+ { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */
+ { 0x00002016, 0x0000009D, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
+ { 0x00007011, 0x000000C7, 0x0 },
+ { 0x00002016, 0x00000088, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y with 0.85V VccIO */
+static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = {
+ { 0x00000018, 0x000000A2, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x00007011, 0x00000087, 0x0 },
+ { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */
+ { 0x00000018, 0x0000009D, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
+ { 0x00007011, 0x000000C7, 0x0 },
+ { 0x00000018, 0x00000088, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
+};
+
+/*
+ * Skylake H and S, and Skylake Y with 0.95V VccIO
+ * eDP 1.4 low vswing translation parameters
+ */
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
- { 0x00000018, 0x000000a8 },
- { 0x00002016, 0x000000ab },
- { 0x00006012, 0x000000a2 },
- { 0x00008010, 0x00000088 },
- { 0x00000018, 0x000000ab },
- { 0x00004014, 0x000000a2 },
- { 0x00006012, 0x000000a6 },
- { 0x00000018, 0x000000a2 },
- { 0x00005013, 0x0000009c },
- { 0x00000018, 0x00000088 },
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000A9, 0x0 },
+ { 0x00007011, 0x000000A2, 0x0 },
+ { 0x00009010, 0x0000009C, 0x0 },
+ { 0x00000018, 0x000000A9, 0x0 },
+ { 0x00006013, 0x000000A2, 0x0 },
+ { 0x00007011, 0x000000A6, 0x0 },
+ { 0x00000018, 0x000000AB, 0x0 },
+ { 0x00007013, 0x0000009F, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
};
+/*
+ * Skylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000A9, 0x0 },
+ { 0x00007011, 0x000000A2, 0x0 },
+ { 0x00009010, 0x0000009C, 0x0 },
+ { 0x00000018, 0x000000A9, 0x0 },
+ { 0x00006013, 0x000000A2, 0x0 },
+ { 0x00007011, 0x000000A6, 0x0 },
+ { 0x00002016, 0x000000AB, 0x0 },
+ { 0x00005013, 0x0000009F, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
+};
+/*
+ * Skylake Y with 0.85V VccIO
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000AB, 0x0 },
+ { 0x00007011, 0x000000A4, 0x0 },
+ { 0x00009010, 0x000000DF, 0x0 },
+ { 0x00000018, 0x000000AA, 0x0 },
+ { 0x00006013, 0x000000A4, 0x0 },
+ { 0x00007011, 0x0000009D, 0x0 },
+ { 0x00000018, 0x000000A0, 0x0 },
+ { 0x00006012, 0x000000DF, 0x0 },
+ { 0x00000018, 0x0000008A, 0x0 },
+};
+
+/* Skylake H, S and U, and Skylake Y with 0.95V VccIO */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
- { 0x00000018, 0x000000ac },
- { 0x00005012, 0x0000009d },
- { 0x00007011, 0x00000088 },
- { 0x00000018, 0x000000a1 },
- { 0x00000018, 0x00000098 },
- { 0x00004013, 0x00000088 },
- { 0x00006012, 0x00000087 },
- { 0x00000018, 0x000000df },
- { 0x00003015, 0x00000087 },
- { 0x00003015, 0x000000c7 },
- { 0x00000018, 0x000000c7 },
+ { 0x00000018, 0x000000AC, 0x0 },
+ { 0x00005012, 0x0000009D, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x00000018, 0x000000A1, 0x0 },
+ { 0x00000018, 0x00000098, 0x0 },
+ { 0x00004013, 0x00000088, 0x0 },
+ { 0x00006012, 0x00000087, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
+ { 0x00003015, 0x00000087, 0x0 }, /* Default */
+ { 0x00003015, 0x000000C7, 0x0 },
+ { 0x00000018, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y with 0.85V VccIO */
+static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = {
+ { 0x00000018, 0x000000A1, 0x0 },
+ { 0x00005012, 0x000000DF, 0x0 },
+ { 0x00007011, 0x00000084, 0x0 },
+ { 0x00000018, 0x000000A4, 0x0 },
+ { 0x00000018, 0x0000009D, 0x0 },
+ { 0x00004013, 0x00000080, 0x0 },
+ { 0x00006013, 0x000000C7, 0x0 },
+ { 0x00000018, 0x0000008A, 0x0 },
+ { 0x00003015, 0x000000C7, 0x0 }, /* Default */
+ { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost */
+ { 0x00000018, 0x000000C7, 0x0 },
};
struct bxt_ddi_buf_trans {
@@ -181,16 +261,16 @@ struct bxt_ddi_buf_trans {
*/
static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
/* Idx NT mV diff db */
- { 52, 0, 0, 128, true }, /* 0: 400 0 */
- { 78, 0, 0, 85, false }, /* 1: 400 3.5 */
- { 104, 0, 0, 64, false }, /* 2: 400 6 */
- { 154, 0, 0, 43, false }, /* 3: 400 9.5 */
- { 77, 0, 0, 128, false }, /* 4: 600 0 */
- { 116, 0, 0, 85, false }, /* 5: 600 3.5 */
- { 154, 0, 0, 64, false }, /* 6: 600 6 */
- { 102, 0, 0, 128, false }, /* 7: 800 0 */
- { 154, 0, 0, 85, false }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
+ { 52, 0x9A, 0, 128, true }, /* 0: 400 0 */
+ { 78, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
+ { 104, 0x9A, 0, 64, false }, /* 2: 400 6 */
+ { 154, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
+ { 116, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
+ { 154, 0x9A, 0, 64, false }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
+ { 154, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
};
/* BSpec has 2 recommended values - entries 0 and 8.
@@ -198,18 +278,21 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
*/
static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
/* Idx NT mV diff db */
- { 52, 0, 0, 128, false }, /* 0: 400 0 */
- { 52, 0, 0, 85, false }, /* 1: 400 3.5 */
- { 52, 0, 0, 64, false }, /* 2: 400 6 */
- { 42, 0, 0, 43, false }, /* 3: 400 9.5 */
- { 77, 0, 0, 128, false }, /* 4: 600 0 */
- { 77, 0, 0, 85, false }, /* 5: 600 3.5 */
- { 77, 0, 0, 64, false }, /* 6: 600 6 */
- { 102, 0, 0, 128, false }, /* 7: 800 0 */
- { 102, 0, 0, 85, false }, /* 8: 800 3.5 */
+ { 52, 0x9A, 0, 128, false }, /* 0: 400 0 */
+ { 52, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
+ { 52, 0x9A, 0, 64, false }, /* 2: 400 6 */
+ { 42, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
+ { 77, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
+ { 77, 0x9A, 0, 64, false }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
+ { 102, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+ enum port port, int type);
+
static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
struct intel_digital_port **dig_port,
enum port *port)
@@ -249,6 +332,102 @@ intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
return intel_dig_port->hdmi.hdmi_reg;
}
+static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct ddi_buf_trans *ddi_translations;
+ static int is_095v = -1;
+
+ if (is_095v == -1) {
+ u32 spr1 = I915_READ(UAIMI_SPR1);
+
+ is_095v = spr1 & SKL_VCCIO_MASK;
+ }
+
+ if (IS_SKL_ULX(dev) && !is_095v) {
+ ddi_translations = skl_y_085v_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
+ } else if (IS_SKL_ULT(dev)) {
+ ddi_translations = skl_u_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+ } else {
+ ddi_translations = skl_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ }
+
+ return ddi_translations;
+}
+
+static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct ddi_buf_trans *ddi_translations;
+ static int is_095v = -1;
+
+ if (is_095v == -1) {
+ u32 spr1 = I915_READ(UAIMI_SPR1);
+
+ is_095v = spr1 & SKL_VCCIO_MASK;
+ }
+
+ if (IS_SKL_ULX(dev) && !is_095v) {
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations = skl_y_085v_ddi_translations_edp;
+ *n_entries =
+ ARRAY_SIZE(skl_y_085v_ddi_translations_edp);
+ } else {
+ ddi_translations = skl_y_085v_ddi_translations_dp;
+ *n_entries =
+ ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
+ }
+ } else if (IS_SKL_ULT(dev)) {
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations = skl_u_ddi_translations_edp;
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+ } else {
+ ddi_translations = skl_u_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+ }
+ } else {
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations = skl_ddi_translations_edp;
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+ } else {
+ ddi_translations = skl_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ }
+ }
+
+ return ddi_translations;
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_device *dev,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct ddi_buf_trans *ddi_translations;
+ static int is_095v = -1;
+
+ if (is_095v == -1) {
+ u32 spr1 = I915_READ(UAIMI_SPR1);
+
+ is_095v = spr1 & SKL_VCCIO_MASK;
+ }
+
+ if (IS_SKL_ULX(dev) && !is_095v) {
+ ddi_translations = skl_y_085v_ddi_translations_hdmi;
+ *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_hdmi);
+ } else {
+ ddi_translations = skl_ddi_translations_hdmi;
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ }
+
+ return ddi_translations;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
@@ -261,6 +440,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
+ u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
@@ -280,19 +460,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
return;
} else if (IS_SKYLAKE(dev)) {
ddi_translations_fdi = NULL;
- ddi_translations_dp = skl_ddi_translations_dp;
- n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- if (dev_priv->edp_low_vswing) {
- ddi_translations_edp = skl_ddi_translations_edp;
- n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
- } else {
- ddi_translations_edp = skl_ddi_translations_dp;
- n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- }
-
- ddi_translations_hdmi = skl_ddi_translations_hdmi;
- n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- hdmi_default_entry = 7;
+ ddi_translations_dp =
+ skl_get_buf_trans_dp(dev, &n_dp_entries);
+ ddi_translations_edp =
+ skl_get_buf_trans_edp(dev, &n_edp_entries);
+ ddi_translations_hdmi =
+ skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+ hdmi_default_entry = 8;
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
+ dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = 1<<31;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@@ -353,7 +531,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
}
for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
- I915_WRITE(reg, ddi_translations[i].trans1);
+ I915_WRITE(reg, ddi_translations[i].trans1 | iboost_bit);
reg += 4;
I915_WRITE(reg, ddi_translations[i].trans2);
reg += 4;
@@ -368,7 +546,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
hdmi_level = hdmi_default_entry;
/* Entry 9 is for HDMI: */
- I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
+ I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
reg += 4;
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
reg += 4;
@@ -625,11 +803,11 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
-struct wrpll_rnp {
+struct hsw_wrpll_rnp {
unsigned p, n2, r2;
};
-static unsigned wrpll_get_budget_for_freq(int clock)
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
unsigned budget;
@@ -703,9 +881,9 @@ static unsigned wrpll_get_budget_for_freq(int clock)
return budget;
}
-static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
- unsigned r2, unsigned n2, unsigned p,
- struct wrpll_rnp *best)
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+ unsigned r2, unsigned n2, unsigned p,
+ struct hsw_wrpll_rnp *best)
{
uint64_t a, b, c, d, diff, diff_best;
@@ -762,8 +940,7 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
/* Otherwise a < c && b >= d, do nothing */
}
-static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
- int reg)
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
{
int refclk = LC_FREQ;
int n, p, r;
@@ -856,6 +1033,26 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+ int dotclock;
+
+ if (pipe_config->has_pch_encoder)
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->fdi_m_n);
+ else if (pipe_config->has_dp_encoder)
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
+ dotclock = pipe_config->port_clock * 2 / 3;
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
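Worked example for ddi_dotclock_get() above: a 12 bpc HDMI sink (pipe_bpp == 36) runs the link at 1.5x the pixel rate, so the pixel clock is recovered as port_clock * 2 / 3 (e.g. a port_clock of 445500 gives a crtc_clock of 297000), and any pixel_multiplier is divided back out afterwards.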
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -902,12 +1099,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock;
- if (pipe_config->has_dp_encoder)
- pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ ddi_dotclock_get(pipe_config);
}
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
@@ -929,10 +1121,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
link_clock = 270000;
break;
case PORT_CLK_SEL_WRPLL1:
- link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+ link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
break;
case PORT_CLK_SEL_WRPLL2:
- link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+ link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
break;
case PORT_CLK_SEL_SPLL:
pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -954,23 +1146,32 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock * 2;
- if (pipe_config->has_pch_encoder)
- pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->fdi_m_n);
- else if (pipe_config->has_dp_encoder)
- pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ ddi_dotclock_get(pipe_config);
}
static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id dpll)
{
- /* FIXME formula not available in bspec */
- return 0;
+ struct intel_shared_dpll *pll;
+ struct intel_dpll_hw_state *state;
+ intel_clock_t clock;
+
+ /* For DDI ports we always use a shared PLL. */
+ if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+ return 0;
+
+ pll = &dev_priv->shared_dplls[dpll];
+ state = &pll->config.hw_state;
+
+ clock.m1 = 2;
+ clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+ return chv_calc_dpll_params(100000, &clock);
}
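Note on bxt_calc_pll_link() above: M2 is handled as a fixed-point divider — the integer part from pll0 is shifted left by 22 bits and, when PORT_PLL_M2_FRAC_ENABLE is set, the fractional part from pll2 is OR'd into the low bits — before the dividers are handed to chv_calc_dpll_params() with its 100000 kHz (100 MHz) reference clock.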
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
@@ -980,16 +1181,9 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
enum port port = intel_ddi_get_encoder_port(encoder);
uint32_t dpll = port;
- pipe_config->port_clock =
- bxt_calc_pll_link(dev_priv, dpll);
+ pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
- if (pipe_config->has_dp_encoder)
- pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else
- pipe_config->base.adjusted_mode.crtc_clock =
- pipe_config->port_clock;
+ ddi_dotclock_get(pipe_config);
}
void intel_ddi_clock_get(struct intel_encoder *encoder,
@@ -1011,12 +1205,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
{
uint64_t freq2k;
unsigned p, n2, r2;
- struct wrpll_rnp best = { 0, 0, 0 };
+ struct hsw_wrpll_rnp best = { 0, 0, 0 };
unsigned budget;
freq2k = clock / 100;
- budget = wrpll_get_budget_for_freq(clock);
+ budget = hsw_wrpll_get_budget_for_freq(clock);
/* Special case handling for 540 pixel clock: bypass WR PLL entirely
* and directly pass the LC PLL to it. */
@@ -1060,8 +1254,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
n2++) {
for (p = P_MIN; p <= P_MAX; p += P_INC)
- wrpll_update_rnp(freq2k, budget,
- r2, n2, p, &best);
+ hsw_wrpll_update_rnp(freq2k, budget,
+ r2, n2, p, &best);
}
}
@@ -1105,6 +1299,102 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
return true;
}
+struct skl_wrpll_context {
+ uint64_t min_deviation; /* current minimal deviation */
+ uint64_t central_freq; /* chosen central freq */
+ uint64_t dco_freq; /* chosen dco freq */
+ unsigned int p; /* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION 100
+#define SKL_DCO_MAX_NDEVIATION 600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+ uint64_t central_freq,
+ uint64_t dco_freq,
+ unsigned int divider)
+{
+ uint64_t deviation;
+
+ deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+ central_freq);
+
+ /* positive deviation */
+ if (dco_freq >= central_freq) {
+ if (deviation < SKL_DCO_MAX_PDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+ /* negative deviation */
+ } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+}
+
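As a worked check of the thresholds above: deviation is computed in units of 0.01%, so SKL_DCO_MAX_PDEVIATION = 100 corresponds to +1% and SKL_DCO_MAX_NDEVIATION = 600 to -6%. For central_freq = 9000000000 and dco_freq = 9045000000, deviation = 10000 * 45000000 / 9000000000 = 50 (0.5%), which passes the positive-deviation check.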
+static void skl_wrpll_get_multipliers(unsigned int p,
+ unsigned int *p0 /* out */,
+ unsigned int *p1 /* out */,
+ unsigned int *p2 /* out */)
+{
+ /* even dividers */
+ if (p % 2 == 0) {
+ unsigned int half = p / 2;
+
+ if (half == 1 || half == 2 || half == 3 || half == 5) {
+ *p0 = 2;
+ *p1 = 1;
+ *p2 = half;
+ } else if (half % 2 == 0) {
+ *p0 = 2;
+ *p1 = half / 2;
+ *p2 = 2;
+ } else if (half % 3 == 0) {
+ *p0 = 3;
+ *p1 = half / 3;
+ *p2 = 2;
+ } else if (half % 7 == 0) {
+ *p0 = 7;
+ *p1 = half / 7;
+ *p2 = 2;
+ }
+ } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = p / 3;
+ } else if (p == 5 || p == 7) {
+ *p0 = p;
+ *p1 = 1;
+ *p2 = 1;
+ } else if (p == 15) {
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = 5;
+ } else if (p == 21) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 3;
+ } else if (p == 35) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 5;
+ }
+}
+
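Worked examples for skl_wrpll_get_multipliers(): p = 20 takes the even path (half = 10, 10 % 2 == 0), giving p0 = 2, p1 = 5, p2 = 2 with p0 * p1 * p2 = 20; the odd divider p = 21 maps directly to p0 = 7, p1 = 1, p2 = 3.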
struct skl_wrpll_params {
uint32_t dco_fraction;
uint32_t dco_integer;
@@ -1115,150 +1405,145 @@ struct skl_wrpll_params {
uint32_t central_freq;
};
-static void
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
- struct skl_wrpll_params *wrpll_params)
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+ uint64_t afe_clock,
+ uint64_t central_freq,
+ uint32_t p0, uint32_t p1, uint32_t p2)
{
- uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
- uint64_t dco_central_freq[3] = {8400000000ULL,
- 9000000000ULL,
- 9600000000ULL};
- uint32_t min_dco_deviation = 400;
- uint32_t min_dco_index = 3;
- uint32_t P0[4] = {1, 2, 3, 7};
- uint32_t P2[4] = {1, 2, 3, 5};
- bool found = false;
- uint32_t candidate_p = 0;
- uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
- uint32_t candidate_p2[3] = {0};
- uint32_t dco_central_freq_deviation[3];
- uint32_t i, P1, k, dco_count;
- bool retry_with_odd = false;
uint64_t dco_freq;
- /* Determine P0, P1 or P2 */
- for (dco_count = 0; dco_count < 3; dco_count++) {
- found = false;
- candidate_p =
- div64_u64(dco_central_freq[dco_count], afe_clock);
- if (retry_with_odd == false)
- candidate_p = (candidate_p % 2 == 0 ?
- candidate_p : candidate_p + 1);
-
- for (P1 = 1; P1 < candidate_p; P1++) {
- for (i = 0; i < 4; i++) {
- if (!(P0[i] != 1 || P1 == 1))
- continue;
-
- for (k = 0; k < 4; k++) {
- if (P1 != 1 && P2[k] != 2)
- continue;
-
- if (candidate_p == P0[i] * P1 * P2[k]) {
- /* Found possible P0, P1, P2 */
- found = true;
- candidate_p0[dco_count] = P0[i];
- candidate_p1[dco_count] = P1;
- candidate_p2[dco_count] = P2[k];
- goto found;
- }
-
- }
- }
- }
+ switch (central_freq) {
+ case 9600000000ULL:
+ params->central_freq = 0;
+ break;
+ case 9000000000ULL:
+ params->central_freq = 1;
+ break;
+ case 8400000000ULL:
+ params->central_freq = 3;
+ }
-found:
- if (found) {
- dco_central_freq_deviation[dco_count] =
- div64_u64(10000 *
- abs_diff((candidate_p * afe_clock),
- dco_central_freq[dco_count]),
- dco_central_freq[dco_count]);
-
- if (dco_central_freq_deviation[dco_count] <
- min_dco_deviation) {
- min_dco_deviation =
- dco_central_freq_deviation[dco_count];
- min_dco_index = dco_count;
- }
- }
+ switch (p0) {
+ case 1:
+ params->pdiv = 0;
+ break;
+ case 2:
+ params->pdiv = 1;
+ break;
+ case 3:
+ params->pdiv = 2;
+ break;
+ case 7:
+ params->pdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
- if (min_dco_index > 2 && dco_count == 2) {
- retry_with_odd = true;
- dco_count = 0;
- }
+ switch (p2) {
+ case 5:
+ params->kdiv = 0;
+ break;
+ case 2:
+ params->kdiv = 1;
+ break;
+ case 3:
+ params->kdiv = 2;
+ break;
+ case 1:
+ params->kdiv = 3;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
}
- if (min_dco_index > 2) {
- WARN(1, "No valid values found for the given pixel clock\n");
- } else {
- wrpll_params->central_freq = dco_central_freq[min_dco_index];
+ params->qdiv_ratio = p1;
+ params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
- switch (dco_central_freq[min_dco_index]) {
- case 9600000000ULL:
- wrpll_params->central_freq = 0;
- break;
- case 9000000000ULL:
- wrpll_params->central_freq = 1;
- break;
- case 8400000000ULL:
- wrpll_params->central_freq = 3;
- }
+ dco_freq = p0 * p1 * p2 * afe_clock;
- switch (candidate_p0[min_dco_index]) {
- case 1:
- wrpll_params->pdiv = 0;
- break;
- case 2:
- wrpll_params->pdiv = 1;
- break;
- case 3:
- wrpll_params->pdiv = 2;
- break;
- case 7:
- wrpll_params->pdiv = 4;
- break;
- default:
- WARN(1, "Incorrect PDiv\n");
- }
+ /*
+ * Intermediate values are in Hz.
+	 * Divide by MHz to match bspec
+ */
+ params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_fraction =
+ div_u64((div_u64(dco_freq, 24) -
+ params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
- switch (candidate_p2[min_dco_index]) {
- case 5:
- wrpll_params->kdiv = 0;
- break;
- case 2:
- wrpll_params->kdiv = 1;
- break;
- case 3:
- wrpll_params->kdiv = 2;
- break;
- case 1:
- wrpll_params->kdiv = 3;
- break;
- default:
- WARN(1, "Incorrect KDiv\n");
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ struct skl_wrpll_params *wrpll_params)
+{
+ uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ uint64_t dco_central_freq[3] = {8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL};
+ static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 24, 28, 30, 32, 36, 40, 42, 44,
+ 48, 52, 54, 56, 60, 64, 66, 68,
+ 70, 72, 76, 78, 80, 84, 88, 90,
+ 92, 96, 98 };
+ static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+ static const struct {
+ const int *list;
+ int n_dividers;
+ } dividers[] = {
+ { even_dividers, ARRAY_SIZE(even_dividers) },
+ { odd_dividers, ARRAY_SIZE(odd_dividers) },
+ };
+ struct skl_wrpll_context ctx;
+ unsigned int dco, d, i;
+ unsigned int p0, p1, p2;
+
+ skl_wrpll_context_init(&ctx);
+
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+ for (i = 0; i < dividers[d].n_dividers; i++) {
+ unsigned int p = dividers[d].list[i];
+ uint64_t dco_freq = p * afe_clock;
+
+ skl_wrpll_try_divider(&ctx,
+ dco_central_freq[dco],
+ dco_freq,
+ p);
+ /*
+ * Skip the remaining dividers if we're sure to
+ * have found the definitive divider, we can't
+ * improve a 0 deviation.
+ */
+ if (ctx.min_deviation == 0)
+ goto skip_remaining_dividers;
+ }
}
- wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
- wrpll_params->qdiv_mode =
- (wrpll_params->qdiv_ratio == 1) ? 0 : 1;
-
- dco_freq = candidate_p0[min_dco_index] *
- candidate_p1[min_dco_index] *
- candidate_p2[min_dco_index] * afe_clock;
-
+skip_remaining_dividers:
/*
- * Intermediate values are in Hz.
- * Divide by MHz to match bsepc
+ * If a solution is found with an even divider, prefer
+ * this one.
*/
- wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
- wrpll_params->dco_fraction =
- div_u64(((div_u64(dco_freq, 24) -
- wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+ if (d == 0 && ctx.p)
+ break;
+ }
+ if (!ctx.p) {
+ DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+ return false;
}
-}
+ /*
+ * gcc incorrectly analyses that these can be used without being
+ * initialized. To be fair, it's hard to guess.
+ */
+ p0 = p1 = p2 = 0;
+ skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+ p0, p1, p2);
+
+ return true;
+}
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
@@ -1281,7 +1566,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
- skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+ if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+ return false;
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -1334,6 +1620,7 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
/* bxt clock parameters */
struct bxt_clk_div {
+ int clock;
uint32_t p1;
uint32_t p2;
uint32_t m2_int;
@@ -1343,14 +1630,14 @@ struct bxt_clk_div {
};
/* pre-calculated values for DP linkrates */
-static struct bxt_clk_div bxt_dp_clk_val[7] = {
- /* 162 */ {4, 2, 32, 1677722, 1, 1},
- /* 270 */ {4, 1, 27, 0, 0, 1},
- /* 540 */ {2, 1, 27, 0, 0, 1},
- /* 216 */ {3, 2, 32, 1677722, 1, 1},
- /* 243 */ {4, 1, 24, 1258291, 1, 1},
- /* 324 */ {4, 1, 32, 1677722, 1, 1},
- /* 432 */ {3, 1, 32, 1677722, 1, 1}
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+ {162000, 4, 2, 32, 1677722, 1, 1},
+ {270000, 4, 1, 27, 0, 0, 1},
+ {540000, 2, 1, 27, 0, 0, 1},
+ {216000, 3, 2, 32, 1677722, 1, 1},
+ {243000, 4, 1, 24, 1258291, 1, 1},
+ {324000, 4, 1, 32, 1677722, 1, 1},
+ {432000, 3, 1, 32, 1677722, 1, 1}
};
static bool
@@ -1363,7 +1650,7 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct bxt_clk_div clk_div = {0};
int vco = 0;
uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
- uint32_t dcoampovr_en_h, dco_amp, lanestagger;
+ uint32_t lanestagger;
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
intel_clock_t best_clock;
@@ -1390,29 +1677,19 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
vco = best_clock.vco;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_EDP) {
- struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int i;
- switch (intel_dp->link_bw) {
- case DP_LINK_BW_1_62:
- clk_div = bxt_dp_clk_val[0];
- break;
- case DP_LINK_BW_2_7:
- clk_div = bxt_dp_clk_val[1];
- break;
- case DP_LINK_BW_5_4:
- clk_div = bxt_dp_clk_val[2];
- break;
- default:
- clk_div = bxt_dp_clk_val[0];
- DRM_ERROR("Unknown link rate\n");
+ clk_div = bxt_dp_clk_val[0];
+ for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+ if (bxt_dp_clk_val[i].clock == clock) {
+ clk_div = bxt_dp_clk_val[i];
+ break;
+ }
}
vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
}
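
For the DP path above, the divider table entry is pre-computed, so the VCO follows directly from the link clock. A worked example with the 270000 kHz (HBR) entry, p1 = 4, p2 = 1 (illustrative arithmetic only):

#include <stdio.h>

/*
 * Worked example of vco = clock * 10 / 2 * p1 * p2 for the 270000 kHz
 * entry in the bxt_dp_clk_val table above. The AFE clock is 5x the link
 * clock, hence the * 10 / 2.
 */
int main(void)
{
	int clock = 270000;	/* link clock in kHz */
	int p1 = 4, p2 = 1;
	int vco = clock * 10 / 2 * p1 * p2;

	/* 270000 * 5 * 4 * 1 = 5400000 kHz, i.e. the dedicated 5.4 GHz case */
	printf("vco=%d kHz\n", vco);
	return 0;
}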
- dco_amp = 15;
- dcoampovr_en_h = 0;
- if (vco >= 6200000 && vco <= 6480000) {
+ if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
gain_ctl = 3;
@@ -1423,8 +1700,6 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
int_coef = 11;
gain_ctl = 3;
targ_cnt = 9;
- if (vco >= 4800000 && vco < 5400000)
- dcoampovr_en_h = 1;
} else if (vco == 5400000) {
prop_coef = 3;
int_coef = 8;
@@ -1466,10 +1741,13 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
crtc_state->dpll_hw_state.pll8 = targ_cnt;
- if (dcoampovr_en_h)
- crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H;
+ crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
- crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp);
+ crtc_state->dpll_hw_state.pll10 =
+ PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+ | PORT_PLL_DCO_AMP_OVR_EN_H;
+
+ crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
crtc_state->dpll_hw_state.pcsdw12 =
LANESTAGGER_STRAP_OVRD | lanestagger;
@@ -1799,8 +2077,65 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
TRANS_CLK_SEL_DISABLED);
}
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
- enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
+ enum port port, int type)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct ddi_buf_trans *ddi_translations;
+ uint8_t iboost;
+ uint8_t dp_iboost, hdmi_iboost;
+ int n_entries;
+ u32 reg;
+
+ /* VBT may override standard boost values */
+ dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+ hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (dp_iboost) {
+ iboost = dp_iboost;
+ } else {
+ ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+ iboost = ddi_translations[port].i_boost;
+ }
+ } else if (type == INTEL_OUTPUT_EDP) {
+ if (dp_iboost) {
+ iboost = dp_iboost;
+ } else {
+ ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+ iboost = ddi_translations[port].i_boost;
+ }
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ if (hdmi_iboost) {
+ iboost = hdmi_iboost;
+ } else {
+ ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+ iboost = ddi_translations[port].i_boost;
+ }
+ } else {
+ return;
+ }
+
+ /* Make sure that the requested I_boost is valid */
+ if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
+ DRM_ERROR("Invalid I_boost value %u\n", iboost);
+ return;
+ }
+
+ reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
+ reg &= ~BALANCE_LEG_MASK(port);
+ reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
+
+ if (iboost)
+ reg |= iboost << BALANCE_LEG_SHIFT(port);
+ else
+ reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+
+ I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+}
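
skl_ddi_set_iboost above ends with a read-modify-write of a shared register in which each port owns a balance-leg field plus a disable bit. A small sketch of that bit manipulation; the shift values are assumptions made for illustration, not the real register layout:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the balance-leg update: clear the port's field and its
 * disable bit, then either program a non-zero I_boost value or set the
 * disable bit again. The shifts below are assumed, not hardware-accurate.
 */
int main(void)
{
	unsigned int port = 1;			/* hypothetical port index */
	uint32_t reg = 0xffffffffu;		/* pretend current register value */
	uint32_t leg_mask = 0x7u << (port * 3);
	uint32_t disable_bit = 1u << (23 + port);
	uint8_t iboost = 0x3;			/* one of the valid values 1/3/7 */

	reg &= ~leg_mask;
	reg &= ~disable_bit;
	if (iboost)
		reg |= (uint32_t)iboost << (port * 3);
	else
		reg |= disable_bit;

	printf("reg=0x%08x\n", reg);
	return 0;
}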
+
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+ enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct bxt_ddi_buf_trans *ddi_translations;
@@ -1860,6 +2195,73 @@ void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
}
+static uint32_t translate_signal_level(int signal_levels)
+{
+ uint32_t level;
+
+ switch (signal_levels) {
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+ signal_levels);
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 0;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 1;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ level = 2;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+ level = 3;
+ break;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 5;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ level = 6;
+ break;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 7;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 8;
+ break;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 9;
+ break;
+ }
+
+ return level;
+}
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dport->base.base.dev;
+ struct intel_encoder *encoder = &dport->base;
+ uint8_t train_set = intel_dp->train_set[0];
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ enum port port = dport->port;
+ uint32_t level;
+
+ level = translate_signal_level(signal_levels);
+
+ if (IS_SKYLAKE(dev))
+ skl_ddi_set_iboost(dev, level, port, encoder->type);
+ else if (IS_BROXTON(dev))
+ bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+
+ return DDI_BUF_TRANS_SELECT(level);
+}
+
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -2404,7 +2806,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp = I915_READ(BXT_PORT_PLL(port, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= (5 << 1);
+ temp |= pll->config.hw_state.pll9;
I915_WRITE(BXT_PORT_PLL(port, 9), temp);
temp = I915_READ(BXT_PORT_PLL(port, 10));
@@ -2417,8 +2819,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
temp |= PORT_PLL_RECALIBRATE;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
- /* Enable 10 bit clock */
- temp |= PORT_PLL_10BIT_CLK_ENABLE;
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ temp |= pll->config.hw_state.ebb4;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
/* Enable PLL */
@@ -2469,13 +2871,38 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
return false;
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+ hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll0 &= PORT_PLL_M2_MASK;
+
hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll1 &= PORT_PLL_N_MASK;
+
hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+ PORT_PLL_INT_COEFF_MASK |
+ PORT_PLL_GAIN_CTL_MASK;
+
hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+ hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+ hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+ PORT_PLL_DCO_AMP_MASK;
+
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers. We configure all lanes the same way, so
@@ -2486,6 +2913,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+ hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
return true;
}
@@ -2510,7 +2938,6 @@ void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
- int cdclk_freq;
if (IS_SKYLAKE(dev))
skl_shared_dplls_init(dev_priv);
@@ -2519,10 +2946,10 @@ void intel_ddi_pll_init(struct drm_device *dev)
else
hsw_shared_dplls_init(dev_priv);
- cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq);
-
if (IS_SKYLAKE(dev)) {
+ int cdclk_freq;
+
+ cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
dev_priv->skl_boot_cdclk = cdclk_freq;
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
@@ -2618,20 +3045,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, val);
}
-static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
-{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- int type = intel_dig_port->base.type;
-
- if (type != INTEL_OUTPUT_DISPLAYPORT &&
- type != INTEL_OUTPUT_EDP &&
- type != INTEL_OUTPUT_UNKNOWN) {
- return;
- }
-
- intel_dp_hot_plug(intel_encoder);
-}
-
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2793,10 +3206,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
- init_hdmi = true;
- init_dp = true;
+ return;
}
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
@@ -2825,14 +3237,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
- intel_encoder->hot_plug = intel_ddi_hot_plug;
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hpd_irq_port[port] = intel_dig_port;
+ dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b61f9810387..83936403502f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -86,9 +86,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- bool force_restore);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -105,22 +102,13 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *crtc);
-static void intel_finish_crtc_commit(struct drm_crtc *crtc);
+static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
int num_connectors);
-static void intel_crtc_enable_planes(struct drm_crtc *crtc);
-static void intel_crtc_disable_planes(struct drm_crtc *crtc);
-
-static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
-{
- if (!connector->mst_port)
- return connector->encoder;
- else
- return &connector->mst_port->mst_encoders[pipe]->base;
-}
+static void intel_modeset_setup_hw_state(struct drm_device *dev);
typedef struct {
int min, max;
@@ -413,7 +401,7 @@ static const intel_limit_t intel_limits_chv = {
static const intel_limit_t intel_limits_bxt = {
/* FIXME: find real dot limits */
.dot = { .min = 0, .max = INT_MAX },
- .vco = { .min = 4800000, .max = 6480000 },
+ .vco = { .min = 4800000, .max = 6700000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
/* FIXME: find real m2 limits */
@@ -422,14 +410,10 @@ static const intel_limit_t intel_limits_bxt = {
.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
-static void vlv_clock(int refclk, intel_clock_t *clock)
+static bool
+needs_modeset(struct drm_crtc_state *state)
{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+ return drm_atomic_crtc_needs_modeset(state);
}
/**
@@ -561,15 +545,25 @@ intel_limit(struct intel_crtc_state *crtc_state, int refclk)
return limit;
}
+/*
+ * Platform specific helpers to calculate the port PLL loopback (clock.m) and
+ * post-divider (clock.p) values, and the pre- (clock.vco) and post-divided
+ * fast (clock.dot) clock rates. The fast dot clock is fed to the port's IO
+ * logic. The helpers' return value is the rate of the clock that is fed to
+ * the display engine's pipe, which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static void pineview_clock(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
- return;
+ return 0;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
}
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -577,25 +571,41 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
-static void i9xx_clock(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
- return;
+ return 0;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
}
-static void chv_clock(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
- return;
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -639,16 +649,12 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
return true;
}
-static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+static int
+i9xx_select_p2_div(const intel_limit_t *limit,
+ const struct intel_crtc_state *crtc_state,
+ int target)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
- intel_clock_t clock;
- int err = target;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -657,18 +663,31 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
* single/dual channel state, if we even can.
*/
if (intel_is_dual_link_lvds(dev))
- clock.p2 = limit->p2.p2_fast;
+ return limit->p2.p2_fast;
else
- clock.p2 = limit->p2.p2_slow;
+ return limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
+ return limit->p2.p2_slow;
else
- clock.p2 = limit->p2.p2_fast;
+ return limit->p2.p2_fast;
}
+}
+
+static bool
+i9xx_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc_state->base.crtc->dev;
+ intel_clock_t clock;
+ int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
@@ -681,7 +700,7 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
- i9xx_clock(refclk, &clock);
+ i9xx_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
@@ -708,30 +727,14 @@ pnv_find_best_dpll(const intel_limit_t *limit,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- /*
- * For LVDS just rely on its current settings for dual-channel.
- * We haven't figured out how to reliably set up different
- * single/dual channel state, if we even can.
- */
- if (intel_is_dual_link_lvds(dev))
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
memset(best_clock, 0, sizeof(*best_clock));
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
@@ -742,7 +745,7 @@ pnv_find_best_dpll(const intel_limit_t *limit,
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
- pineview_clock(refclk, &clock);
+ pnv_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
@@ -769,28 +772,17 @@ g4x_find_best_dpll(const intel_limit_t *limit,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
intel_clock_t clock;
int max_n;
- bool found;
+ bool found = false;
/* approximately equals target * 0.00585 */
int err_most = (target >> 8) + (target >> 9);
- found = false;
-
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_is_dual_link_lvds(dev))
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
max_n = limit->n.max;
/* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
@@ -803,7 +795,7 @@ g4x_find_best_dpll(const intel_limit_t *limit,
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
- i9xx_clock(refclk, &clock);
+ i9xx_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
@@ -893,7 +885,7 @@ vlv_find_best_dpll(const intel_limit_t *limit,
clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
refclk * clock.m1);
- vlv_clock(refclk, &clock);
+ vlv_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
@@ -956,7 +948,7 @@ chv_find_best_dpll(const intel_limit_t *limit,
clock.m2 = m2;
- chv_clock(refclk, &clock);
+ chv_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit, &clock))
continue;
@@ -1026,7 +1018,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
line_mask = DSL_LINEMASK_GEN3;
line1 = I915_READ(reg) & line_mask;
- mdelay(5);
+ msleep(5);
line2 = I915_READ(reg) & line_mask;
return line1 == line2;
@@ -1694,7 +1686,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
int count = 0;
for_each_intel_crtc(dev, crtc)
- count += crtc->active &&
+ count += crtc->base.state->active &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
return count;
@@ -1775,7 +1767,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
- intel_num_dvo_pipes(dev) == 1) {
+ !intel_num_dvo_pipes(dev)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
@@ -1790,13 +1782,13 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- I915_WRITE(DPLL(pipe), 0);
+ I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
}
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- u32 val = 0;
+ u32 val;
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
@@ -1805,8 +1797,9 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
* Leave integrated clock source and reference clock enabled for pipe B.
* The latter is needed for VGA hotplug / manual detection.
*/
+ val = DPLL_VGA_MODE_DIS;
if (pipe == PIPE_B)
- val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
+ val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
@@ -1821,7 +1814,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
assert_pipe_disabled(dev_priv, pipe);
/* Set PLL en = 0 */
- val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
+ val = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
I915_WRITE(DPLL(pipe), val);
@@ -1942,11 +1936,13 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
/* PCH only available on ILK+ */
- BUG_ON(INTEL_INFO(dev)->gen < 5);
- if (WARN_ON(pll == NULL))
- return;
+ if (INTEL_INFO(dev)->gen < 5)
+ return;
- if (WARN_ON(pll->config.crtc_mask == 0))
+ if (pll == NULL)
+ return;
+
+ if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
return;
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -2004,11 +2000,15 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
+ * Make the BPC in transcoder be consistent with
+ * that in pipeconf reg. For HDMI we must use 8bpc
+ * here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
- val |= pipeconf_val & PIPECONF_BPC_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+ val |= PIPECONF_8BPC;
+ else
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
@@ -2122,6 +2122,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
int reg;
u32 val;
+ DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+
assert_planes_disabled(dev_priv, pipe);
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
@@ -2181,6 +2183,8 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
int reg;
u32 val;
+ DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+
/*
* Make sure planes won't keep trying to pump pixels to us,
* or we might hang the display.
@@ -2211,28 +2215,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
intel_wait_for_pipe_off(crtc);
}
-/**
- * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @plane: plane to be enabled
- * @crtc: crtc for the plane
- *
- * Enable @plane on @crtc, making sure that the pipe is running first.
- */
-static void intel_enable_primary_hw_plane(struct drm_plane *plane,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /* If the pipe isn't enabled, we can't pump pixels and may hang */
- assert_pipe_enabled(dev_priv, intel_crtc->pipe);
- to_intel_plane_state(plane->state)->visible = true;
-
- dev_priv->display.update_primary_plane(crtc, plane->fb,
- crtc->x, crtc->y);
-}
-
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -2302,6 +2284,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
struct intel_rotation_info *info = &view->rotation_info;
+ unsigned int tile_height, tile_pitch;
*view = i915_ggtt_view_normal;
@@ -2318,14 +2301,35 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
info->pitch = fb->pitches[0];
info->fb_modifier = fb->modifier[0];
+ tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+ fb->modifier[0]);
+ tile_pitch = PAGE_SIZE / tile_height;
+ info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+ info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
+ info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+
return 0;
}
+static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ return 256 * 1024;
+ else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv))
+ return 128 * 1024;
+ else if (INTEL_INFO(dev_priv)->gen >= 4)
+ return 4 * 1024;
+ else
+ return 0;
+}
+
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
- struct intel_engine_cs *pipelined)
+ struct intel_engine_cs *pipelined,
+ struct drm_i915_gem_request **pipelined_request)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2338,14 +2342,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
switch (fb->modifier[0]) {
case DRM_FORMAT_MOD_NONE:
- if (INTEL_INFO(dev)->gen >= 9)
- alignment = 256 * 1024;
- else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
- alignment = 128 * 1024;
- else if (INTEL_INFO(dev)->gen >= 4)
- alignment = 4 * 1024;
- else
- alignment = 64 * 1024;
+ alignment = intel_linear_alignment(dev_priv);
break;
case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen >= 9)
@@ -2390,7 +2387,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
- &view);
+ pipelined_request, &view);
if (ret)
goto err_interruptible;
@@ -2400,7 +2397,18 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
* a fence as the cost is not that onerous.
*/
ret = i915_gem_object_get_fence(obj);
- if (ret)
+ if (ret == -EDEADLK) {
+ /*
+ * -EDEADLK means there are no free fences and
+ * no pending flips.
+ *
+ * This is propagated to atomic, but it uses
+ * -EDEADLK to force a locking recovery, so
+ * change the returned error to -EBUSY.
+ */
+ ret = -EBUSY;
+ goto err_unpin;
+ } else if (ret)
goto err_unpin;
i915_gem_object_pin_fence(obj);
@@ -2435,7 +2443,8 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
unsigned int tiling_mode,
unsigned int cpp,
unsigned int pitch)
@@ -2451,12 +2460,13 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
return tile_rows * pitch * 8 + tiles * 4096;
} else {
+ unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
unsigned int offset;
offset = *y * pitch + *x * cpp;
- *y = 0;
- *x = (offset & 4095) / cpp;
- return offset & -4096;
+ *y = (offset & alignment) / pitch;
+ *x = ((offset & alignment) - *y * pitch) / cpp;
+ return offset & ~alignment;
}
}
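
In the linear (untiled) branch above, the change keeps the returned base address aligned and folds the remainder back into adjusted x/y instead of always forcing y to 0. A small standalone example of that arithmetic (values are illustrative):

#include <stdio.h>

/*
 * Example of the linear-offset split: base = offset rounded down to the
 * alignment, and the remainder is re-expressed as x/y within the aligned
 * base. Pitch, cpp and alignment below are example values.
 */
int main(void)
{
	unsigned int pitch = 8192;		/* bytes per scanline */
	unsigned int cpp = 4;			/* bytes per pixel */
	unsigned int alignment = 4096 - 1;	/* 4K alignment, kept as a mask */
	unsigned int x = 100, y = 50;

	unsigned int offset = y * pitch + x * cpp;	/* 410000 */
	unsigned int base = offset & ~alignment;	/* 409600 */
	unsigned int rem = offset & alignment;		/* 400 */

	y = rem / pitch;			/* 0 */
	x = (rem - y * pitch) / cpp;		/* 100 */

	printf("base=%u x=%u y=%u\n", base, x, y);
	return 0;
}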
@@ -2583,6 +2593,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
+ struct drm_plane_state *plane_state = primary->state;
struct drm_framebuffer *fb;
if (!plane_config->fb)
@@ -2622,15 +2633,23 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
+ plane_state->src_x = plane_state->src_y = 0;
+ plane_state->src_w = fb->width << 16;
+ plane_state->src_h = fb->height << 16;
+
+ plane_state->crtc_x = plane_state->crtc_y = 0;
+ plane_state->crtc_w = fb->width;
+ plane_state->crtc_h = fb->height;
+
obj = intel_fb_obj(fb);
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
- primary->fb = fb;
- primary->state->crtc = &intel_crtc->base;
- primary->crtc = &intel_crtc->base;
- update_state_fb(primary);
- obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+ drm_framebuffer_reference(fb);
+ primary->fb = primary->state->fb = fb;
+ primary->crtc = primary->state->crtc = &intel_crtc->base;
+ intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+ obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2725,7 +2744,8 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ intel_gen4_compute_page_offset(dev_priv,
+ &x, &y, obj->tiling_mode,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
@@ -2826,7 +2846,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * pixel_size;
intel_crtc->dspaddr_offset =
- intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ intel_gen4_compute_page_offset(dev_priv,
+ &x, &y, obj->tiling_mode,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
@@ -2904,32 +2925,32 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
return i915_gem_obj_ggtt_offset_view(obj, view);
}
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+ I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+ I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+ DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
+ intel_crtc->base.base.id, intel_crtc->pipe, id);
+}
+
/*
* This function detaches (aka. unbinds) unused scalers in hardware
*/
-void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
struct intel_crtc_scaler_state *scaler_state;
int i;
- if (!intel_crtc || !intel_crtc->config)
- return;
-
- dev = intel_crtc->base.dev;
- dev_priv = dev->dev_private;
scaler_state = &intel_crtc->config->scaler_state;
/* loop through and disable scalers that aren't in use */
for (i = 0; i < intel_crtc->num_scalers; i++) {
- if (!scaler_state->scalers[i].in_use) {
- I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
- I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
- I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
- DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->pipe, i);
- }
+ if (!scaler_state->scalers[i].in_use)
+ skl_detach_scaler(intel_crtc, i);
}
}
@@ -3132,8 +3153,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ if (dev_priv->fbc.disable_fbc)
+ dev_priv->fbc.disable_fbc(dev_priv);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
@@ -3176,24 +3197,8 @@ static void intel_update_primary_planes(struct drm_device *dev)
}
}
-void intel_crtc_reset(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!crtc->active)
- return;
-
- intel_crtc_disable_planes(&crtc->base);
- dev_priv->display.crtc_disable(&crtc->base);
- dev_priv->display.crtc_enable(&crtc->base);
- intel_crtc_enable_planes(&crtc->base);
-}
-
void intel_prepare_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc;
-
/* no reset support for gen2 */
if (IS_GEN2(dev))
return;
@@ -3203,18 +3208,11 @@ void intel_prepare_reset(struct drm_device *dev)
return;
drm_modeset_lock_all(dev);
-
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- for_each_intel_crtc(dev, crtc) {
- if (!crtc->active)
- continue;
-
- intel_crtc_disable_planes(&crtc->base);
- dev_priv->display.crtc_disable(&crtc->base);
- }
+ intel_display_suspend(dev);
}
void intel_finish_reset(struct drm_device *dev)
@@ -3258,7 +3256,7 @@ void intel_finish_reset(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_modeset_setup_hw_state(dev, true);
+ intel_display_resume(dev);
intel_hpd_init(dev_priv);
@@ -4200,34 +4198,16 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-void intel_put_shared_dpll(struct intel_crtc *crtc)
-{
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
- if (pll == NULL)
- return;
-
- if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
- WARN(1, "bad %s crtc mask\n", pll->name);
- return;
- }
-
- pll->config.crtc_mask &= ~(1 << crtc->pipe);
- if (pll->config.crtc_mask == 0) {
- WARN_ON(pll->on);
- WARN_ON(pll->active);
- }
-
- crtc->config->shared_dpll = DPLL_ID_PRIVATE;
-}
-
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll;
+ struct intel_shared_dpll_config *shared_dpll;
enum intel_dpll_id i;
+ shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
@@ -4236,7 +4216,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
- WARN_ON(pll->new_config->crtc_mask);
+ WARN_ON(shared_dpll[i].crtc_mask);
goto found;
}
@@ -4256,7 +4236,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
pll = &dev_priv->shared_dplls[i];
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
- WARN_ON(pll->new_config->crtc_mask);
+ WARN_ON(shared_dpll[i].crtc_mask);
goto found;
}
@@ -4265,15 +4245,15 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
- if (pll->new_config->crtc_mask == 0)
+ if (shared_dpll[i].crtc_mask == 0)
continue;
if (memcmp(&crtc_state->dpll_hw_state,
- &pll->new_config->hw_state,
- sizeof(pll->new_config->hw_state)) == 0) {
+ &shared_dpll[i].hw_state,
+ sizeof(crtc_state->dpll_hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
crtc->base.base.id, pll->name,
- pll->new_config->crtc_mask,
+ shared_dpll[i].crtc_mask,
pll->active);
goto found;
}
@@ -4282,7 +4262,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
- if (pll->new_config->crtc_mask == 0) {
+ if (shared_dpll[i].crtc_mask == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
@@ -4292,83 +4272,33 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
return NULL;
found:
- if (pll->new_config->crtc_mask == 0)
- pll->new_config->hw_state = crtc_state->dpll_hw_state;
+ if (shared_dpll[i].crtc_mask == 0)
+ shared_dpll[i].hw_state =
+ crtc_state->dpll_hw_state;
crtc_state->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- pll->new_config->crtc_mask |= 1 << crtc->pipe;
+ shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
return pll;
}
-/**
- * intel_shared_dpll_start_config - start a new PLL staged config
- * @dev_priv: DRM device
- * @clear_pipes: mask of pipes that will have their PLLs freed
- *
- * Starts a new PLL staged config, copying the current config but
- * releasing the references of pipes specified in clear_pipes.
- */
-static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
- unsigned clear_pipes)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id i;
-
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- pll = &dev_priv->shared_dplls[i];
-
- pll->new_config = kmemdup(&pll->config, sizeof pll->config,
- GFP_KERNEL);
- if (!pll->new_config)
- goto cleanup;
-
- pll->new_config->crtc_mask &= ~clear_pipes;
- }
-
- return 0;
-
-cleanup:
- while (--i >= 0) {
- pll = &dev_priv->shared_dplls[i];
- kfree(pll->new_config);
- pll->new_config = NULL;
- }
-
- return -ENOMEM;
-}
-
-static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
+static void intel_shared_dpll_commit(struct drm_atomic_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_shared_dpll_config *shared_dpll;
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- pll = &dev_priv->shared_dplls[i];
-
- WARN_ON(pll->new_config == &pll->config);
-
- pll->config = *pll->new_config;
- kfree(pll->new_config);
- pll->new_config = NULL;
- }
-}
-
-static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id i;
+ if (!to_intel_atomic_state(state)->dpll_set)
+ return;
+ shared_dpll = to_intel_atomic_state(state)->shared_dpll;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
-
- WARN_ON(pll->new_config == &pll->config);
-
- kfree(pll->new_config);
- pll->new_config = NULL;
+ pll->config = shared_dpll[i];
}
}
@@ -4386,62 +4316,16 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
-/**
- * skl_update_scaler_users - Stages update to crtc's scaler state
- * @intel_crtc: crtc
- * @crtc_state: crtc_state
- * @plane: plane (NULL indicates crtc is requesting update)
- * @plane_state: plane's state
- * @force_detach: request unconditional detachment of scaler
- *
- * This function updates scaler state for requested plane or crtc.
- * To request scaler usage update for a plane, caller shall pass plane pointer.
- * To request scaler usage update for crtc, caller shall pass plane pointer
- * as NULL.
- *
- * Return
- * 0 - scaler_usage updated successfully
- * error - requested scaling cannot be supported or other error condition
- */
-int
-skl_update_scaler_users(
- struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state,
- struct intel_plane *intel_plane, struct intel_plane_state *plane_state,
- int force_detach)
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ unsigned scaler_user, int *scaler_id, unsigned int rotation,
+ int src_w, int src_h, int dst_w, int dst_h)
{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- int idx;
- int src_w, src_h, dst_w, dst_h;
- int *scaler_id;
- struct drm_framebuffer *fb;
- struct intel_crtc_scaler_state *scaler_state;
- unsigned int rotation;
-
- if (!intel_crtc || !crtc_state)
- return 0;
-
- scaler_state = &crtc_state->scaler_state;
-
- idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX;
- fb = intel_plane ? plane_state->base.fb : NULL;
-
- if (intel_plane) {
- src_w = drm_rect_width(&plane_state->src) >> 16;
- src_h = drm_rect_height(&plane_state->src) >> 16;
- dst_w = drm_rect_width(&plane_state->dst);
- dst_h = drm_rect_height(&plane_state->dst);
- scaler_id = &plane_state->scaler_id;
- rotation = plane_state->base.rotation;
- } else {
- struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
- src_w = crtc_state->pipe_src_w;
- src_h = crtc_state->pipe_src_h;
- dst_w = adjusted_mode->hdisplay;
- dst_h = adjusted_mode->vdisplay;
- scaler_id = &scaler_state->scaler_id;
- rotation = DRM_ROTATE_0;
- }
need_scaling = intel_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h):
@@ -4457,17 +4341,14 @@ skl_update_scaler_users(
* update to free the scaler is done in plane/panel-fit programming.
* For this purpose crtc/plane_state->scaler_id isn't reset here.
*/
- if (force_detach || !need_scaling || (intel_plane &&
- (!fb || !plane_state->visible))) {
+ if (force_detach || !need_scaling) {
if (*scaler_id >= 0) {
- scaler_state->scaler_users &= ~(1 << idx);
+ scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
- DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d "
- "crtc_state = %p scaler_users = 0x%x\n",
- intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC",
- intel_plane ? intel_plane->base.base.id :
- intel_crtc->base.base.id, crtc_state,
+ DRM_DEBUG_KMS("scaler_user index %u.%u: "
+ "Staged freeing scaler id %d scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, *scaler_id,
scaler_state->scaler_users);
*scaler_id = -1;
}
@@ -4480,55 +4361,123 @@ skl_update_scaler_users(
src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
- DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u "
+ DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
- intel_plane ? "PLANE" : "CRTC",
- intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
- intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h);
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
return -EINVAL;
}
+ /* mark this plane as a scaler user in crtc_state */
+ scaler_state->scaler_users |= (1 << scaler_user);
+ DRM_DEBUG_KMS("scaler_user index %u.%u: "
+ "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ scaler_state->scaler_users);
+
+ return 0;
+}
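
The need_scaling test inside skl_update_scaler compares source and destination cross-wise when the plane is rotated by 90/270 degrees, since the source width then maps onto the destination height. A minimal sketch of that check (example sizes, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the rotation-aware scaling check; example sizes only. */
static bool needs_scaling(bool rotated_90_270, int src_w, int src_h,
			  int dst_w, int dst_h)
{
	if (rotated_90_270)
		return src_h != dst_w || src_w != dst_h;

	return src_w != dst_w || src_h != dst_h;
}

int main(void)
{
	/* 1920x1080 source rotated 90 degrees onto a 1080x1920 pipe: no scaler */
	printf("rotated: %d\n", needs_scaling(true, 1920, 1080, 1080, 1920));

	/* same source, unrotated, onto a 3840x2160 pipe: scaler needed */
	printf("unrotated: %d\n", needs_scaling(false, 1920, 1080, 3840, 2160));
	return 0;
}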
+
+/**
+ * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
+ *
+ * @state: crtc's state
+ *
+ * Return
+ * 0 - scaler_usage updated successfully
+ * error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_crtc(struct intel_crtc_state *state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &state->base.adjusted_mode;
+
+ DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
+ intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+
+ return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+ &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ state->pipe_src_w, state->pipe_src_h,
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ *
+ * @crtc_state: crtc's state
+ * @plane_state: atomic plane state to update
+ *
+ * Return
+ * 0 - scaler_usage updated successfully
+ * error - requested scaling cannot be supported or other error condition
+ */
+static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *intel_plane =
+ to_intel_plane(plane_state->base.plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ int ret;
+
+ bool force_detach = !fb || !plane_state->visible;
+
+ DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
+ intel_plane->base.base.id, intel_crtc->pipe,
+ drm_plane_index(&intel_plane->base));
+
+ ret = skl_update_scaler(crtc_state, force_detach,
+ drm_plane_index(&intel_plane->base),
+ &plane_state->scaler_id,
+ plane_state->base.rotation,
+ drm_rect_width(&plane_state->src) >> 16,
+ drm_rect_height(&plane_state->src) >> 16,
+ drm_rect_width(&plane_state->dst),
+ drm_rect_height(&plane_state->dst));
+
+ if (ret || plane_state->scaler_id < 0)
+ return ret;
+
/* check colorkey */
- if (WARN_ON(intel_plane &&
- intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) {
- DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey",
- intel_plane->base.base.id, src_w, src_h, dst_w, dst_h);
+ if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
+ DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
+ intel_plane->base.base.id);
return -EINVAL;
}
/* Check src format */
- if (intel_plane) {
- switch (fb->pixel_format) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- break;
- default:
- DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, fb->base.id, fb->pixel_format);
- return -EINVAL;
- }
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ break;
+ default:
+ DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+ return -EINVAL;
}
- /* mark this plane as a scaler user in crtc_state */
- scaler_state->scaler_users |= (1 << idx);
- DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u "
- "crtc_state = %p scaler_users = 0x%x\n",
- intel_plane ? "PLANE" : "CRTC",
- intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
- src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users);
return 0;
}
-static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
+static void skylake_scaler_disable(struct intel_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < crtc->num_scalers; i++)
+ skl_detach_scaler(crtc, i);
+}
+
+static void skylake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4538,13 +4487,6 @@ static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
- /* To update pfit, first update scaler state */
- skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable);
- intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config);
- skl_detach_scalers(crtc);
- if (!enable)
- return;
-
if (crtc->config->pch_pfit.enabled) {
int id;
@@ -4584,20 +4526,6 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
}
}
-static void intel_enable_sprite_planes(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
-
- drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
- intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
- intel_plane_restore(&intel_plane->base);
- }
-}
-
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4668,7 +4596,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
bool reenable_ips = false;
/* The clocks have to be on to load the palette. */
- if (!crtc->state->enable || !intel_crtc->active)
+ if (!crtc->state->active)
return;
if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
@@ -4755,10 +4683,6 @@ intel_post_enable_primary(struct drm_crtc *crtc)
*/
hsw_enable_ips(intel_crtc);
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
-
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So don't enable underrun reporting before at least some planes
@@ -4810,13 +4734,11 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev)) {
intel_set_memory_cxsr(dev_priv, false);
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->fbc.crtc == intel_crtc)
- intel_fbc_disable(dev);
- mutex_unlock(&dev->struct_mutex);
+ dev_priv->wm.vlv.cxsr = false;
+ intel_wait_for_vblank(dev, pipe);
+ }
/*
* FIXME IPS should be fine as long as one plane is
@@ -4827,46 +4749,83 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
hsw_disable_ips(intel_crtc);
}
-static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+static void intel_post_plane_update(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_plane *plane;
- intel_enable_primary_hw_plane(crtc->primary, crtc);
- intel_enable_sprite_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
+ if (atomic->wait_vblank)
+ intel_wait_for_vblank(dev, crtc->pipe);
- intel_post_enable_primary(crtc);
+ intel_frontbuffer_flip(dev, atomic->fb_bits);
- /*
- * FIXME: Once we grow proper nuclear flip support out of this we need
- * to compute the mask of flip planes precisely. For the time being
- * consider this a flip to a NULL plane.
- */
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+ if (atomic->disable_cxsr)
+ crtc->wm.cxsr_allowed = true;
+
+ if (crtc->atomic.update_wm_post)
+ intel_update_watermarks(&crtc->base);
+
+ if (atomic->update_fbc)
+ intel_fbc_update(dev_priv);
+
+ if (atomic->post_enable_primary)
+ intel_post_enable_primary(&crtc->base);
+
+ drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
+ intel_update_sprite_watermarks(plane, &crtc->base,
+ 0, 0, 0, false, false);
+
+ memset(atomic, 0, sizeof(*atomic));
+}
+
+static void intel_pre_plane_update(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+ struct drm_plane *p;
+
+ /* Track fb's for any planes being disabled */
+ drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
+ struct intel_plane *plane = to_intel_plane(p);
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
+ plane->frontbuffer_bit);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (atomic->wait_for_flips)
+ intel_crtc_wait_for_pending_flips(&crtc->base);
+
+ if (atomic->disable_fbc)
+ intel_fbc_disable_crtc(crtc);
+
+ if (crtc->atomic.disable_ips)
+ hsw_disable_ips(crtc);
+
+ if (atomic->pre_disable_primary)
+ intel_pre_disable_primary(&crtc->base);
+
+ if (atomic->disable_cxsr) {
+ crtc->wm.cxsr_allowed = false;
+ intel_set_memory_cxsr(dev_priv, false);
+ }
}
-static void intel_crtc_disable_planes(struct drm_crtc *crtc)
+static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
+ struct drm_plane *p;
int pipe = intel_crtc->pipe;
- intel_crtc_wait_for_pending_flips(crtc);
-
- intel_pre_disable_primary(crtc);
-
intel_crtc_dpms_overlay_disable(intel_crtc);
- for_each_intel_plane(dev, intel_plane) {
- if (intel_plane->pipe == pipe) {
- struct drm_crtc *from = intel_plane->base.crtc;
- intel_plane->disable_plane(&intel_plane->base,
- from ?: crtc, true);
- }
- }
+ drm_for_each_plane_mask(p, dev, plane_mask)
+ to_intel_plane(p)->disable_plane(p, crtc);
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
@@ -4884,9 +4843,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->state->enable);
-
- if (intel_crtc->active)
+ if (WARN_ON(intel_crtc->active))
return;
if (intel_crtc->config->has_pch_encoder)
@@ -4953,46 +4910,17 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
-/*
- * This implements the workaround described in the "notes" section of the mode
- * set sequence documentation. When going from no pipes or single pipe to
- * multiple pipes, and planes are enabled after the pipe, we need to wait at
- * least 2 vblanks on the first pipe before enabling planes on the second pipe.
- */
-static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct intel_crtc *crtc_it, *other_active_crtc = NULL;
-
- /* We want to get the other_active_crtc only if there's only 1 other
- * active crtc. */
- for_each_intel_crtc(dev, crtc_it) {
- if (!crtc_it->active || crtc_it == crtc)
- continue;
-
- if (other_active_crtc)
- return;
-
- other_active_crtc = crtc_it;
- }
- if (!other_active_crtc)
- return;
-
- intel_wait_for_vblank(dev, other_active_crtc->pipe);
- intel_wait_for_vblank(dev, other_active_crtc->pipe);
-}
-
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- int pipe = intel_crtc->pipe;
+ int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
- WARN_ON(!crtc->state->enable);
-
- if (intel_crtc->active)
+ if (WARN_ON(intel_crtc->active))
return;
if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -5033,7 +4961,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_ddi_enable_pipe_clock(intel_crtc);
if (INTEL_INFO(dev)->gen == 9)
- skylake_pfit_update(intel_crtc, 1);
+ skylake_pfit_enable(intel_crtc);
else if (INTEL_INFO(dev)->gen < 9)
ironlake_pfit_enable(intel_crtc);
else
@@ -5067,7 +4995,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
- haswell_mode_set_planes_workaround(intel_crtc);
+ hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+ if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
+ intel_wait_for_vblank(dev, hsw_workaround_pipe);
+ intel_wait_for_vblank(dev, hsw_workaround_pipe);
+ }
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -5094,9 +5026,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
u32 reg, temp;
- if (!intel_crtc->active)
- return;
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -5135,18 +5064,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
I915_WRITE(PCH_DPLL_SEL, temp);
}
- /* disable PCH DPLL */
- intel_disable_shared_dpll(intel_crtc);
-
ironlake_fdi_pll_disable(intel_crtc);
}
intel_crtc->active = false;
intel_update_watermarks(crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
}
static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5157,9 +5079,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- if (!intel_crtc->active)
- return;
-
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
@@ -5179,7 +5098,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
if (INTEL_INFO(dev)->gen == 9)
- skylake_pfit_update(intel_crtc, 0);
+ skylake_scaler_disable(intel_crtc);
else if (INTEL_INFO(dev)->gen < 9)
ironlake_pfit_disable(intel_crtc);
else
@@ -5198,22 +5117,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_crtc->active = false;
intel_update_watermarks(crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (intel_crtc_to_shared_dpll(intel_crtc))
- intel_disable_shared_dpll(intel_crtc);
}
-static void ironlake_crtc_off(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_put_shared_dpll(intel_crtc);
-}
-
-
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -5242,6 +5147,7 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
{
switch (port) {
case PORT_A:
+ case PORT_E:
return POWER_DOMAIN_PORT_DDI_A_4_LANES;
case PORT_B:
return POWER_DOMAIN_PORT_DDI_B_4_LANES;
@@ -5295,6 +5201,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
unsigned long mask;
enum transcoder transcoder;
+ if (!crtc->state->active)
+ return 0;
+
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
mask = BIT(POWER_DOMAIN_PIPE(pipe));
@@ -5309,45 +5218,131 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
+static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum intel_display_power_domain domain;
+ unsigned long domains, new_domains, old_domains;
+
+ old_domains = intel_crtc->enabled_power_domains;
+ intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+
+ domains = new_domains & ~old_domains;
+
+ for_each_power_domain(domain, domains)
+ intel_display_power_get(dev_priv, domain);
+
+ return old_domains & ~new_domains;
+}
+
+static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+ unsigned long domains)
+{
+ enum intel_display_power_domain domain;
+
+ for_each_power_domain(domain, domains)
+ intel_display_power_put(dev_priv, domain);
+}
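
The two helpers above split the power-well refcounting around a modeset: references for the domains the new CRTC state needs are taken before anything is programmed, and only the domains that are no longer needed are dropped afterwards, so wells used both before and after never toggle. A minimal, self-contained sketch of that bitmask arithmetic (mask values and names are invented, not real i915 domains):

#include <stdio.h>

int main(void)
{
	/* Hypothetical example masks; each bit stands for one power domain. */
	unsigned long old_domains = 0x2b;	/* held before the modeset */
	unsigned long new_domains = 0x3c;	/* required by the new CRTC state */

	unsigned long to_get = new_domains & ~old_domains;	/* grab first */
	unsigned long to_put = old_domains & ~new_domains;	/* drop after commit */

	/* Bits set in both masks are never released, so those wells stay up. */
	printf("get 0x%lx before programming, put 0x%lx afterwards\n",
	       to_get, to_put);
	return 0;
}
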
+
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
- struct intel_crtc *crtc;
+ unsigned long put_domains[I915_MAX_PIPES] = {};
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- /*
- * First get all needed power domains, then put all unneeded, to avoid
- * any unnecessary toggling of the power wells.
- */
- for_each_intel_crtc(dev, crtc) {
- enum intel_display_power_domain domain;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (needs_modeset(crtc->state))
+ put_domains[to_intel_crtc(crtc)->pipe] =
+ modeset_get_crtc_power_domains(crtc);
+ }
- if (!crtc->base.state->enable)
- continue;
+ if (dev_priv->display.modeset_commit_cdclk) {
+ unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
+
+ if (cdclk != dev_priv->cdclk_freq &&
+ !WARN_ON(!state->allow_modeset))
+ dev_priv->display.modeset_commit_cdclk(state);
+ }
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (put_domains[i])
+ modeset_put_power_domains(dev_priv, put_domains[i]);
+}
+
+static void intel_update_max_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
+ if (IS_SKYLAKE(dev)) {
+ u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
- for_each_power_domain(domain, pipe_domains[crtc->pipe])
- intel_display_power_get(dev_priv, domain);
+ if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+ dev_priv->max_cdclk_freq = 675000;
+ else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+ dev_priv->max_cdclk_freq = 540000;
+ else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+ dev_priv->max_cdclk_freq = 450000;
+ else
+ dev_priv->max_cdclk_freq = 337500;
+ } else if (IS_BROADWELL(dev)) {
+ /*
+ * FIXME with extra cooling we can allow
+ * 540 MHz for ULX and 675 MHz for ULT.
+ * How can we know if extra cooling is
+ * available? PCI ID, VTB, something else?
+ */
+ if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ dev_priv->max_cdclk_freq = 450000;
+ else if (IS_BDW_ULX(dev))
+ dev_priv->max_cdclk_freq = 450000;
+ else if (IS_BDW_ULT(dev))
+ dev_priv->max_cdclk_freq = 540000;
+ else
+ dev_priv->max_cdclk_freq = 675000;
+ } else if (IS_CHERRYVIEW(dev)) {
+ dev_priv->max_cdclk_freq = 320000;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->max_cdclk_freq = 400000;
+ } else {
+ /* otherwise assume cdclk is fixed */
+ dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
}
- if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(state);
+ DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+ dev_priv->max_cdclk_freq);
+}
- for_each_intel_crtc(dev, crtc) {
- enum intel_display_power_domain domain;
+static void intel_update_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- for_each_power_domain(domain, crtc->enabled_power_domains)
- intel_display_power_put(dev_priv, domain);
+ dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+ dev_priv->cdclk_freq);
- crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+ if (IS_VALLEYVIEW(dev)) {
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}
- intel_display_set_init_power(dev_priv, false);
+ if (dev_priv->max_cdclk_freq == 0)
+ intel_update_max_cdclk(dev);
}
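
intel_update_cdclk() also reprograms the GMBUS clock divider on VLV/CHV; the value written is simply the current cdclk rounded up to whole MHz, which per the comment yields the ~1 MHz bus rate the hardware actually wants. The rounding itself is plain DIV_ROUND_UP, shown here stand-alone with an example cdclk (not read from real hardware):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int cdclk_khz = 320000;	/* example: a 320 MHz CHV cdclk */

	/* Value programmed into GMBUSFREQ_VLV: cdclk in whole MHz, rounded up. */
	printf("GMBUSFREQ value: %d\n", DIV_ROUND_UP(cdclk_khz, 1000));
	return 0;
}
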
-void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void broxton_set_cdclk(struct drm_device *dev, int frequency)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t divider;
@@ -5463,7 +5458,7 @@ void broxton_set_cdclk(struct drm_device *dev, int frequency)
return;
}
- dev_priv->cdclk_freq = frequency;
+ intel_update_cdclk(dev);
}
void broxton_init_cdclk(struct drm_device *dev)
@@ -5638,6 +5633,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
+ struct drm_device *dev = dev_priv->dev;
u32 freq_select, pcu_ack;
DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
@@ -5678,6 +5674,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_update_cdclk(dev);
}
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
@@ -5748,22 +5746,6 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv)
return vco_freq[hpll_freq] * 1000;
}
-static void vlv_update_cdclk(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
- dev_priv->cdclk_freq);
-
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
- I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-}
-
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
@@ -5827,7 +5809,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->sb_lock);
- vlv_update_cdclk(dev);
+ intel_update_cdclk(dev);
}
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -5868,7 +5850,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- vlv_update_cdclk(dev);
+ intel_update_cdclk(dev);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -5931,11 +5913,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
int max_pixclk = 0;
for_each_intel_crtc(dev, intel_crtc) {
- if (state)
- crtc_state =
- intel_atomic_get_crtc_state(state, intel_crtc);
- else
- crtc_state = intel_crtc->config;
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -5949,39 +5927,32 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
return max_pixclk;
}
-static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
+static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int max_pixclk = intel_mode_max_pixclk(state->dev, state);
- int cdclk, i;
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_pixclk = intel_mode_max_pixclk(dev, state);
if (max_pixclk < 0)
return max_pixclk;
- if (IS_VALLEYVIEW(dev_priv))
- cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
- else
- cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+ to_intel_atomic_state(state)->cdclk =
+ valleyview_calc_cdclk(dev_priv, max_pixclk);
- if (cdclk == dev_priv->cdclk_freq)
- return 0;
+ return 0;
+}
- /* add all active pipes to the state */
- for_each_crtc(state->dev, crtc) {
- if (!crtc->state->enable)
- continue;
+static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_pixclk = intel_mode_max_pixclk(dev, state);
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
+ if (max_pixclk < 0)
+ return max_pixclk;
- /* disable/enable all currently active pipes while we change cdclk */
- for_each_crtc_in_state(state, crtc, crtc_state, i)
- if (crtc_state->enable)
- crtc_state->mode_changed = true;
+ to_intel_atomic_state(state)->cdclk =
+ broxton_calc_cdclk(dev_priv, max_pixclk);
return 0;
}
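
With the *_modeset_calc_cdclk() hooks the target cdclk is computed during the atomic check phase and stashed in the intel atomic state; the matching *_modeset_commit_cdclk() hooks further below only program the hardware. A rough user-space sketch of that split, with invented names and illustrative thresholds rather than the real VLV/BXT limits:

#include <stdio.h>

struct demo_atomic_state { int cdclk; };	/* stand-in for the intel atomic state */

/* Check phase: pick the smallest cdclk that covers the worst pixel clock. */
static void demo_calc_cdclk(struct demo_atomic_state *state, int max_pixclk)
{
	if (max_pixclk > 320000)
		state->cdclk = 400000;
	else if (max_pixclk > 266667)
		state->cdclk = 320000;
	else
		state->cdclk = 266667;	/* thresholds are illustrative only */
}

/* Commit phase: touch the hardware only if the frequency really changes. */
static void demo_commit_cdclk(const struct demo_atomic_state *state, int *hw_cdclk)
{
	if (*hw_cdclk != state->cdclk)
		*hw_cdclk = state->cdclk;	/* stands in for *_set_cdclk() */
}

int main(void)
{
	struct demo_atomic_state state;
	int hw_cdclk = 266667;

	demo_calc_cdclk(&state, 300000);
	demo_commit_cdclk(&state, &hw_cdclk);
	printf("cdclk is now %d kHz\n", hw_cdclk);
	return 0;
}
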
@@ -5998,7 +5969,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
- credits = PFI_CREDIT_31;
+ credits = PFI_CREDIT_63;
else
credits = PFI_CREDIT(15);
} else {
@@ -6022,41 +5993,31 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
-static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
+static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
+ unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev, NULL);
- int req_cdclk;
-
- /* The path in intel_mode_max_pixclk() with a NULL atomic state should
- * never fail. */
- if (WARN_ON(max_pixclk < 0))
- return;
- req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
-
- if (req_cdclk != dev_priv->cdclk_freq) {
- /*
- * FIXME: We can end up here with all power domains off, yet
- * with a CDCLK frequency other than the minimum. To account
- * for this take the PIPE-A power domain, which covers the HW
- * blocks needed for the following programming. This can be
- * removed once it's guaranteed that we get here either with
- * the minimum CDCLK set, or the required power domains
- * enabled.
- */
- intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ /*
+ * FIXME: We can end up here with all power domains off, yet
+ * with a CDCLK frequency other than the minimum. To account
+ * for this take the PIPE-A power domain, which covers the HW
+ * blocks needed for the following programming. This can be
+ * removed once it's guaranteed that we get here either with
+ * the minimum CDCLK set, or the required power domains
+ * enabled.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- if (IS_CHERRYVIEW(dev))
- cherryview_set_cdclk(dev, req_cdclk);
- else
- valleyview_set_cdclk(dev, req_cdclk);
+ if (IS_CHERRYVIEW(dev))
+ cherryview_set_cdclk(dev, req_cdclk);
+ else
+ valleyview_set_cdclk(dev, req_cdclk);
- vlv_program_pfi_credits(dev_priv);
+ vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
- }
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -6068,9 +6029,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
bool is_dsi;
- WARN_ON(!crtc->state->enable);
-
- if (intel_crtc->active)
+ if (WARN_ON(intel_crtc->active))
return;
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
@@ -6119,7 +6078,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
- intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6146,9 +6104,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->state->enable);
-
- if (intel_crtc->active)
+ if (WARN_ON(intel_crtc->active))
return;
i9xx_set_pll_dividers(intel_crtc);
@@ -6208,9 +6164,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- if (!intel_crtc->active)
- return;
-
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
@@ -6247,91 +6200,89 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc->active = false;
intel_update_watermarks(crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void i9xx_crtc_off(struct drm_crtc *crtc)
-{
}
-/* Master function to enable/disable CRTC and corresponding power wells */
-void intel_crtc_control(struct drm_crtc *crtc, bool enable)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
unsigned long domains;
- if (enable) {
- if (!intel_crtc->active) {
- domains = get_crtc_power_domains(crtc);
- for_each_power_domain(domain, domains)
- intel_display_power_get(dev_priv, domain);
- intel_crtc->enabled_power_domains = domains;
-
- dev_priv->display.crtc_enable(crtc);
- intel_crtc_enable_planes(crtc);
- }
- } else {
- if (intel_crtc->active) {
- intel_crtc_disable_planes(crtc);
- dev_priv->display.crtc_disable(crtc);
+ if (!intel_crtc->active)
+ return;
- domains = intel_crtc->enabled_power_domains;
- for_each_power_domain(domain, domains)
- intel_display_power_put(dev_priv, domain);
- intel_crtc->enabled_power_domains = 0;
- }
+ if (to_intel_plane_state(crtc->primary->state)->visible) {
+ intel_crtc_wait_for_pending_flips(crtc);
+ intel_pre_disable_primary(crtc);
}
+
+ intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
+ dev_priv->display.crtc_disable(crtc);
+ intel_disable_shared_dpll(intel_crtc);
+
+ domains = intel_crtc->enabled_power_domains;
+ for_each_power_domain(domain, domains)
+ intel_display_power_put(dev_priv, domain);
+ intel_crtc->enabled_power_domains = 0;
}
-/**
- * Sets the power management mode of the pipe and plane.
+/*
+ * Turn all CRTCs off, but do not adjust state.
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
*/
-void intel_crtc_update_dpms(struct drm_crtc *crtc)
+int intel_display_suspend(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
- struct intel_encoder *intel_encoder;
- bool enable = false;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ unsigned crtc_mask = 0;
+ int ret = 0;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder)
- enable |= intel_encoder->connectors_active;
+ if (WARN_ON(!ctx))
+ return 0;
- intel_crtc_control(crtc, enable);
+ lockdep_assert_held(&ctx->ww_ctx);
+ state = drm_atomic_state_alloc(dev);
+ if (WARN_ON(!state))
+ return -ENOMEM;
- crtc->state->active = enable;
-}
+ state->acquire_ctx = ctx;
+ state->allow_modeset = true;
-static void intel_crtc_disable(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_connector *connector;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ for_each_crtc(dev, crtc) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_crtc_state(state, crtc);
- /* crtc should still be enabled when we disable it. */
- WARN_ON(!crtc->state->enable);
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto free;
- intel_crtc_disable_planes(crtc);
- dev_priv->display.crtc_disable(crtc);
- dev_priv->display.off(crtc);
+ if (!crtc_state->active)
+ continue;
- drm_plane_helper_disable(crtc->primary);
+ crtc_state->active = false;
+ crtc_mask |= 1 << drm_crtc_index(crtc);
+ }
- /* Update computed state. */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder || !connector->encoder->crtc)
- continue;
+ if (crtc_mask) {
+ ret = drm_atomic_commit(state);
- if (connector->encoder->crtc != crtc)
- continue;
+ if (!ret) {
+ for_each_crtc(dev, crtc)
+ if (crtc_mask & (1 << drm_crtc_index(crtc)))
+ crtc->state->active = true;
- connector->dpms = DRM_MODE_DPMS_OFF;
- to_intel_encoder(connector->encoder)->connectors_active = false;
+ return ret;
+ }
}
+
+free:
+ if (ret)
+ DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ drm_atomic_state_free(state);
+ return ret;
}
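
intel_display_suspend() duplicates every CRTC state into a throwaway atomic state, clears 'active' there, commits, and then flips the software 'active' flags back on so the saved state still describes the pre-suspend configuration for the later intel_modeset_setup_hw_state() call. The bookkeeping reduces to a bitmask of the CRTCs that were running; a plain-C sketch with stand-in types:

#include <stdio.h>

struct demo_crtc { int index; int active; };	/* stand-in, not a DRM type */

int main(void)
{
	struct demo_crtc crtcs[] = { {0, 1}, {1, 0}, {2, 1} };
	unsigned int n = sizeof(crtcs) / sizeof(crtcs[0]);
	unsigned int crtc_mask = 0, i;

	for (i = 0; i < n; i++) {
		if (!crtcs[i].active)
			continue;
		crtcs[i].active = 0;			/* "commit": turn it off */
		crtc_mask |= 1u << crtcs[i].index;	/* remember it was on */
	}

	/* After a successful commit, mark them active again in software only,
	 * so the saved state still says "on" even though the hardware is off. */
	for (i = 0; i < n; i++)
		if (crtc_mask & (1u << crtcs[i].index))
			crtcs[i].active = 1;

	printf("suspended crtc mask: 0x%x\n", crtc_mask);
	return 0;
}
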
void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -6342,62 +6293,42 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_encoder);
}
-/* Simple dpms helper for encoders with just one connector, no cloning and only
- * one kind of off state. It clamps all !ON modes to fully OFF and changes the
- * state of the entire output pipe. */
-static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
-{
- if (mode == DRM_MODE_DPMS_ON) {
- encoder->connectors_active = true;
-
- intel_crtc_update_dpms(encoder->base.crtc);
- } else {
- encoder->connectors_active = false;
-
- intel_crtc_update_dpms(encoder->base.crtc);
- }
-}
-
/* Cross check the actual hw state with our own modeset state tracking (and its
* internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
+ struct drm_crtc *crtc = connector->base.state->crtc;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ connector->base.name);
+
if (connector->get_hw_state(connector)) {
- struct intel_encoder *encoder = connector->encoder;
- struct drm_crtc *crtc;
- bool encoder_enabled;
- enum pipe pipe;
+ struct drm_encoder *encoder = &connector->encoder->base;
+ struct drm_connector_state *conn_state = connector->base.state;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.base.id,
- connector->base.name);
+ I915_STATE_WARN(!crtc,
+ "connector enabled without attached crtc\n");
- /* there is no real hw state for MST connectors */
- if (connector->mst_port)
+ if (!crtc)
return;
- I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
- "wrong connector dpms state\n");
- I915_STATE_WARN(connector->base.encoder != &encoder->base,
- "active connector not linked to encoder\n");
-
- if (encoder) {
- I915_STATE_WARN(!encoder->connectors_active,
- "encoder->connectors_active not set\n");
+ I915_STATE_WARN(!crtc->state->active,
+ "connector is active, but attached crtc isn't\n");
- encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
- if (I915_STATE_WARN_ON(!encoder->base.crtc))
- return;
+ if (!encoder)
+ return;
- crtc = encoder->base.crtc;
+ I915_STATE_WARN(conn_state->best_encoder != encoder,
+ "atomic encoder doesn't match attached encoder\n");
- I915_STATE_WARN(!crtc->state->enable,
- "crtc not enabled\n");
- I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
- "encoder active on the wrong pipe\n");
- }
+ I915_STATE_WARN(conn_state->crtc != encoder->crtc,
+ "attached encoder crtc differs from connector crtc\n");
+ } else {
+ I915_STATE_WARN(crtc && crtc->state->active,
+ "attached crtc is active, but connector isn't\n");
+ I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+ "best encoder set without crtc!\n");
}
}
@@ -6429,26 +6360,6 @@ struct intel_connector *intel_connector_alloc(void)
return connector;
}
-/* Even simpler default implementation, if there's really no special case to
- * consider. */
-void intel_connector_dpms(struct drm_connector *connector, int mode)
-{
- /* All the simple cases only support two dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- if (connector->encoder)
- intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
-
- intel_modeset_check_state(connector->dev);
-}
-
/* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning and hence the encoder state determines the state
* of the connector. */
@@ -6586,12 +6497,36 @@ retry:
return ret;
}
+static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *pipe_config)
+{
+ if (pipe_config->pipe_bpp > 24)
+ return false;
+
+ /* HSW can handle pixel rate up to cdclk? */
+ if (IS_HASWELL(dev_priv->dev))
+ return true;
+
+ /*
+ * We compare against max which means we must take
+ * the increased cdclk requirement into account when
+ * calculating the new cdclk.
+ *
+ * Should measure whether using a lower cdclk w/o IPS would be a net win.
+ */
+ return ilk_pipe_pixel_rate(pipe_config) <=
+ dev_priv->max_cdclk_freq * 95 / 100;
+}
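
pipe_config_supports_ips() and the BDW branch of ilk_max_pixel_rate() further below are two sides of the same guardband: IPS is only allowed when the pipe's pixel rate fits within 95% of the maximum cdclk, and a pipe running with IPS has its pixel rate inflated by 100/95 when the required cdclk is computed. The arithmetic, with example numbers rather than validated limits:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int max_cdclk = 540000;		/* kHz, e.g. a hypothetical BDW ULT part */
	int pixel_rate = 500000;	/* kHz, hypothetical mode */

	/* Forward direction: is there 5% headroom left for IPS? */
	int ips_ok = pixel_rate <= max_cdclk * 95 / 100;

	/* Reverse direction: what pixel rate must the cdclk choice cover? */
	int rate_for_cdclk = ips_ok ? DIV_ROUND_UP(pixel_rate * 100, 95) : pixel_rate;

	printf("IPS %s; plan cdclk for %d kHz\n",
	       ips_ok ? "allowed" : "denied", rate_for_cdclk);
	return 0;
}
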
+
static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
pipe_config->ips_enabled = i915.enable_ips &&
- hsw_crtc_supports_ips(crtc) &&
- pipe_config->pipe_bpp <= 24;
+ hsw_crtc_supports_ips(crtc) &&
+ pipe_config_supports_ips(dev_priv, pipe_config);
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -6600,12 +6535,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int ret;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
- int clock_limit =
- dev_priv->display.get_display_clock_speed(dev);
+ int clock_limit = dev_priv->max_cdclk_freq;
/*
* Enable pixel doubling when the dot clock
@@ -6647,14 +6580,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
- /* FIXME: remove below call once atomic mode set is place and all crtc
- * related checks called from atomic_crtc_check function */
- ret = 0;
- DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
- crtc, pipe_config->base.state);
- ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);
-
- return ret;
+ return 0;
}
static int skylake_get_display_clock_speed(struct drm_device *dev)
@@ -6664,10 +6590,8 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
uint32_t cdctl = I915_READ(CDCLK_CTL);
uint32_t linkrate;
- if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
- WARN(1, "LCPLL1 not enabled\n");
+ if (!(lcpll1 & LCPLL_PLL_ENABLE))
return 24000; /* 24MHz is the cd freq with NSSC ref */
- }
if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
return 540000;
@@ -6706,6 +6630,34 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
return 24000;
}
+static int broxton_get_display_clock_speed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ uint32_t cdctl = I915_READ(CDCLK_CTL);
+ uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+ uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
+ int cdclk;
+
+ if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
+ return 19200;
+
+ cdclk = 19200 * pll_ratio / 2;
+
+ switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+ case BXT_CDCLK_CD2X_DIV_SEL_1:
+ return cdclk; /* 576MHz or 624MHz */
+ case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+ return cdclk * 2 / 3; /* 384MHz */
+ case BXT_CDCLK_CD2X_DIV_SEL_2:
+ return cdclk / 2; /* 288MHz */
+ case BXT_CDCLK_CD2X_DIV_SEL_4:
+ return cdclk / 4; /* 144MHz */
+ }
+
+ /* error case, do as if DE PLL isn't enabled */
+ return 19200;
+}
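
The Broxton readout above derives cdclk from two fields: the DE PLL output is 19.2 MHz times the programmed ratio, cdclk is half of that, and the CD2X selector then divides by 1, 1.5, 2 or 4. Worked through with an assumed ratio of 60 (not read from real hardware):

#include <stdio.h>

int main(void)
{
	int ref_khz = 19200;
	int pll_ratio = 60;			/* hypothetical BXT_DE_PLL ratio */
	int cdclk = ref_khz * pll_ratio / 2;	/* 576000 kHz */

	printf("div 1   -> %d kHz\n", cdclk);
	printf("div 1.5 -> %d kHz\n", cdclk * 2 / 3);
	printf("div 2   -> %d kHz\n", cdclk / 2);
	printf("div 4   -> %d kHz\n", cdclk / 4);
	return 0;
}
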
+
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6834,20 +6786,37 @@ static int i865_get_display_clock_speed(struct drm_device *dev)
return 266667;
}
-static int i855_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_device *dev)
{
u16 hpllcc = 0;
+
+ /*
+ * 852GM/852GMV only supports 133 MHz and the HPLLCC
+ * encoding is different :(
+ * FIXME is this the right way to detect 852GM/852GMV?
+ */
+ if (dev->pdev->revision == 0x1)
+ return 133333;
+
+ pci_bus_read_config_word(dev->pdev->bus,
+ PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
/* Assume that the hardware is in the high speed state. This
* should be the default.
*/
switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
case GC_CLOCK_133_200:
+ case GC_CLOCK_133_200_2:
case GC_CLOCK_100_200:
return 200000;
case GC_CLOCK_166_250:
return 250000;
case GC_CLOCK_100_133:
return 133333;
+ case GC_CLOCK_133_266:
+ case GC_CLOCK_133_266_2:
+ case GC_CLOCK_166_266:
+ return 266667;
}
/* Shouldn't happen */
@@ -6859,6 +6828,175 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133333;
}
+static unsigned int intel_hpll_vco(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ static const unsigned int blb_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ [4] = 6400000,
+ };
+ static const unsigned int pnv_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ [4] = 2666667,
+ };
+ static const unsigned int cl_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 6400000,
+ [4] = 3333333,
+ [5] = 3566667,
+ [6] = 4266667,
+ };
+ static const unsigned int elk_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ };
+ static const unsigned int ctg_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 6400000,
+ [4] = 2666667,
+ [5] = 4266667,
+ };
+ const unsigned int *vco_table;
+ unsigned int vco;
+ uint8_t tmp = 0;
+
+ /* FIXME other chipsets? */
+ if (IS_GM45(dev))
+ vco_table = ctg_vco;
+ else if (IS_G4X(dev))
+ vco_table = elk_vco;
+ else if (IS_CRESTLINE(dev))
+ vco_table = cl_vco;
+ else if (IS_PINEVIEW(dev))
+ vco_table = pnv_vco;
+ else if (IS_G33(dev))
+ vco_table = blb_vco;
+ else
+ return 0;
+
+ tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+
+ vco = vco_table[tmp & 0x7];
+ if (vco == 0)
+ DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+ else
+ DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+ return vco;
+}
+
+static int gm45_get_display_clock_speed(struct drm_device *dev)
+{
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ uint16_t tmp = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+ cdclk_sel = (tmp >> 12) & 0x1;
+
+ switch (vco) {
+ case 2666667:
+ case 4000000:
+ case 5333333:
+ return cdclk_sel ? 333333 : 222222;
+ case 3200000:
+ return cdclk_sel ? 320000 : 228571;
+ default:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
+ return 222222;
+ }
+}
+
+static int i965gm_get_display_clock_speed(struct drm_device *dev)
+{
+ static const uint8_t div_3200[] = { 16, 10, 8 };
+ static const uint8_t div_4000[] = { 20, 12, 10 };
+ static const uint8_t div_5333[] = { 24, 16, 14 };
+ const uint8_t *div_table;
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ uint16_t tmp = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+ cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+ if (cdclk_sel >= ARRAY_SIZE(div_3200))
+ goto fail;
+
+ switch (vco) {
+ case 3200000:
+ div_table = div_3200;
+ break;
+ case 4000000:
+ div_table = div_4000;
+ break;
+ case 5333333:
+ div_table = div_5333;
+ break;
+ default:
+ goto fail;
+ }
+
+ return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+ return 200000;
+}
+
+static int g33_get_display_clock_speed(struct drm_device *dev)
+{
+ static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
+ static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
+ static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
+ static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
+ const uint8_t *div_table;
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ uint16_t tmp = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+ cdclk_sel = (tmp >> 4) & 0x7;
+
+ if (cdclk_sel >= ARRAY_SIZE(div_3200))
+ goto fail;
+
+ switch (vco) {
+ case 3200000:
+ div_table = div_3200;
+ break;
+ case 4000000:
+ div_table = div_4000;
+ break;
+ case 4800000:
+ div_table = div_4800;
+ break;
+ case 5333333:
+ div_table = div_5333;
+ break;
+ default:
+ goto fail;
+ }
+
+ return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
+ return 190476;
+}
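
The gm45/i965gm/g33 readouts above all follow the same pattern: a strap register selects the HPLL VCO from a small per-platform table, GCFGC bits select a divider from a per-VCO table, and cdclk is the VCO divided by that value, rounded to the closest kHz. A sketch reusing the g33/blb numbers from the patch purely as sample data:

#include <stdio.h>

int main(void)
{
	/* Sample data copied from the tables above. */
	static const unsigned int vco_khz[] = {
		3200000, 4000000, 5333333, 4800000, 6400000,
	};
	static const unsigned char div_3200[] = { 12, 10, 8, 7, 5, 16 };

	unsigned int vco = vco_khz[0];	/* pretend HPLLVCO reported 3.2 GHz */
	unsigned int sel = 2;		/* pretend GCFGC selected entry 2 */
	unsigned int div = div_3200[sel];
	unsigned int cdclk = (vco + div / 2) / div;	/* round to closest */

	printf("cdclk = %u kHz\n", cdclk);	/* 3200000 / 8 = 400000 */
	return 0;
}
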
+
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
@@ -7064,8 +7202,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
-static void vlv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
u32 dpll, dpll_md;
@@ -7074,8 +7212,8 @@ static void vlv_update_pll(struct intel_crtc *crtc,
* clock for pipe B, since VGA hotplug / manual detection depends
* on it.
*/
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
/* We should never disable this, set it here for state tracking */
if (crtc->pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7178,11 +7316,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
mutex_unlock(&dev_priv->sb_lock);
}
-static void chv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (crtc->pipe != PIPE_A)
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7318,11 +7456,11 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
};
if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(crtc, &pipe_config);
+ chv_compute_dpll(crtc, &pipe_config);
chv_prepare_pll(crtc, &pipe_config);
chv_enable_pll(crtc, &pipe_config);
} else {
- vlv_update_pll(crtc, &pipe_config);
+ vlv_compute_dpll(crtc, &pipe_config);
vlv_prepare_pll(crtc, &pipe_config);
vlv_enable_pll(crtc, &pipe_config);
}
@@ -7344,10 +7482,10 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
vlv_disable_pll(to_i915(dev), pipe);
}
-static void i9xx_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock,
- int num_connectors)
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ intel_clock_t *reduced_clock,
+ int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7421,10 +7559,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
}
}
-static void i8xx_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock,
- int num_connectors)
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ intel_clock_t *reduced_clock,
+ int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7584,9 +7722,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
mode->flags = pipe_config->base.adjusted_mode.flags;
+ mode->type = DRM_MODE_TYPE_DRIVER;
mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
mode->flags |= pipe_config->base.adjusted_mode.flags;
+
+ mode->hsync = drm_mode_hsync(mode);
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_set_name(mode);
}
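
intel_mode_from_pipe_config() now also fills in the fields userspace expects on a probed mode: the type flag, the refresh rate and the mode name. Roughly, the refresh rate is the pixel clock divided by the total frame size; the real drm_mode_vrefresh() additionally handles interlace and doublescan. An approximate stand-alone calculation with 1080p-like timings (example values only):

#include <stdio.h>

int main(void)
{
	int clock_khz = 148500, htotal = 2200, vtotal = 1125;	/* 1080p-ish */
	int frame = htotal * vtotal;
	int vrefresh = (clock_khz * 1000 + frame / 2) / frame;	/* ~Hz, rounded */

	printf("1920x1080 @ %d Hz\n", vrefresh);	/* prints 60 */
	return 0;
}
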
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -7658,9 +7801,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk, num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false, is_dsi = false;
+ intel_clock_t clock;
+ bool ok;
+ bool is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
struct drm_atomic_state *state = crtc_state->base.state;
@@ -7678,9 +7821,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
encoder = to_intel_encoder(connector_state->best_encoder);
switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
@@ -7712,19 +7852,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target
- * clock. If the clocks don't match, we can't switch
- * the display clock by using the FP0/FP1. In such case
- * we will disable the LVDS downclock feature.
- */
- has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc_state,
- dev_priv->lvds_downclock,
- refclk, &clock,
- &reduced_clock);
- }
/* Compat-code for transition, will disappear. */
crtc_state->dpll.n = clock.n;
crtc_state->dpll.m1 = clock.m1;
@@ -7734,17 +7861,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
}
if (IS_GEN2(dev)) {
- i8xx_update_pll(crtc, crtc_state,
- has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ i8xx_compute_dpll(crtc, crtc_state, NULL,
+ num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(crtc, crtc_state);
+ chv_compute_dpll(crtc, crtc_state);
} else if (IS_VALLEYVIEW(dev)) {
- vlv_update_pll(crtc, crtc_state);
+ vlv_compute_dpll(crtc, crtc_state);
} else {
- i9xx_update_pll(crtc, crtc_state,
- has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ i9xx_compute_dpll(crtc, crtc_state, NULL,
+ num_connectors);
}
return 0;
@@ -7804,10 +7929,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
- vlv_clock(refclk, &clock);
-
- /* clock.dot is the fast clock */
- pipe_config->port_clock = clock.dot / 5;
+ pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
static void
@@ -7887,7 +8009,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
intel_clock_t clock;
- u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
mutex_lock(&dev_priv->sb_lock);
@@ -7895,18 +8017,18 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+ pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
mutex_unlock(&dev_priv->sb_lock);
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+ clock.m2 = (pll_dw0 & 0xff) << 22;
+ if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+ clock.m2 |= pll_dw2 & 0x3fffff;
clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
- chv_clock(refclk, &clock);
-
- /* clock.dot is the fast clock */
- pipe_config->port_clock = clock.dot / 5;
+ pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
@@ -8555,9 +8677,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
const intel_limit_t *limit;
- bool ret, is_lvds = false;
-
- is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+ bool ret;
refclk = ironlake_get_refclk(crtc_state);
@@ -8573,20 +8693,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
if (!ret)
return false;
- if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
- *has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc_state,
- dev_priv->lvds_downclock,
- refclk, clock,
- reduced_clock);
- }
-
return true;
}
@@ -9294,6 +9400,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_update_cdclk(dev_priv->dev);
}
/*
@@ -9355,21 +9462,160 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
+static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
+ unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+ broxton_set_cdclk(dev, req_cdclk);
+}
+
+/* compute the max pixel rate for the new configuration */
+static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+{
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state;
+ int max_pixel_rate = 0;
+
+ for_each_intel_crtc(state->dev, intel_crtc) {
+ int pixel_rate;
+
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->base.enable)
+ continue;
+
+ pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ max_pixel_rate = max(max_pixel_rate, pixel_rate);
+ }
+
+ return max_pixel_rate;
+}
+
+static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+{
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev, NULL);
- int req_cdclk;
+ uint32_t val, data;
+ int ret;
+
+ if (WARN((I915_READ(LCPLL_CTL) &
+ (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+ LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+ LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+ LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+ "trying to change cdclk frequency with cdclk not enabled\n"))
+ return;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_write(dev_priv,
+ BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("failed to inform pcode about cdclk change\n");
+ return;
+ }
+
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
- /* see the comment in valleyview_modeset_global_resources */
- if (WARN_ON(max_pixclk < 0))
+ if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CLK_FREQ_MASK;
+
+ switch (cdclk) {
+ case 450000:
+ val |= LCPLL_CLK_FREQ_450;
+ data = 0;
+ break;
+ case 540000:
+ val |= LCPLL_CLK_FREQ_54O_BDW;
+ data = 1;
+ break;
+ case 337500:
+ val |= LCPLL_CLK_FREQ_337_5_BDW;
+ data = 2;
+ break;
+ case 675000:
+ val |= LCPLL_CLK_FREQ_675_BDW;
+ data = 3;
+ break;
+ default:
+ WARN(1, "invalid cdclk frequency\n");
return;
+ }
+
+ I915_WRITE(LCPLL_CTL, val);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_update_cdclk(dev);
+
+ WARN(cdclk != dev_priv->cdclk_freq,
+ "cdclk requested %d kHz but got %d kHz\n",
+ cdclk, dev_priv->cdclk_freq);
+}
+
+static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ int max_pixclk = ilk_max_pixel_rate(state);
+ int cdclk;
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ if (max_pixclk > 540000)
+ cdclk = 675000;
+ else if (max_pixclk > 450000)
+ cdclk = 540000;
+ else if (max_pixclk > 337500)
+ cdclk = 450000;
+ else
+ cdclk = 337500;
+
+ /*
+ * FIXME move the cdclk calculation to
+ * compute_config() so we can fail gracefully.
+ */
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ cdclk = dev_priv->max_cdclk_freq;
+ }
+
+ to_intel_atomic_state(state)->cdclk = cdclk;
+
+ return 0;
+}
- req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+ unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
- if (req_cdclk != dev_priv->cdclk_freq)
- broxton_set_cdclk(dev, req_cdclk);
+ broadwell_set_cdclk(dev, req_cdclk);
}
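
broadwell_modeset_calc_cdclk() rounds the worst-case pixel rate up to the next cdclk step the platform supports and clamps to the fused maximum (logging an error rather than failing, per the FIXME); the commit hook above then programs whatever was chosen. The same selection as a stand-alone function with example inputs:

#include <stdio.h>

static int bdw_pick_cdclk(int max_pixclk, int max_cdclk)
{
	int cdclk;

	/* Thresholds taken from the patch above (kHz). */
	if (max_pixclk > 540000)
		cdclk = 675000;
	else if (max_pixclk > 450000)
		cdclk = 540000;
	else if (max_pixclk > 337500)
		cdclk = 450000;
	else
		cdclk = 337500;

	return cdclk > max_cdclk ? max_cdclk : cdclk;	/* clamp to the SKU limit */
}

int main(void)
{
	/* Example: wants 540000, clamped to a 450000 kHz part. */
	printf("%d kHz\n", bdw_pick_cdclk(500000, 450000));
	return 0;
}
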
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
@@ -9886,7 +10132,7 @@ static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
@@ -9975,7 +10221,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
- goto fail_unlock;
+ goto fail;
/*
* Algorithm gets a little messy:
@@ -9993,10 +10239,10 @@ retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
- goto fail_unlock;
+ goto fail;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
- goto fail_unlock;
+ goto fail;
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -10015,9 +10261,6 @@ retry:
continue;
if (possible_crtc->state->enable)
continue;
- /* This can occur when applying the pipe A quirk on resume. */
- if (to_intel_crtc(possible_crtc)->new_enabled)
- continue;
crtc = possible_crtc;
break;
@@ -10028,20 +10271,17 @@ retry:
*/
if (!crtc) {
DRM_DEBUG_KMS("no pipe available for load-detect\n");
- goto fail_unlock;
+ goto fail;
}
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
- goto fail_unlock;
+ goto fail;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
- goto fail_unlock;
- intel_encoder->new_crtc = to_intel_crtc(crtc);
- to_intel_connector(connector)->new_encoder = intel_encoder;
+ goto fail;
intel_crtc = to_intel_crtc(crtc);
- intel_crtc->new_enabled = true;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
@@ -10097,7 +10337,7 @@ retry:
drm_mode_copy(&crtc_state->base.mode, mode);
- if (intel_set_mode(crtc, state, true)) {
+ if (drm_atomic_commit(state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -10109,9 +10349,7 @@ retry:
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
- fail:
- intel_crtc->new_enabled = crtc->state->enable;
-fail_unlock:
+fail:
drm_atomic_state_free(state);
state = NULL;
@@ -10157,10 +10395,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (IS_ERR(crtc_state))
goto fail;
- to_intel_connector(connector)->new_encoder = NULL;
- intel_encoder->new_crtc = NULL;
- intel_crtc->new_enabled = false;
-
connector_state->best_encoder = NULL;
connector_state->crtc = NULL;
@@ -10171,7 +10405,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (ret)
goto fail;
- ret = intel_set_mode(crtc, state, true);
+ ret = drm_atomic_commit(state);
if (ret)
goto fail;
@@ -10219,6 +10453,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
intel_clock_t clock;
+ int port_clock;
int refclk = i9xx_pll_refclk(dev, pipe_config);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
@@ -10259,9 +10494,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
}
if (IS_PINEVIEW(dev))
- pineview_clock(refclk, &clock);
+ port_clock = pnv_calc_dpll_params(refclk, &clock);
else
- i9xx_clock(refclk, &clock);
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
@@ -10287,7 +10522,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.p2 = 2;
}
- i9xx_clock(refclk, &clock);
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
}
/*
@@ -10295,7 +10530,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
* port_clock to compute adjusted_mode.crtc_clock in the
* encoder's get_config() function.
*/
- pipe_config->port_clock = clock.dot;
+ pipe_config->port_clock = port_clock;
}
int intel_dotclock_calculate(int link_freq,
@@ -10384,42 +10619,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_decrease_pllclock(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (!HAS_GMCH_DISPLAY(dev))
- return;
-
- if (!dev_priv->lvds_downclock_avail)
- return;
-
- /*
- * Since this is called by a timer, we should never get here in
- * the manual case.
- */
- if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
- int pipe = intel_crtc->pipe;
- int dpll_reg = DPLL(pipe);
- int dpll;
-
- DRM_DEBUG_DRIVER("downclocking LVDS\n");
-
- assert_panel_unlocked(dev_priv, pipe);
-
- dpll = I915_READ(dpll_reg);
- dpll |= DISPLAY_RATE_SELECT_FPA1;
- I915_WRITE(dpll_reg, dpll);
- intel_wait_for_vblank(dev, pipe);
- dpll = I915_READ(dpll_reg);
- if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
- DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
- }
-
-}
-
void intel_mark_busy(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10437,20 +10636,12 @@ void intel_mark_busy(struct drm_device *dev)
void intel_mark_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
if (!dev_priv->mm.busy)
return;
dev_priv->mm.busy = false;
- for_each_crtc(dev, crtc) {
- if (!crtc->primary->fb)
- continue;
-
- intel_decrease_pllclock(crtc);
- }
-
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
@@ -10482,24 +10673,23 @@ static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
- struct drm_device *dev = work->crtc->dev;
- enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_plane *primary = crtc->base.primary;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
+ intel_unpin_fb_obj(work->old_fb, primary->state);
drm_gem_object_unreference(&work->pending_flip_obj->base);
- intel_fbc_update(dev);
-
if (work->flip_queued_req)
i915_gem_request_assign(&work->flip_queued_req, NULL);
mutex_unlock(&dev->struct_mutex);
- intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+ intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
drm_framebuffer_unreference(work->old_fb);
- BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
- atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+ BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
+ atomic_dec(&crtc->unpin_work_count);
kfree(work);
}
@@ -10632,14 +10822,15 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -10659,7 +10850,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10667,14 +10857,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -10691,7 +10882,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10699,15 +10889,16 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -10730,7 +10921,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10738,15 +10928,16 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -10766,7 +10957,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10774,9 +10964,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
@@ -10818,11 +11009,11 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
- ret = intel_ring_cacheline_align(ring);
+ ret = intel_ring_cacheline_align(req);
if (ret)
return ret;
- ret = intel_ring_begin(ring, len);
+ ret = intel_ring_begin(req, len);
if (ret)
return ret;
@@ -10861,7 +11052,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10970,12 +11160,11 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- bool atomic_update;
u32 start_vbl_count;
intel_mark_page_flip_active(intel_crtc);
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+ intel_pipe_update_start(intel_crtc, &start_vbl_count);
if (INTEL_INFO(dev)->gen >= 9)
skl_do_mmio_flip(intel_crtc);
@@ -10983,8 +11172,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
/* use_mmio_flip() retricts MMIO flips to ilk+ */
ilk_do_mmio_flip(intel_crtc);
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11031,7 +11219,7 @@ static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
return -ENODEV;
@@ -11117,6 +11305,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
bool mmio_flip;
+ struct drm_i915_gem_request *request = NULL;
int ret;
/*
@@ -11223,7 +11412,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
*/
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
crtc->primary->state,
- mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
+ mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
if (ret)
goto cleanup_pending;
@@ -11239,31 +11428,34 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
} else {
- if (obj->last_write_req) {
- ret = i915_gem_check_olr(obj->last_write_req);
+ if (!request) {
+ ret = i915_gem_request_alloc(ring, ring->default_context, &request);
if (ret)
goto cleanup_unpin;
}
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
goto cleanup_unpin;
- i915_gem_request_assign(&work->flip_queued_req,
- intel_ring_get_request(ring));
+ i915_gem_request_assign(&work->flip_queued_req, request);
}
+ if (request)
+ i915_add_request_no_flush(request);
+
work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true;
i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
- INTEL_FRONTBUFFER_PRIMARY(pipe));
-
- intel_fbc_disable(dev);
- intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+ to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
+ intel_fbc_disable_crtc(intel_crtc);
+ intel_frontbuffer_flip_prepare(dev,
+ to_intel_plane(primary)->frontbuffer_bit);
+
trace_i915_flip_request(intel_crtc->plane, obj);
return 0;
@@ -11271,6 +11463,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
+ if (request)
+ i915_gem_request_cancel(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
cleanup:
@@ -11289,8 +11483,35 @@ free_work:
kfree(work);
if (ret == -EIO) {
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+
out_hang:
- ret = intel_plane_restore(primary);
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+ plane_state = drm_atomic_get_plane_state(state, primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret) {
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (!ret)
+ ret = drm_atomic_commit(state);
+ }
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(state->acquire_ctx);
+ drm_atomic_state_clear(state);
+ goto retry;
+ }
+
+ if (ret)
+ drm_atomic_state_free(state);
+
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
@@ -11300,44 +11521,274 @@ out_hang:
return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .atomic_begin = intel_begin_crtc_commit,
- .atomic_flush = intel_finish_crtc_commit,
-};
/**
- * intel_modeset_update_staged_output_state
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
*
- * Updates the staged output configuration state, e.g. after we've read out the
- * current hw state.
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true if the watermarks need to be recalculated, false otherwise.
*/
-static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+static bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ /* Update watermarks on tiling changes. */
+ if (!plane->state->fb || !state->fb ||
+ plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+ plane->state->rotation != state->rotation)
+ return true;
+
+ if (plane->state->crtc_w != state->crtc_w)
+ return true;
+
+ return false;
+}
+
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *plane = plane_state->plane;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->state);
+ int idx = intel_crtc->base.base.id, ret;
+ int i = drm_plane_index(plane);
+ bool mode_changed = needs_modeset(crtc_state);
+ bool was_crtc_enabled = crtc->state->active;
+ bool is_crtc_enabled = crtc_state->active;
+
+ bool turn_off, turn_on, visible, was_visible;
+ struct drm_framebuffer *fb = plane_state->fb;
+
+ if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
+ plane->type != DRM_PLANE_TYPE_CURSOR) {
+ ret = skl_update_scaler_plane(
+ to_intel_crtc_state(crtc_state),
+ to_intel_plane_state(plane_state));
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Disabling a plane is always okay; we just need to update
+ * fb tracking in a special way since cleanup_fb() won't
+ * get called by the plane helpers.
+ */
+ if (old_plane_state->base.fb && !fb)
+ intel_crtc->atomic.disabled_planes |= 1 << i;
+
+ was_visible = old_plane_state->visible;
+ visible = to_intel_plane_state(plane_state)->visible;
+
+ if (!was_crtc_enabled && WARN_ON(was_visible))
+ was_visible = false;
+
+ if (!is_crtc_enabled && WARN_ON(visible))
+ visible = false;
+
+ if (!was_visible && !visible)
+ return 0;
+
+ turn_off = was_visible && (!visible || mode_changed);
+ turn_on = visible && (!was_visible || mode_changed);
+
+ DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
+ plane->base.id, fb ? fb->base.id : -1);
+
+ DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
+ plane->base.id, was_visible, visible,
+ turn_off, turn_on, mode_changed);
+
+ if (turn_on) {
+ intel_crtc->atomic.update_wm_pre = true;
+ /* must disable cxsr around plane enable/disable */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ intel_crtc->atomic.disable_cxsr = true;
+ /* to potentially re-enable cxsr */
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_wm_post = true;
+ }
+ } else if (turn_off) {
+ intel_crtc->atomic.update_wm_post = true;
+ /* must disable cxsr around plane enable/disable */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (is_crtc_enabled)
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.disable_cxsr = true;
+ }
+ } else if (intel_wm_need_update(plane, plane_state)) {
+ intel_crtc->atomic.update_wm_pre = true;
+ }
+
+ if (visible)
+ intel_crtc->atomic.fb_bits |=
+ to_intel_plane(plane)->frontbuffer_bit;
+
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ intel_crtc->atomic.wait_for_flips = true;
+ intel_crtc->atomic.pre_disable_primary = turn_off;
+ intel_crtc->atomic.post_enable_primary = turn_on;
+
+ if (turn_off) {
+ /*
+ * FIXME: Actually, if any other plane is still enabled
+ * on the pipe we could leave IPS enabled, but for now
+ * assume that when the primary plane is made invisible
+ * by writing 0 to DSPCNTR in update_primary_plane(),
+ * IPS needs to be disabled.
+ */
+ intel_crtc->atomic.disable_ips = true;
+
+ intel_crtc->atomic.disable_fbc = true;
+ }
+
+ /*
+ * FBC does not work on some platforms for rotated
+ * planes, so disable it when rotation is not 0 and
+ * update it when rotation is set back to 0.
+ *
+ * FIXME: This is redundant with the fbc update done in
+ * the primary plane enable function except that that
+ * one is done too late. We eventually need to unify
+ * this.
+ */
+
+ if (visible &&
+ INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ dev_priv->fbc.crtc == intel_crtc &&
+ plane_state->rotation != BIT(DRM_ROTATE_0))
+ intel_crtc->atomic.disable_fbc = true;
+
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (turn_on && IS_BROADWELL(dev))
+ intel_crtc->atomic.wait_vblank = true;
+
+ intel_crtc->atomic.update_fbc |= visible || mode_changed;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ if (turn_off && !mode_changed) {
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |=
+ 1 << i;
+ }
+ }
+ return 0;
+}
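To make the visibility bookkeeping above concrete, a small worked example with assumed values:

	/*
	 * Worked example (values assumed for illustration): a plane that
	 * stays visible across a full modeset, i.e.
	 * was_visible = visible = mode_changed = true, gives
	 *
	 *	turn_off = was_visible && (!visible || mode_changed)  -> true
	 *	turn_on  = visible && (!was_visible || mode_changed)  -> true
	 *
	 * Since the checks above are chained with else-if, only the turn_on
	 * branch runs, arming update_wm_pre, disable_cxsr, wait_vblank and
	 * update_wm_post (for a non-cursor plane) for the commit.
	 */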
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+ const struct intel_encoder *b)
+{
+ /* masks could be asymmetric, so check both ways */
+ return a == b || (a->cloneable & (1 << b->type) &&
+ b->cloneable & (1 << a->type));
+}
+
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_encoder *source_encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ source_encoder =
+ to_intel_encoder(connector_state->best_encoder);
+ if (!encoders_cloneable(encoder, source_encoder))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_encoder_cloning(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc;
struct intel_encoder *encoder;
- struct intel_connector *connector;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
- for_each_intel_connector(dev, connector) {
- connector->new_encoder =
- to_intel_encoder(connector->base.encoder);
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ if (!check_single_encoder_cloning(state, crtc, encoder))
+ return false;
}
- for_each_intel_encoder(dev, encoder) {
- encoder->new_crtc =
- to_intel_crtc(encoder->base.crtc);
+ return true;
+}
+
+static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc_state);
+ struct drm_atomic_state *state = crtc_state->state;
+ int ret;
+ bool mode_changed = needs_modeset(crtc_state);
+
+ if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
+ DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ return -EINVAL;
}
- for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = crtc->base.state->enable;
+ if (mode_changed && !crtc_state->active)
+ intel_crtc->atomic.update_wm_post = true;
+
+ if (mode_changed && crtc_state->enable &&
+ dev_priv->display.crtc_compute_clock &&
+ !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+ ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+ pipe_config);
+ if (ret)
+ return ret;
+ }
+
+ ret = 0;
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (mode_changed)
+ ret = skl_update_scaler_crtc(pipe_config);
+
+ if (!ret)
+ ret = intel_atomic_setup_scalers(dev, intel_crtc,
+ pipe_config);
}
+
+ return ret;
}
-/* Transitional helper to copy current connector/encoder state to
- * connector->state. This is needed so that code that is partially
- * converted to atomic does the right thing.
- */
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .atomic_begin = intel_begin_crtc_commit,
+ .atomic_flush = intel_finish_crtc_commit,
+ .atomic_check = intel_crtc_atomic_check,
+};
+
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -11355,39 +11806,6 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
}
}
-/* Fixup legacy state after an atomic state swap.
- */
-static void intel_modeset_fixup_state(struct drm_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
-
- for_each_intel_connector(state->dev, connector) {
- connector->base.encoder = connector->base.state->best_encoder;
- if (connector->base.encoder)
- connector->base.encoder->crtc =
- connector->base.state->crtc;
- }
-
- /* Update crtc of disabled encoders */
- for_each_intel_encoder(state->dev, encoder) {
- int num_connectors = 0;
-
- for_each_intel_connector(state->dev, connector)
- if (connector->base.encoder == &encoder->base)
- num_connectors++;
-
- if (num_connectors == 0)
- encoder->base.crtc = NULL;
- }
-
- for_each_intel_crtc(state->dev, crtc) {
- crtc->base.enabled = crtc->base.state->enable;
- crtc->config = to_intel_crtc_state(crtc->base.state);
- }
-}
-
static void
connected_sink_compute_bpp(struct intel_connector *connector,
struct intel_crtc_state *pipe_config)
@@ -11523,17 +11941,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
if (IS_BROXTON(dev)) {
- DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, "
+ DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n",
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.ebb0,
+ pipe_config->dpll_hw_state.ebb4,
pipe_config->dpll_hw_state.pll0,
pipe_config->dpll_hw_state.pll1,
pipe_config->dpll_hw_state.pll2,
pipe_config->dpll_hw_state.pll3,
pipe_config->dpll_hw_state.pll6,
pipe_config->dpll_hw_state.pll8,
+ pipe_config->dpll_hw_state.pll9,
+ pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
} else if (IS_SKYLAKE(dev)) {
DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
@@ -11590,56 +12011,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
}
}
-static bool encoders_cloneable(const struct intel_encoder *a,
- const struct intel_encoder *b)
-{
- /* masks could be asymmetric, so check both ways */
- return a == b || (a->cloneable & (1 << b->type) &&
- b->cloneable & (1 << a->type));
-}
-
-static bool check_single_encoder_cloning(struct drm_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
-{
- struct intel_encoder *source_encoder;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
- continue;
-
- source_encoder =
- to_intel_encoder(connector_state->best_encoder);
- if (!encoders_cloneable(encoder, source_encoder))
- return false;
- }
-
- return true;
-}
-
-static bool check_encoder_cloning(struct drm_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_encoder *encoder;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
- if (!check_single_encoder_cloning(state, crtc, encoder))
- return false;
- }
-
- return true;
-}
-
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -11693,6 +12064,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
struct intel_dpll_hw_state dpll_hw_state;
enum intel_dpll_id shared_dpll;
uint32_t ddi_pll_sel;
+ bool force_thru;
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
@@ -11704,6 +12076,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
ddi_pll_sel = crtc_state->ddi_pll_sel;
+ force_thru = crtc_state->pch_pfit.force_thru;
memset(crtc_state, 0, sizeof *crtc_state);
@@ -11712,13 +12085,14 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
crtc_state->ddi_pll_sel = ddi_pll_sel;
+ crtc_state->pch_pfit.force_thru = force_thru;
}
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
struct intel_crtc_state *pipe_config)
{
+ struct drm_atomic_state *state = pipe_config->base.state;
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -11726,16 +12100,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
int i;
bool retry = true;
- if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
- DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- return -EINVAL;
- }
-
- if (!check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
- return -EINVAL;
- }
-
clear_intel_crtc_state(pipe_config);
pipe_config->cpu_transcoder =
@@ -11823,94 +12187,33 @@ encoder_retry:
goto encoder_retry;
}
- pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+ /* Dithering does not seem to pass bits through correctly when it should,
+ * so only enable it on 6bpc panels. */
+ pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
- return 0;
fail:
return ret;
}
-static bool intel_crtc_in_use(struct drm_crtc *crtc)
-{
- struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc)
- return true;
-
- return false;
-}
-
-static bool
-needs_modeset(struct drm_crtc_state *state)
-{
- return state->mode_changed || state->active_changed;
-}
-
static void
-intel_modeset_update_state(struct drm_atomic_state *state)
+intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- struct drm_connector *connector;
int i;
- intel_shared_dpll_commit(dev_priv);
-
- for_each_intel_encoder(dev, intel_encoder) {
- if (!intel_encoder->base.crtc)
- continue;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc != intel_encoder->base.crtc)
- continue;
-
- if (crtc_state->enable && needs_modeset(crtc_state))
- intel_encoder->connectors_active = false;
-
- break;
- }
- }
-
- drm_atomic_helper_swap_state(state->dev, state);
- intel_modeset_fixup_state(state);
-
/* Double check state. */
- for_each_crtc(dev, crtc) {
- WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
- }
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder || !connector->encoder->crtc)
- continue;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc != connector->encoder->crtc)
- continue;
-
- if (crtc->state->enable && needs_modeset(crtc->state)) {
- struct drm_property *dpms_property =
- dev->mode_config.dpms_property;
-
- connector->dpms = DRM_MODE_DPMS_ON;
- drm_object_property_set_value(&connector->base,
- dpms_property,
- DRM_MODE_DPMS_ON);
-
- intel_encoder = to_intel_encoder(connector->encoder);
- intel_encoder->connectors_active = true;
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
- break;
- }
+ /* Update hwmode for vblank functions */
+ if (crtc->state->active)
+ crtc->hwmode = crtc->state->adjusted_mode;
+ else
+ crtc->hwmode.crtc_clock = 0;
}
-
}
static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11937,27 +12240,133 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
base.head) \
if (mask & (1 <<(intel_crtc)->pipe))
+
+static bool
+intel_compare_m_n(unsigned int m, unsigned int n,
+ unsigned int m2, unsigned int n2,
+ bool exact)
+{
+ if (m == m2 && n == n2)
+ return true;
+
+ if (exact || !m || !n || !m2 || !n2)
+ return false;
+
+ BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
+
+ if (m > m2) {
+ while (m > m2) {
+ m2 <<= 1;
+ n2 <<= 1;
+ }
+ } else if (m < m2) {
+ while (m < m2) {
+ m <<= 1;
+ n <<= 1;
+ }
+ }
+
+ return m == m2 && n == n2;
+}
+
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+ struct intel_link_m_n *m2_n2,
+ bool adjust)
+{
+ if (m_n->tu == m2_n2->tu &&
+ intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+ m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+ intel_compare_m_n(m_n->link_m, m_n->link_n,
+ m2_n2->link_m, m2_n2->link_n, !adjust)) {
+ if (adjust)
+ *m2_n2 = *m_n;
+
+ return true;
+ }
+
+ return false;
+}
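A quick sanity check of the ratio comparison above, with purely illustrative values:

	/*
	 * Worked example (values assumed for illustration): with exact == false,
	 * 3/4 compares equal to 12/16 because (m, n) is doubled twice to (12, 16)
	 * before the final equality test, while 3/4 vs 5/7 scales to 6/8 vs 5/7
	 * and stays unequal.
	 *
	 *	intel_compare_m_n(3, 4, 12, 16, false)	-> true
	 *	intel_compare_m_n(3, 4,  5,  7, false)	-> false
	 *	intel_compare_m_n(3, 4,  6,  8, true)	-> false (exact match required)
	 */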
+
static bool
intel_pipe_config_compare(struct drm_device *dev,
struct intel_crtc_state *current_config,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ bool adjust)
{
+ bool ret = true;
+
+#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
+ do { \
+ if (!adjust) \
+ DRM_ERROR(fmt, ##__VA_ARGS__); \
+ else \
+ DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
+ } while (0)
+
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
- DRM_ERROR("mismatch in " #name " " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected 0x%08x, found 0x%08x)\n", \
current_config->name, \
pipe_config->name); \
- return false; \
+ ret = false; \
}
#define PIPE_CONF_CHECK_I(name) \
if (current_config->name != pipe_config->name) { \
- DRM_ERROR("mismatch in " #name " " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
- return false; \
+ ret = false; \
+ }
+
+#define PIPE_CONF_CHECK_M_N(name) \
+ if (!intel_compare_link_m_n(&current_config->name, \
+ &pipe_config->name,\
+ adjust)) { \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ "(expected tu %i gmch %i/%i link %i/%i, " \
+ "found tu %i, gmch %i/%i link %i/%i)\n", \
+ current_config->name.tu, \
+ current_config->name.gmch_m, \
+ current_config->name.gmch_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.gmch_m, \
+ pipe_config->name.gmch_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
+ ret = false; \
+ }
+
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+ if (!intel_compare_link_m_n(&current_config->name, \
+ &pipe_config->name, adjust) && \
+ !intel_compare_link_m_n(&current_config->alt_name, \
+ &pipe_config->name, adjust)) { \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ "(expected tu %i gmch %i/%i link %i/%i, " \
+ "or tu %i gmch %i/%i link %i/%i, " \
+ "found tu %i, gmch %i/%i link %i/%i)\n", \
+ current_config->name.tu, \
+ current_config->name.gmch_m, \
+ current_config->name.gmch_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ current_config->alt_name.tu, \
+ current_config->alt_name.gmch_m, \
+ current_config->alt_name.gmch_n, \
+ current_config->alt_name.link_m, \
+ current_config->alt_name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.gmch_m, \
+ pipe_config->name.gmch_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
+ ret = false; \
}
/* This is required for BDW+ where there is only one set of registers for
@@ -11968,30 +12377,30 @@ intel_pipe_config_compare(struct drm_device *dev,
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
if ((current_config->name != pipe_config->name) && \
(current_config->alt_name != pipe_config->name)) { \
- DRM_ERROR("mismatch in " #name " " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i or %i, found %i)\n", \
current_config->name, \
current_config->alt_name, \
pipe_config->name); \
- return false; \
+ ret = false; \
}
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- DRM_ERROR("mismatch in " #name "(" #mask ") " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
"(expected %i, found %i)\n", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
- return false; \
+ ret = false; \
}
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- DRM_ERROR("mismatch in " #name " " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
- return false; \
+ ret = false; \
}
#define PIPE_CONF_QUIRK(quirk) \
@@ -12001,35 +12410,18 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
- PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
- PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
- PIPE_CONF_CHECK_I(fdi_m_n.link_m);
- PIPE_CONF_CHECK_I(fdi_m_n.link_n);
- PIPE_CONF_CHECK_I(fdi_m_n.tu);
+ PIPE_CONF_CHECK_M_N(fdi_m_n);
PIPE_CONF_CHECK_I(has_dp_encoder);
if (INTEL_INFO(dev)->gen < 8) {
- PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
- PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
- PIPE_CONF_CHECK_I(dp_m_n.link_m);
- PIPE_CONF_CHECK_I(dp_m_n.link_n);
- PIPE_CONF_CHECK_I(dp_m_n.tu);
-
- if (current_config->has_drrs) {
- PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
- PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
- PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
- PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
- PIPE_CONF_CHECK_I(dp_m2_n2.tu);
- }
- } else {
- PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
- PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
- PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
- PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
- PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
- }
+ PIPE_CONF_CHECK_M_N(dp_m_n);
+
+ PIPE_CONF_CHECK_I(has_drrs);
+ if (current_config->has_drrs)
+ PIPE_CONF_CHECK_M_N(dp_m2_n2);
+ } else
+ PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12071,21 +12463,11 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
- /*
- * FIXME: BIOS likes to set up a cloned config with lvds+external
- * screen. Since we don't yet re-compute the pipe config when moving
- * just the lvds port away to another pipe the sw tracking won't match.
- *
- * Proper atomic modesets with recomputed global state will fix this.
- * Until then just don't check gmch state for inherited modes.
- */
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
- PIPE_CONF_CHECK_I(gmch_pfit.control);
- /* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
- PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
- }
+ PIPE_CONF_CHECK_I(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (INTEL_INFO(dev)->gen < 4)
+ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
@@ -12125,8 +12507,9 @@ intel_pipe_config_compare(struct drm_device *dev,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
+#undef INTEL_ERR_OR_DBG_KMS
- return true;
+ return ret;
}
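For reference, when adjust == true the INTEL_ERR_OR_DBG_KMS wrapper above downgrades every mismatch to a debug message and only clears ret, so a fastset comparison can fail softly. A rough expansion of one field check under that mode (shown purely as an illustration; the actual expansion keeps the do/while wrapper):

	if (current_config->pipe_src_w != pipe_config->pipe_src_w) {
		DRM_DEBUG_KMS("mismatch in pipe_src_w "
			      "(expected %i, found %i)\n",
			      current_config->pipe_src_w,
			      pipe_config->pipe_src_w);
		ret = false;
	}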
static void check_wm_state(struct drm_device *dev)
@@ -12180,17 +12563,23 @@ static void check_wm_state(struct drm_device *dev)
}
static void
-check_connector_state(struct drm_device *dev)
+check_connector_state(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
{
- struct intel_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_connector_state *state = connector->state;
- for_each_intel_connector(dev, connector) {
/* This also checks the encoder/connector hw state with the
* ->get_hw_state callbacks. */
- intel_connector_check_state(connector);
+ intel_connector_check_state(to_intel_connector(connector));
- I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
- "connector's staged encoder doesn't match current encoder\n");
+ I915_STATE_WARN(state->best_encoder != encoder,
+ "connector's atomic encoder doesn't match legacy encoder\n");
}
}
@@ -12202,124 +12591,106 @@ check_encoder_state(struct drm_device *dev)
for_each_intel_encoder(dev, encoder) {
bool enabled = false;
- bool active = false;
- enum pipe pipe, tracked_pipe;
+ enum pipe pipe;
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
encoder->base.base.id,
encoder->base.name);
- I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
- "encoder's stage crtc doesn't match current crtc\n");
- I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
- "encoder's active_connectors set, but no crtc\n");
-
for_each_intel_connector(dev, connector) {
- if (connector->base.encoder != &encoder->base)
+ if (connector->base.state->best_encoder != &encoder->base)
continue;
enabled = true;
- if (connector->base.dpms != DRM_MODE_DPMS_OFF)
- active = true;
+
+ I915_STATE_WARN(connector->base.state->crtc !=
+ encoder->base.crtc,
+ "connector's crtc doesn't match encoder crtc\n");
}
- /*
- * for MST connectors if we unplug the connector is gone
- * away but the encoder is still connected to a crtc
- * until a modeset happens in response to the hotplug.
- */
- if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
- continue;
I915_STATE_WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
!!encoder->base.crtc, enabled);
- I915_STATE_WARN(active && !encoder->base.crtc,
- "active encoder with no crtc\n");
-
- I915_STATE_WARN(encoder->connectors_active != active,
- "encoder's computed active state doesn't match tracked active state "
- "(expected %i, found %i)\n", active, encoder->connectors_active);
-
- active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != encoder->connectors_active,
- "encoder's hw state doesn't match sw tracking "
- "(expected %i, found %i)\n",
- encoder->connectors_active, active);
-
- if (!encoder->base.crtc)
- continue;
- tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- I915_STATE_WARN(active && pipe != tracked_pipe,
- "active encoder's pipe doesn't match"
- "(expected %i, found %i)\n",
- tracked_pipe, pipe);
+ if (!encoder->base.crtc) {
+ bool active;
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active,
+ "encoder detached but still enabled on pipe %c.\n",
+ pipe_name(pipe));
+ }
}
}
static void
-check_crtc_state(struct drm_device *dev)
+check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
struct intel_encoder *encoder;
- struct intel_crtc_state pipe_config;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- for_each_intel_crtc(dev, crtc) {
- bool enabled = false;
- bool active = false;
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *pipe_config, *sw_config;
+ bool active;
- memset(&pipe_config, 0, sizeof(pipe_config));
+ if (!needs_modeset(crtc->state))
+ continue;
- DRM_DEBUG_KMS("[CRTC:%d]\n",
- crtc->base.base.id);
+ __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
+ pipe_config = to_intel_crtc_state(old_crtc_state);
+ memset(pipe_config, 0, sizeof(*pipe_config));
+ pipe_config->base.crtc = crtc;
+ pipe_config->base.state = old_state;
- I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
- "active crtc, but not enabled in sw tracking\n");
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.id);
- for_each_intel_encoder(dev, encoder) {
- if (encoder->base.crtc != &crtc->base)
- continue;
- enabled = true;
- if (encoder->connectors_active)
- active = true;
- }
+ active = dev_priv->display.get_pipe_config(intel_crtc,
+ pipe_config);
- I915_STATE_WARN(active != crtc->active,
- "crtc's computed active state doesn't match tracked active state "
- "(expected %i, found %i)\n", active, crtc->active);
- I915_STATE_WARN(enabled != crtc->base.state->enable,
- "crtc's computed enabled state doesn't match tracked enabled state "
- "(expected %i, found %i)\n", enabled,
- crtc->base.state->enable);
+ /* hw state is inconsistent with the pipe quirk */
+ if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+ (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+ active = crtc->state->active;
- active = dev_priv->display.get_pipe_config(crtc,
- &pipe_config);
+ I915_STATE_WARN(crtc->state->active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n", crtc->state->active, active);
- /* hw state is inconsistent with the pipe quirk */
- if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
- (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
- active = crtc->active;
+ I915_STATE_WARN(intel_crtc->active != crtc->state->active,
+ "transitional active state does not match atomic hw state "
+ "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
- for_each_intel_encoder(dev, encoder) {
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
enum pipe pipe;
- if (encoder->base.crtc != &crtc->base)
- continue;
- if (encoder->get_hw_state(encoder, &pipe))
- encoder->get_config(encoder, &pipe_config);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active != crtc->state->active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active, crtc->state->active);
+
+ I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+ "Encoder connected to wrong pipe %c\n",
+ pipe_name(pipe));
+
+ if (active)
+ encoder->get_config(encoder, pipe_config);
}
- I915_STATE_WARN(crtc->active != active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", crtc->active, active);
+ if (!crtc->state->active)
+ continue;
- if (active &&
- !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
+ sw_config = to_intel_crtc_state(crtc->state);
+ if (!intel_pipe_config_compare(dev, sw_config,
+ pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
- intel_dump_pipe_config(crtc, &pipe_config,
+ intel_dump_pipe_config(intel_crtc, pipe_config,
"[hw state]");
- intel_dump_pipe_config(crtc, crtc->config,
+ intel_dump_pipe_config(intel_crtc, sw_config,
"[sw state]");
}
}
@@ -12374,13 +12745,14 @@ check_shared_dpll_state(struct drm_device *dev)
}
}
-void
-intel_modeset_check_state(struct drm_device *dev)
+static void
+intel_modeset_check_state(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
{
check_wm_state(dev);
- check_connector_state(dev);
+ check_connector_state(dev, old_state);
check_encoder_state(dev);
- check_crtc_state(dev);
+ check_crtc_state(dev, old_state);
check_shared_dpll_state(dev);
}
@@ -12434,557 +12806,390 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = 1;
}
-static struct intel_crtc_state *
-intel_modeset_compute_config(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct intel_crtc_state *pipe_config;
- int ret = 0;
-
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- return ERR_PTR(ret);
-
- ret = drm_atomic_helper_check_modeset(state->dev, state);
- if (ret)
- return ERR_PTR(ret);
-
- /*
- * Note this needs changes when we start tracking multiple modes
- * and crtcs. At that point we'll need to compute the whole config
- * (i.e. one pipe_config for each crtc) rather than just the one
- * for this crtc.
- */
- pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
- if (IS_ERR(pipe_config))
- return pipe_config;
-
- if (!pipe_config->base.enable)
- return pipe_config;
-
- ret = intel_modeset_pipe_config(crtc, state, pipe_config);
- if (ret)
- return ERR_PTR(ret);
-
- /* Check things that can only be changed through modeset */
- if (pipe_config->has_audio !=
- to_intel_crtc(crtc)->config->has_audio)
- pipe_config->base.mode_changed = true;
-
- /*
- * Note we have an issue here with infoframes: current code
- * only updates them on the full mode set path per hw
- * requirements. So here we should be checking for any
- * required changes and forcing a mode set.
- */
-
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,"[modeset]");
-
- ret = drm_atomic_helper_check_planes(state->dev, state);
- if (ret)
- return ERR_PTR(ret);
-
- return pipe_config;
-}
-
-static int __intel_set_mode_setup_plls(struct drm_atomic_state *state)
+static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned clear_pipes = 0;
+ struct intel_shared_dpll_config *shared_dpll = NULL;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *intel_crtc_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- int ret = 0;
int i;
if (!dev_priv->display.crtc_compute_clock)
- return 0;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- intel_crtc = to_intel_crtc(crtc);
- intel_crtc_state = to_intel_crtc_state(crtc_state);
-
- if (needs_modeset(crtc_state)) {
- clear_pipes |= 1 << intel_crtc->pipe;
- intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
- }
- }
-
- ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
- if (ret)
- goto done;
+ return;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc_state) || !crtc_state->enable)
- continue;
+ int dpll;
intel_crtc = to_intel_crtc(crtc);
intel_crtc_state = to_intel_crtc_state(crtc_state);
+ dpll = intel_crtc_state->shared_dpll;
- ret = dev_priv->display.crtc_compute_clock(intel_crtc,
- intel_crtc_state);
- if (ret) {
- intel_shared_dpll_abort_config(dev_priv);
- goto done;
- }
- }
+ if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
+ continue;
-done:
- return ret;
-}
+ intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
-/* Code that should eventually be part of atomic_check() */
-static int __intel_set_mode_checks(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
- int ret;
+ if (!shared_dpll)
+ shared_dpll = intel_atomic_get_shared_dpll_state(state);
- /*
- * See if the config requires any additional preparation, e.g.
- * to adjust global state with pipes off. We need to do this
- * here so we can get the modeset_pipe updated config for the new
- * mode set on this crtc. For other crtcs we need to use the
- * adjusted_mode bits in the crtc directly.
- */
- if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
- ret = valleyview_modeset_global_pipes(state);
- if (ret)
- return ret;
+ shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
}
-
- ret = __intel_set_mode_setup_plls(state);
- if (ret)
- return ret;
-
- return 0;
}
-static int __intel_set_mode(struct drm_crtc *modeset_crtc,
- struct intel_crtc_state *pipe_config)
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
- struct drm_device *dev = modeset_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_atomic_state *state = pipe_config->base.state;
- struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- int ret = 0;
+ struct intel_crtc *intel_crtc;
+ struct drm_crtc *crtc;
+ struct intel_crtc_state *first_crtc_state = NULL;
+ struct intel_crtc_state *other_crtc_state = NULL;
+ enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
int i;
- ret = __intel_set_mode_checks(state);
- if (ret < 0)
- return ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
+ /* look at all crtc's that are going to be enabled during the modeset */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc_state))
+ intel_crtc = to_intel_crtc(crtc);
+
+ if (!crtc_state->active || !needs_modeset(crtc_state))
continue;
- if (!crtc_state->enable) {
- intel_crtc_disable(crtc);
- } else if (crtc->state->enable) {
- intel_crtc_disable_planes(crtc);
- dev_priv->display.crtc_disable(crtc);
+ if (first_crtc_state) {
+ other_crtc_state = to_intel_crtc_state(crtc_state);
+ break;
+ } else {
+ first_crtc_state = to_intel_crtc_state(crtc_state);
+ first_pipe = intel_crtc->pipe;
}
}
- /* crtc->mode is already used by the ->mode_set callbacks, hence we need
- * to set it here already despite that we pass it down the callchain.
- *
- * Note we'll need to fix this up when we start tracking multiple
- * pipes; here we assume a single modeset_pipe and only track the
- * single crtc and mode.
- */
- if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
- modeset_crtc->mode = pipe_config->base.mode;
-
- /*
- * Calculate and store various constants which
- * are later needed by vblank and swap-completion
- * timestamping. They are derived from true hwmode.
- */
- drm_calc_timestamping_constants(modeset_crtc,
- &pipe_config->base.adjusted_mode);
- }
-
- /* Only after disabling all output pipelines that will be changed can we
- * update the the output configuration. */
- intel_modeset_update_state(state);
+ /* No workaround needed? */
+ if (!first_crtc_state)
+ return 0;
- /* The state has been swaped above, so state actually contains the
- * old state now. */
+ /* w/a possibly needed, check how many crtc's are already enabled. */
+ for_each_intel_crtc(state->dev, intel_crtc) {
+ struct intel_crtc_state *pipe_config;
- modeset_update_crtc_power_domains(state);
+ pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(pipe_config))
+ return PTR_ERR(pipe_config);
- drm_atomic_helper_commit_planes(dev, state);
+ pipe_config->hsw_workaround_pipe = INVALID_PIPE;
- /* Now enable the clocks, plane, pipe, and connectors that we set up. */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc->state) || !crtc->state->enable)
+ if (!pipe_config->base.active ||
+ needs_modeset(&pipe_config->base))
continue;
- update_scanline_offset(to_intel_crtc(crtc));
+ /* 2 or more enabled crtcs means no need for w/a */
+ if (enabled_pipe != INVALID_PIPE)
+ return 0;
- dev_priv->display.crtc_enable(crtc);
- intel_crtc_enable_planes(crtc);
+ enabled_pipe = intel_crtc->pipe;
}
- /* FIXME: add subpixel order */
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_free(state);
+ if (enabled_pipe != INVALID_PIPE)
+ first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+ else if (other_crtc_state)
+ other_crtc_state->hsw_workaround_pipe = first_pipe;
return 0;
}
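A concrete scenario for the workaround above (pipes chosen only for illustration):

	/*
	 * Example: pipe A is already active and untouched by this modeset,
	 * while pipe B is being enabled.  first_crtc_state then belongs to
	 * pipe B, enabled_pipe resolves to PIPE_A, and the function ends with
	 *
	 *	first_crtc_state->hsw_workaround_pipe = PIPE_A;
	 *
	 * so the enable path (the consumer of hsw_workaround_pipe is outside
	 * this hunk) can wait for vblanks on pipe A before enabling planes
	 * on pipe B.  With two or more pipes already running, or no pipe
	 * being enabled, the function bails out early and no w/a is applied.
	 */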
-static int intel_set_mode_with_config(struct drm_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- bool force_restore)
+static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
- int ret;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
- ret = __intel_set_mode(crtc, pipe_config);
+ /* add all active pipes to the state */
+ for_each_crtc(state->dev, crtc) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- if (ret == 0 && force_restore) {
- intel_modeset_update_staged_output_state(crtc->dev);
- intel_modeset_check_state(crtc->dev);
- }
+ if (!crtc_state->active || needs_modeset(crtc_state))
+ continue;
- return ret;
-}
+ crtc_state->mode_changed = true;
-static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- bool force_restore)
-{
- struct intel_crtc_state *pipe_config;
- int ret = 0;
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ break;
- pipe_config = intel_modeset_compute_config(crtc, state);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- goto out;
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ break;
}
- ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
- if (ret)
- goto out;
-
-out:
return ret;
}
-void intel_crtc_restore_mode(struct drm_crtc *crtc)
+
+static int intel_modeset_checks(struct drm_atomic_state *state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_atomic_state *state;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
- struct drm_connector_state *connector_state;
- struct intel_crtc_state *crtc_state;
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- state = drm_atomic_state_alloc(dev);
- if (!state) {
- DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
- crtc->base.id);
- return;
- }
-
- state->acquire_ctx = dev->mode_config.acquire_ctx;
-
- /* The force restore path in the HW readout code relies on the staged
- * config still keeping the user requested config while the actual
- * state has been overwritten by the configuration read from HW. We
- * need to copy the staged config to the atomic state, otherwise the
- * mode set will just reapply the state the HW is already in. */
- for_each_intel_encoder(dev, encoder) {
- if (&encoder->new_crtc->base != crtc)
- continue;
-
- for_each_intel_connector(dev, connector) {
- if (connector->new_encoder != encoder)
- continue;
-
- connector_state = drm_atomic_get_connector_state(state, &connector->base);
- if (IS_ERR(connector_state)) {
- DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
- connector->base.base.id,
- connector->base.name,
- PTR_ERR(connector_state));
- continue;
- }
-
- connector_state->crtc = crtc;
- connector_state->best_encoder = &encoder->base;
- }
- }
-
- crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
- if (IS_ERR(crtc_state)) {
- DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
- crtc->base.id, PTR_ERR(crtc_state));
- drm_atomic_state_free(state);
- return;
+ if (!check_digital_port_conflicts(state)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ return -EINVAL;
}
- crtc_state->base.active = crtc_state->base.enable =
- to_intel_crtc(crtc)->new_enabled;
-
- drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
+ /*
+ * See if the config requires any additional preparation, e.g.
+ * to adjust global state with pipes off. We need to do this
+ * here so we can get the modeset_pipe updated config for the new
+ * mode set on this crtc. For other crtcs we need to use the
+ * adjusted_mode bits in the crtc directly.
+ */
+ if (dev_priv->display.modeset_calc_cdclk) {
+ unsigned int cdclk;
- intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
- crtc->primary->fb, crtc->x, crtc->y);
+ ret = dev_priv->display.modeset_calc_cdclk(state);
- ret = intel_set_mode(crtc, state, false);
- if (ret)
- drm_atomic_state_free(state);
-}
+ cdclk = to_intel_atomic_state(state)->cdclk;
+ if (!ret && cdclk != dev_priv->cdclk_freq)
+ ret = intel_modeset_all_pipes(state);
-#undef for_each_intel_crtc_masked
+ if (ret < 0)
+ return ret;
+ } else
+ to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
-static bool intel_connector_in_mode_set(struct intel_connector *connector,
- struct drm_mode_set *set)
-{
- int ro;
+ intel_modeset_clear_plls(state);
- for (ro = 0; ro < set->num_connectors; ro++)
- if (set->connectors[ro] == &connector->base)
- return true;
+ if (IS_HASWELL(dev))
+ return haswell_mode_set_planes_workaround(state);
- return false;
+ return 0;
}
-static int
-intel_modeset_stage_output_state(struct drm_device *dev,
- struct drm_mode_set *set,
- struct drm_atomic_state *state)
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+static int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
- struct intel_connector *connector;
- struct drm_connector *drm_connector;
- struct drm_connector_state *connector_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- int i, ret;
-
- /* The upper layers ensure that we either disable a crtc or have a list
- * of connectors. For paranoia, double-check this. */
- WARN_ON(!set->fb && (set->num_connectors != 0));
- WARN_ON(set->fb && (set->num_connectors == 0));
+ int ret, i;
+ bool any_ms = false;
- for_each_intel_connector(dev, connector) {
- bool in_mode_set = intel_connector_in_mode_set(connector, set);
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
- if (!in_mode_set && connector->base.state->crtc != set->crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc_state);
- connector_state =
- drm_atomic_get_connector_state(state, &connector->base);
- if (IS_ERR(connector_state))
- return PTR_ERR(connector_state);
+ /* Catch I915_MODE_FLAG_INHERITED */
+ if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+ crtc_state->mode_changed = true;
- if (in_mode_set) {
- int pipe = to_intel_crtc(set->crtc)->pipe;
- connector_state->best_encoder =
- &intel_find_encoder(connector, pipe)->base;
+ if (!crtc_state->enable) {
+ if (needs_modeset(crtc_state))
+ any_ms = true;
+ continue;
}
- if (connector->base.state->crtc != set->crtc)
+ if (!needs_modeset(crtc_state))
continue;
- /* If we disable the crtc, disable all its connectors. Also, if
- * the connector is on the changing crtc but not on the new
- * connector list, disable it. */
- if (!set->fb || !in_mode_set) {
- connector_state->best_encoder = NULL;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
- connector->base.base.id,
- connector->base.name);
- }
- }
- /* connector->new_encoder is now updated for all connectors. */
+ /* FIXME: For only active_changed we shouldn't need to do any
+ * state recomputation at all. */
- for_each_connector_in_state(state, drm_connector, connector_state, i) {
- connector = to_intel_connector(drm_connector);
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
- if (!connector_state->best_encoder) {
- ret = drm_atomic_set_crtc_for_connector(connector_state,
- NULL);
- if (ret)
- return ret;
+ ret = intel_modeset_pipe_config(crtc, pipe_config);
+ if (ret)
+ return ret;
- continue;
+ if (i915.fastboot &&
+ intel_pipe_config_compare(state->dev,
+ to_intel_crtc_state(crtc->state),
+ pipe_config, true)) {
+ crtc_state->mode_changed = false;
}
- if (intel_connector_in_mode_set(connector, set)) {
- struct drm_crtc *crtc = connector->base.state->crtc;
-
- /* If this connector was in a previous crtc, add it
- * to the state. We might need to disable it. */
- if (crtc) {
- crtc_state =
- drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
+ if (needs_modeset(crtc_state)) {
+ any_ms = true;
- ret = drm_atomic_set_crtc_for_connector(connector_state,
- set->crtc);
+ ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
- /* Make sure the new CRTC will work with the encoder */
- if (!drm_encoder_crtc_ok(connector_state->best_encoder,
- connector_state->crtc)) {
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
- connector->base.base.id,
- connector->base.name,
- connector_state->crtc->base.id);
-
- if (connector_state->best_encoder != &connector->encoder->base)
- connector->encoder =
- to_intel_encoder(connector_state->best_encoder);
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ needs_modeset(crtc_state) ?
+ "[modeset]" : "[fastset]");
}
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- bool has_connectors;
+ if (any_ms) {
+ ret = intel_modeset_checks(state);
- ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
return ret;
+ } else
+ to_intel_atomic_state(state)->cdclk =
+ to_i915(state->dev)->cdclk_freq;
+
+ return drm_atomic_helper_check_planes(state->dev, state);
+}
- has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc);
- if (has_connectors != crtc_state->enable)
- crtc_state->enable =
- crtc_state->active = has_connectors;
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+ int i;
+ bool any_ms = false;
+
+ if (async) {
+ DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+ return -EINVAL;
}
- ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
- set->fb, set->x, set->y);
+ ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
- crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ drm_atomic_helper_swap_state(dev, state);
- if (set->mode)
- drm_mode_copy(&crtc_state->mode, set->mode);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (set->num_connectors)
- crtc_state->active = true;
+ if (!needs_modeset(crtc->state))
+ continue;
- return 0;
-}
+ any_ms = true;
+ intel_pre_plane_update(intel_crtc);
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->primary->state);
+ if (crtc_state->active) {
+ intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc->active = false;
+ intel_disable_shared_dpll(intel_crtc);
+ }
+ }
- return plane_state->visible;
-}
+ /* Only after disabling all output pipelines that will be changed can we
+ * update the output configuration. */
+ intel_modeset_update_crtc_state(state);
-static int intel_crtc_set_config(struct drm_mode_set *set)
-{
- struct drm_device *dev;
- struct drm_atomic_state *state = NULL;
- struct intel_crtc_state *pipe_config;
- bool primary_plane_was_visible;
- int ret;
+ if (any_ms) {
+ intel_shared_dpll_commit(state);
- BUG_ON(!set);
- BUG_ON(!set->crtc);
- BUG_ON(!set->crtc->helper_private);
+ drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+ modeset_update_crtc_power_domains(state);
+ }
- /* Enforce sane interface api - has been abused by the fb helper. */
- BUG_ON(!set->mode && set->fb);
- BUG_ON(set->fb && set->num_connectors == 0);
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool modeset = needs_modeset(crtc->state);
- if (set->fb) {
- DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
- set->crtc->base.id, set->fb->base.id,
- (int)set->num_connectors, set->x, set->y);
- } else {
- DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- }
+ if (modeset && crtc->state->active) {
+ update_scanline_offset(to_intel_crtc(crtc));
+ dev_priv->display.crtc_enable(crtc);
+ }
- dev = set->crtc->dev;
+ if (!modeset)
+ intel_pre_plane_update(intel_crtc);
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+ intel_post_plane_update(intel_crtc);
+ }
- state->acquire_ctx = dev->mode_config.acquire_ctx;
+ /* FIXME: add subpixel order */
- ret = intel_modeset_stage_output_state(dev, set, state);
- if (ret)
- goto out;
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
- pipe_config = intel_modeset_compute_config(set->crtc, state);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- goto out;
- }
+ if (any_ms)
+ intel_modeset_check_state(dev, state);
+
+ drm_atomic_state_free(state);
- intel_update_pipe_size(to_intel_crtc(set->crtc));
+ return 0;
+}
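Not visible in this hunk: the check/commit pair above is consumed through the device's mode_config funcs. A minimal sketch of that wiring, with the struct name and the other fields assumed rather than taken from this patch:

	static const struct drm_mode_config_funcs intel_mode_funcs = {
		.fb_create = intel_user_framebuffer_create,	/* assumed */
		.atomic_check = intel_atomic_check,
		.atomic_commit = intel_atomic_commit,
	};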
- primary_plane_was_visible = primary_plane_visible(set->crtc);
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
- ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
+ crtc->base.id);
+ return;
+ }
- if (ret == 0 &&
- pipe_config->base.enable &&
- pipe_config->base.planes_changed &&
- !needs_modeset(&pipe_config->base)) {
- struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
- /*
- * We need to make sure the primary plane is re-enabled if it
- * has previously been turned off.
- */
- if (ret == 0 && !primary_plane_was_visible &&
- primary_plane_visible(set->crtc)) {
- WARN_ON(!intel_crtc->active);
- intel_post_enable_primary(set->crtc);
- }
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (!ret) {
+ if (!crtc_state->active)
+ goto out;
- /*
- * In the fastboot case this may be our only check of the
- * state after boot. It would be better to only do it on
- * the first update, but we don't have a nice way of doing that
- * (and really, set_config isn't used much for high freq page
- * flipping, so increasing its cost here shouldn't be a big
- * deal).
- */
- if (i915.fastboot && ret == 0)
- intel_modeset_check_state(set->crtc->dev);
+ crtc_state->mode_changed = true;
+ ret = drm_atomic_commit(state);
}
- if (ret) {
- DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
- set->crtc->base.id, ret);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(state->acquire_ctx);
+ goto retry;
}
-out:
if (ret)
+out:
drm_atomic_state_free(state);
- return ret;
}
+#undef for_each_intel_crtc_masked
+
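The restore helper above is the canonical atomic retry idiom: allocate a state, force mode_changed on the CRTC, commit, and on -EDEADLK clear the state and back off the acquire context before retrying; drm_atomic_commit() takes ownership of the state on success, so it is only freed on error. A consolidated sketch of that idiom (illustrative names, not part of this patch):

	static void restore_crtc_mode_sketch(struct drm_crtc *crtc)
	{
		struct drm_atomic_state *state;
		struct drm_crtc_state *crtc_state;
		int ret;

		state = drm_atomic_state_alloc(crtc->dev);
		if (!state)
			return;

		/* reuse the acquire context held by the legacy caller */
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

	retry:
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (!ret) {
			crtc_state->mode_changed = true;	/* force a full modeset */
			ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			/* drop the contended locks, rewind the state and try again */
			drm_atomic_state_clear(state);
			drm_modeset_backoff(state->acquire_ctx);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);
	}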
static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = intel_crtc_gamma_set,
- .set_config = intel_crtc_set_config,
+ .set_config = drm_atomic_helper_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
@@ -13081,6 +13286,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_update_cdclk(dev);
+
if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -13092,28 +13299,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
}
/**
- * intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-bool intel_wm_need_update(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- /* Update watermarks on tiling changes. */
- if (!plane->state->fb || !state->fb ||
- plane->state->fb->modifier[0] != state->fb->modifier[0] ||
- plane->state->rotation != state->rotation)
- return true;
-
- return false;
-}
-
-/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
* @fb: framebuffer to prepare for presentation
@@ -13132,27 +13317,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_plane->pipe;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
- unsigned frontbuffer_bits = 0;
int ret = 0;
if (!obj)
return 0;
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
- break;
- case DRM_PLANE_TYPE_CURSOR:
- frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
- break;
- }
-
mutex_lock(&dev->struct_mutex);
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -13162,11 +13333,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
}
if (ret == 0)
- i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
+ i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
@@ -13213,7 +13384,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
dev = intel_crtc->base.dev;
dev_priv = dev->dev_private;
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- cdclk = dev_priv->display.get_display_clock_speed(dev);
+ cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
if (!crtc_clock || !cdclk)
return DRM_PLANE_HELPER_NO_SCALING;
@@ -13231,105 +13402,28 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
static int
intel_check_primary_plane(struct drm_plane *plane,
+ struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = state->base.crtc;
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->base.fb;
- struct drm_rect *dest = &state->dst;
- struct drm_rect *src = &state->src;
- const struct drm_rect *clip = &state->clip;
- bool can_position = false;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int ret;
-
- crtc = crtc ? crtc : plane->crtc;
- intel_crtc = to_intel_crtc(crtc);
- crtc_state = state->base.state ?
- intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ bool can_position = false;
- if (INTEL_INFO(dev)->gen >= 9) {
- /* use scaler when colorkey is not required */
- if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) {
- min_scale = 1;
- max_scale = skl_max_scale(intel_crtc, crtc_state);
- }
+ /* use scaler when colorkey is not required */
+ if (INTEL_INFO(plane->dev)->gen >= 9 &&
+ state->ckey.flags == I915_SET_COLORKEY_NONE) {
+ min_scale = 1;
+ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
can_position = true;
}
- ret = drm_plane_helper_check_update(plane, crtc, fb,
- src, dest, clip,
- min_scale,
- max_scale,
- can_position, true,
- &state->visible);
- if (ret)
- return ret;
-
- if (intel_crtc->active) {
- struct intel_plane_state *old_state =
- to_intel_plane_state(plane->state);
-
- intel_crtc->atomic.wait_for_flips = true;
-
- /*
- * FBC does not work on some platforms for rotated
- * planes, so disable it when rotation is not 0 and
- * update it when rotation is set back to 0.
- *
- * FIXME: This is redundant with the fbc update done in
- * the primary plane enable function except that that
- * one is done too late. We eventually need to unify
- * this.
- */
- if (state->visible &&
- INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.crtc == intel_crtc &&
- state->base.rotation != BIT(DRM_ROTATE_0)) {
- intel_crtc->atomic.disable_fbc = true;
- }
-
- if (state->visible && !old_state->visible) {
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (IS_BROADWELL(dev))
- intel_crtc->atomic.wait_vblank = true;
- }
-
- /*
- * FIXME: Actually if we will still have any other plane enabled
- * on the pipe we could let IPS enabled still, but for
- * now lets consider that when we make primary invisible
- * by setting DSPCNTR to 0 on update_primary_plane function
- * IPS needs to be disable.
- */
- if (!state->visible || !fb)
- intel_crtc->atomic.disable_ips = true;
-
- intel_crtc->atomic.fb_bits |=
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
-
- intel_crtc->atomic.update_fbc = true;
-
- if (intel_wm_need_update(plane, &state->base))
- intel_crtc->atomic.update_wm = true;
- }
-
- if (INTEL_INFO(dev)->gen >= 9) {
- ret = skl_update_scaler_users(intel_crtc, crtc_state,
- to_intel_plane(plane), state, 0);
- if (ret)
- return ret;
- }
-
- return 0;
+ return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+ &state->dst, &state->clip,
+ min_scale, max_scale,
+ can_position, true,
+ &state->visible);
}
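With the transitional per-crtc flags gone, the primary plane's check hook reduces to clip/scale validation through drm_plane_helper_check_update(), with gen9 opting into the pipe scaler only when no colorkey is in use. A minimal sketch of such a check callback for a plane that allows no scaling at all, mirroring the cursor variant further below (illustrative names, not part of this patch):

	static int example_check_plane(struct drm_plane *plane,
				       struct intel_crtc_state *crtc_state,
				       struct intel_plane_state *state)
	{
		/* clip against the CRTC; no scaling, plane must cover the full CRTC */
		return drm_plane_helper_check_update(plane, state->base.crtc,
						     state->base.fb,
						     &state->src, &state->dst,
						     &state->clip,
						     DRM_PLANE_HELPER_NO_SCALING,
						     DRM_PLANE_HELPER_NO_SCALING,
						     false, true,
						     &state->visible);
	}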
static void
@@ -13350,20 +13444,19 @@ intel_commit_primary_plane(struct drm_plane *plane,
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
- if (intel_crtc->active) {
- if (state->visible)
- /* FIXME: kill this fastboot hack */
- intel_update_pipe_size(intel_crtc);
+ if (!crtc->state->active)
+ return;
- dev_priv->display.update_primary_plane(crtc, plane->fb,
- crtc->x, crtc->y);
- }
+ if (state->visible)
+ /* FIXME: kill this fastboot hack */
+ intel_update_pipe_size(intel_crtc);
+
+ dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
}
static void
intel_disable_primary_plane(struct drm_plane *plane,
- struct drm_crtc *crtc,
- bool force)
+ struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13371,96 +13464,30 @@ intel_disable_primary_plane(struct drm_plane *plane,
dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
}
-static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+static void intel_begin_crtc_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
- struct drm_plane *p;
- unsigned fb_bits = 0;
-
- /* Track fb's for any planes being disabled */
- list_for_each_entry(p, &dev->mode_config.plane_list, head) {
- intel_plane = to_intel_plane(p);
-
- if (intel_crtc->atomic.disabled_planes &
- (1 << drm_plane_index(p))) {
- switch (p->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
- break;
- case DRM_PLANE_TYPE_CURSOR:
- fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
- break;
- }
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
- mutex_unlock(&dev->struct_mutex);
- }
- }
-
- if (intel_crtc->atomic.wait_for_flips)
- intel_crtc_wait_for_pending_flips(crtc);
-
- if (intel_crtc->atomic.disable_fbc)
- intel_fbc_disable(dev);
- if (intel_crtc->atomic.disable_ips)
- hsw_disable_ips(intel_crtc);
-
- if (intel_crtc->atomic.pre_disable_primary)
- intel_pre_disable_primary(crtc);
-
- if (intel_crtc->atomic.update_wm)
+ if (intel_crtc->atomic.update_wm_pre)
intel_update_watermarks(crtc);
- intel_runtime_pm_get(dev_priv);
-
/* Perform vblank evasion around commit operation */
- if (intel_crtc->active)
- intel_crtc->atomic.evade =
- intel_pipe_update_start(intel_crtc,
- &intel_crtc->atomic.start_vbl_count);
+ if (crtc->state->active)
+ intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
+
+ if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
+ skl_detach_scalers(intel_crtc);
}
-static void intel_finish_crtc_commit(struct drm_crtc *crtc)
+static void intel_finish_crtc_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *p;
-
- if (intel_crtc->atomic.evade)
- intel_pipe_update_end(intel_crtc,
- intel_crtc->atomic.start_vbl_count);
-
- intel_runtime_pm_put(dev_priv);
-
- if (intel_crtc->atomic.wait_vblank)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
-
- if (intel_crtc->atomic.update_fbc) {
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
- if (intel_crtc->atomic.post_enable_primary)
- intel_post_enable_primary(crtc);
-
- drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
- if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
- intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
- false, false);
-
- memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
+ if (crtc->state->active)
+ intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
}
/**
@@ -13495,7 +13522,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
struct intel_plane *primary;
struct intel_plane_state *state;
const uint32_t *intel_primary_formats;
- int num_formats;
+ unsigned int num_formats;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (primary == NULL)
@@ -13516,10 +13543,10 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
}
primary->pipe = pipe;
primary->plane = pipe;
+ primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
primary->commit_plane = intel_commit_primary_plane;
primary->disable_plane = intel_disable_primary_plane;
- primary->ckey.flags = I915_SET_COLORKEY_NONE;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
@@ -13567,37 +13594,29 @@ void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *
static int
intel_check_cursor_plane(struct drm_plane *plane,
+ struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->base.crtc;
- struct drm_device *dev = plane->dev;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_framebuffer *fb = state->base.fb;
- struct drm_rect *dest = &state->dst;
- struct drm_rect *src = &state->src;
- const struct drm_rect *clip = &state->clip;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc;
unsigned stride;
int ret;
- crtc = crtc ? crtc : plane->crtc;
- intel_crtc = to_intel_crtc(crtc);
-
- ret = drm_plane_helper_check_update(plane, crtc, fb,
- src, dest, clip,
+ ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+ &state->dst, &state->clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &state->visible);
if (ret)
return ret;
-
/* if we want to turn off the cursor ignore width and height */
if (!obj)
- goto finish;
+ return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
+ if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
@@ -13611,34 +13630,16 @@ intel_check_cursor_plane(struct drm_plane *plane,
if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
- ret = -EINVAL;
- }
-
-finish:
- if (intel_crtc->active) {
- if (plane->state->crtc_w != state->base.crtc_w)
- intel_crtc->atomic.update_wm = true;
-
- intel_crtc->atomic.fb_bits |=
- INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
+ return -EINVAL;
}
- return ret;
+ return 0;
}
static void
intel_disable_cursor_plane(struct drm_plane *plane,
- struct drm_crtc *crtc,
- bool force)
+ struct drm_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (!force) {
- plane->fb = NULL;
- intel_crtc->cursor_bo = NULL;
- intel_crtc->cursor_addr = 0;
- }
-
intel_crtc_update_cursor(crtc, false);
}
@@ -13671,9 +13672,9 @@ intel_commit_cursor_plane(struct drm_plane *plane,
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
-update:
- if (intel_crtc->active)
+update:
+ if (crtc->state->active)
intel_crtc_update_cursor(crtc, state->visible);
}
@@ -13698,6 +13699,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
+ cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
cursor->check_plane = intel_check_cursor_plane;
cursor->commit_plane = intel_commit_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
@@ -13738,8 +13740,6 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
for (i = 0; i < intel_crtc->num_scalers; i++) {
intel_scaler = &scaler_state->scalers[i];
intel_scaler->in_use = 0;
- intel_scaler->id = i;
-
intel_scaler->mode = PS_SCALER_MODE_DYN;
}
@@ -13811,6 +13811,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->cursor_cntl = ~0;
intel_crtc->cursor_size = ~0;
+ intel_crtc->wm.cxsr_allowed = true;
+
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
@@ -13945,8 +13947,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
- if (found ||
- (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
+ if (found || IS_SKYLAKE(dev))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -14022,18 +14023,18 @@ static void intel_setup_outputs(struct drm_device *dev)
}
intel_dsi_init(dev);
- } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+ } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, true);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ if (!found && IS_G4X(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
- if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ if (!found && IS_G4X(dev))
intel_dp_init(dev, DP_B, PORT_B);
}
@@ -14046,15 +14047,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
- if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ if (IS_G4X(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
- if (SUPPORTS_INTEGRATED_DP(dev))
+ if (IS_G4X(dev))
intel_dp_init(dev, DP_C, PORT_C);
}
- if (SUPPORTS_INTEGRATED_DP(dev) &&
+ if (IS_G4X(dev) &&
(I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
} else if (IS_GEN2(dev))
@@ -14099,9 +14100,27 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
return drm_gem_handle_create(file, &obj->base, handle);
}
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct drm_device *dev = fb->dev;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+
+ mutex_lock(&dev->struct_mutex);
+ intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static const struct drm_framebuffer_funcs intel_fb_funcs = {
.destroy = intel_user_framebuffer_destroy,
.create_handle = intel_user_framebuffer_create_handle,
+ .dirty = intel_user_framebuffer_dirty,
};
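Wiring up .dirty means a userspace DIRTYFB ioctl now ends in intel_fb_obj_flush(), so CPU rendering to a shared or dumb framebuffer is reported to the frontbuffer tracking consumers. A sketch of the userspace side using libdrm (illustrative only, not from this patch):

	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* tell the kernel a region of fb_id was just written by the CPU */
	static void flush_fb_region(int drm_fd, uint32_t fb_id,
				    uint16_t x, uint16_t y, uint16_t w, uint16_t h)
	{
		drmModeClip clip = {
			.x1 = x, .y1 = y,
			.x2 = x + w, .y2 = y + h,
		};

		drmModeDirtyFB(drm_fd, fb_id, &clip, 1);
	}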
static
@@ -14296,7 +14315,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
return intel_framebuffer_create(dev, mode_cmd, obj);
}
-#ifndef CONFIG_DRM_I915_FBDEV
+#ifndef CONFIG_DRM_FBDEV_EMULATION
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
@@ -14307,6 +14326,8 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fbdev_output_poll_changed,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
+ .atomic_state_alloc = intel_atomic_state_alloc,
+ .atomic_state_clear = intel_atomic_state_clear,
};
/* Set up chip specific display functions */
@@ -14333,7 +14354,6 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
skylake_update_primary_plane;
} else if (HAS_DDI(dev)) {
@@ -14344,7 +14364,6 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -14355,7 +14374,6 @@ static void intel_init_display(struct drm_device *dev)
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
- dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
@@ -14365,7 +14383,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
} else {
@@ -14375,7 +14392,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
}
@@ -14384,6 +14400,9 @@ static void intel_init_display(struct drm_device *dev)
if (IS_SKYLAKE(dev))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
+ else if (IS_BROXTON(dev))
+ dev_priv->display.get_display_clock_speed =
+ broxton_get_display_clock_speed;
else if (IS_BROADWELL(dev))
dev_priv->display.get_display_clock_speed =
broadwell_get_display_clock_speed;
@@ -14397,9 +14416,21 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
ilk_get_display_clock_speed;
else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
- IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ IS_GEN6(dev) || IS_IVYBRIDGE(dev))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
+ else if (IS_GM45(dev))
+ dev_priv->display.get_display_clock_speed =
+ gm45_get_display_clock_speed;
+ else if (IS_CRESTLINE(dev))
+ dev_priv->display.get_display_clock_speed =
+ i965gm_get_display_clock_speed;
+ else if (IS_PINEVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ pnv_get_display_clock_speed;
+ else if (IS_G33(dev) || IS_G4X(dev))
+ dev_priv->display.get_display_clock_speed =
+ g33_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
@@ -14417,10 +14448,12 @@ static void intel_init_display(struct drm_device *dev)
i865_get_display_clock_speed;
else if (IS_I85X(dev))
dev_priv->display.get_display_clock_speed =
- i855_get_display_clock_speed;
- else /* 852, 830 */
+ i85x_get_display_clock_speed;
+ else { /* 830 */
+ WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
+ }
if (IS_GEN5(dev)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
@@ -14431,12 +14464,22 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+ if (IS_BROADWELL(dev)) {
+ dev_priv->display.modeset_commit_cdclk =
+ broadwell_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ broadwell_modeset_calc_cdclk;
+ }
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.modeset_global_resources =
- valleyview_modeset_global_resources;
+ dev_priv->display.modeset_commit_cdclk =
+ valleyview_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ valleyview_modeset_calc_cdclk;
} else if (IS_BROXTON(dev)) {
- dev_priv->display.modeset_global_resources =
- broxton_modeset_global_resources;
+ dev_priv->display.modeset_commit_cdclk =
+ broxton_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ broxton_modeset_calc_cdclk;
}
switch (INTEL_INFO(dev)->gen) {
@@ -14655,13 +14698,9 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
+ intel_update_cdclk(dev);
intel_prepare_ddi(dev);
-
- if (IS_VALLEYVIEW(dev))
- vlv_update_cdclk(dev);
-
intel_init_clock_gating(dev);
-
intel_enable_gt_powersave(dev);
}
@@ -14741,13 +14780,15 @@ void intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev);
/* Just in case the BIOS is doing something questionable. */
- intel_fbc_disable(dev);
+ intel_fbc_disable(dev_priv);
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, false);
+ intel_modeset_setup_hw_state(dev);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
+ struct intel_initial_plane_config plane_config = {};
+
if (!crtc->active)
continue;
@@ -14758,15 +14799,14 @@ void intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- if (dev_priv->display.get_initial_plane_config) {
- dev_priv->display.get_initial_plane_config(crtc,
- &crtc->plane_config);
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &crtc->plane_config);
- }
+ dev_priv->display.get_initial_plane_config(crtc,
+ &plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, &plane_config);
}
}
@@ -14818,7 +14858,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
u32 reg;
+ bool enable;
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14827,6 +14869,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* restore vblank interrupts to correct state */
drm_crtc_vblank_reset(&crtc->base);
if (crtc->active) {
+ drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
drm_crtc_vblank_on(&crtc->base);
}
@@ -14835,7 +14878,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
- struct intel_connector *connector;
bool plane;
DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
@@ -14847,30 +14889,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
plane = crtc->plane;
to_intel_plane_state(crtc->base.primary->state)->visible = true;
crtc->plane = !plane;
- intel_crtc_disable_planes(&crtc->base);
- dev_priv->display.crtc_disable(&crtc->base);
+ intel_crtc_disable_noatomic(&crtc->base);
crtc->plane = plane;
-
- /* ... and break all links. */
- for_each_intel_connector(dev, connector) {
- if (connector->encoder->base.crtc != &crtc->base)
- continue;
-
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- }
- /* multiple connectors may have the same encoder:
- * handle them and break crtc link separately */
- for_each_intel_connector(dev, connector)
- if (connector->encoder->base.crtc == &crtc->base) {
- connector->encoder->base.crtc = NULL;
- connector->encoder->connectors_active = false;
- }
-
- WARN_ON(crtc->active);
- crtc->base.state->enable = false;
- crtc->base.state->active = false;
- crtc->base.enabled = false;
}
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
@@ -14884,20 +14904,27 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- intel_crtc_update_dpms(&crtc->base);
+ enable = false;
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ enable = true;
+ break;
+ }
- if (crtc->active != crtc->base.state->enable) {
- struct intel_encoder *encoder;
+ if (!enable)
+ intel_crtc_disable_noatomic(&crtc->base);
+
+ if (crtc->active != crtc->base.state->active) {
/* This can happen either due to bugs in the get_hw_state
- * functions or because the pipe is force-enabled due to the
+ * functions or because of calls to intel_crtc_disable_noatomic,
+ * or because the pipe is force-enabled due to the
* pipe A quirk. */
DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
crtc->base.base.id,
crtc->base.state->enable ? "enabled" : "disabled",
crtc->active ? "enabled" : "disabled");
- crtc->base.state->enable = crtc->active;
+ WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
@@ -14908,10 +14935,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* actually up, hence no need to break them. */
WARN_ON(crtc->active);
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- WARN_ON(encoder->connectors_active);
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
encoder->base.crtc = NULL;
- }
}
if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
@@ -14937,6 +14962,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_connector *connector;
struct drm_device *dev = encoder->base.dev;
+ bool active = false;
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
@@ -14944,7 +14970,15 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
- if (encoder->connectors_active && !has_active_crtc) {
+ for_each_intel_connector(dev, connector) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+
+ active = true;
+ break;
+ }
+
+ if (active && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -14961,7 +14995,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->post_disable(encoder);
}
encoder->base.crtc = NULL;
- encoder->connectors_active = false;
/* Inconsistent output/port/pipe state happens presumably due to
* a bug in one of the get_hw_state functions. Or someplace else
@@ -15010,10 +15043,31 @@ static bool primary_get_hw_state(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (!crtc->active)
- return false;
+ return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+}
+
+static void readout_plane_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_plane *p;
+ struct intel_plane_state *plane_state;
+ bool active = crtc_state->base.active;
+
+ for_each_intel_plane(crtc->base.dev, p) {
+ if (crtc->pipe != p->pipe)
+ continue;
+
+ plane_state = to_intel_plane_state(p->base.state);
- return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+ if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+ plane_state->visible = primary_get_hw_state(crtc);
+ else {
+ if (active)
+ p->disable_plane(&p->base, &crtc->base);
+
+ plane_state->visible = false;
+ }
+ }
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15026,22 +15080,44 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
int i;
for_each_intel_crtc(dev, crtc) {
- struct drm_plane *primary = crtc->base.primary;
- struct intel_plane_state *plane_state;
-
+ __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
memset(crtc->config, 0, sizeof(*crtc->config));
-
- crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+ crtc->config->base.crtc = &crtc->base;
crtc->active = dev_priv->display.get_pipe_config(crtc,
crtc->config);
- crtc->base.state->enable = crtc->active;
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
- plane_state = to_intel_plane_state(primary->state);
- plane_state->visible = primary_get_hw_state(crtc);
+ memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+ if (crtc->base.state->active) {
+ intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+ intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+ WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * At this point some state updated by the connectors
+ * in their ->detect() callback has not run yet, so
+ * no recalculation can be done yet.
+ *
+ * Even if we could do a recalculation and modeset
+ * right now it would cause a double modeset if
+ * fbdev or userspace chooses a different initial mode.
+ *
+ * If that happens, someone indicated they wanted a
+ * mode change, which means it's safe to do a full
+ * recalculation.
+ */
+ crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+ }
+
+ crtc->base.hwmode = crtc->config->base.adjusted_mode;
+ readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
@@ -15080,7 +15156,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->base.crtc = NULL;
}
- encoder->connectors_active = false;
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
encoder->base.name,
@@ -15091,7 +15166,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
- connector->encoder->connectors_active = true;
connector->base.encoder = &connector->encoder->base;
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -15104,10 +15178,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
}
-/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
- * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev,
- bool force_restore)
+/* Scan out the current hw modeset state
+ * and sanitize it to the current state.
+ */
+static void
+intel_modeset_setup_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
@@ -15117,21 +15192,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
- /*
- * Now that we have the config, copy it to each CRTC struct
- * Note that this could go away if we move to using crtc_config
- * checking everywhere.
- */
- for_each_intel_crtc(dev, crtc) {
- if (crtc->active && i915.fastboot) {
- intel_mode_from_pipe_config(&crtc->base.mode,
- crtc->config);
- DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
- crtc->base.base.id);
- drm_mode_debug_printmodeline(&crtc->base.mode);
- }
- }
-
/* HW state is read out, now we need to sanitize this mess. */
for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
@@ -15158,29 +15218,73 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
- if (IS_GEN9(dev))
+ if (IS_VALLEYVIEW(dev))
+ vlv_wm_get_hw_state(dev);
+ else if (IS_GEN9(dev))
skl_wm_get_hw_state(dev);
else if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
- if (force_restore) {
- i915_redisable_vga(dev);
+ for_each_intel_crtc(dev, crtc) {
+ unsigned long put_domains;
- /*
- * We need to use raw interfaces for restoring state to avoid
- * checking (bogus) intermediate states.
- */
- for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc =
- dev_priv->pipe_to_crtc_mapping[pipe];
+ put_domains = modeset_get_crtc_power_domains(&crtc->base);
+ if (WARN_ON(put_domains))
+ modeset_put_power_domains(dev_priv, put_domains);
+ }
+ intel_display_set_init_power(dev_priv, false);
+}
- intel_crtc_restore_mode(crtc);
- }
- } else {
- intel_modeset_update_staged_output_state(dev);
+void intel_display_resume(struct drm_device *dev)
+{
+ struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
+ struct intel_connector *conn;
+ struct intel_plane *plane;
+ struct drm_crtc *crtc;
+ int ret;
+
+ if (!state)
+ return;
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ /* preserve complete old state, including dpll */
+ intel_atomic_get_shared_dpll_state(state);
+
+ for_each_crtc(dev, crtc) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_crtc_state(state, crtc);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto err;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+ }
+
+ for_each_intel_plane(dev, plane) {
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
+ if (ret)
+ goto err;
}
- intel_modeset_check_state(dev);
+ for_each_intel_connector(dev, conn) {
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
+ if (ret)
+ goto err;
+ }
+
+ intel_modeset_setup_hw_state(dev);
+
+ i915_redisable_vga(dev);
+ ret = drm_atomic_commit(state);
+ if (!ret)
+ return;
+
+err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_free(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -15222,14 +15326,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
ret = intel_pin_and_fence_fb_obj(c->primary,
c->primary->fb,
c->primary->state,
- NULL);
+ NULL, NULL);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
c->primary->fb = NULL;
+ c->primary->crtc = c->primary->state->crtc = NULL;
update_state_fb(c->primary);
+ c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
}
}
@@ -15266,13 +15372,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
drm_kms_helper_poll_fini(dev);
- mutex_lock(&dev->struct_mutex);
-
intel_unregister_dsm_handler();
- intel_fbc_disable(dev);
-
- mutex_unlock(&dev->struct_mutex);
+ intel_fbc_disable(dev_priv);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e8faa253792..016e7bc6af0a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -91,6 +91,8 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
+static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
+ 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
@@ -565,7 +567,9 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_BROXTON(dev))
+ return BXT_PP_CONTROL(0);
+ else if (HAS_PCH_SPLIT(dev))
return PCH_PP_CONTROL;
else
return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
@@ -575,7 +579,9 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_BROXTON(dev))
+ return BXT_PP_STATUS(0);
+ else if (HAS_PCH_SPLIT(dev))
return PCH_PP_STATUS;
else
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
@@ -708,7 +714,8 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return 0;
if (intel_dig_port->port == PORT_A) {
- return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
+ return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+
} else {
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
}
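Both AUX clock divider hooks now read the cached dev_priv->cdclk_freq instead of calling back into get_display_clock_speed(). Assuming the cached value is in kHz (as the divide by 2000 suggests), a 450 MHz CDCLK gives DIV_ROUND_UP(450000, 2000) = 225, i.e. a divider aimed at roughly a 2 MHz AUX clock.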
@@ -723,7 +730,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
if (intel_dig_port->port == PORT_A) {
if (index)
return 0;
- return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
+ return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
@@ -842,8 +849,15 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
}
if (try == 3) {
- WARN(1, "dp_aux_ch not started status 0x%08x\n",
- I915_READ(ch_ctl));
+ static u32 last_status = -1;
+ const u32 status = I915_READ(ch_ctl);
+
+ if (status != last_status) {
+ WARN(1, "dp_aux_ch not started status 0x%08x\n",
+ status);
+ last_status = status;
+ }
+
ret = -EBUSY;
goto out;
}
@@ -1019,11 +1033,34 @@ static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
const char *name = NULL;
+ uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
int ret;
+ /* On SKL we don't have Aux for port E so we rely on VBT to set
+ * a proper alternate aux channel.
+ */
+ if (IS_SKYLAKE(dev) && port == PORT_E) {
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_B:
+ porte_aux_ctl_reg = DPB_AUX_CH_CTL;
+ break;
+ case DP_AUX_C:
+ porte_aux_ctl_reg = DPC_AUX_CH_CTL;
+ break;
+ case DP_AUX_D:
+ porte_aux_ctl_reg = DPD_AUX_CH_CTL;
+ break;
+ case DP_AUX_A:
+ default:
+ porte_aux_ctl_reg = DPA_AUX_CH_CTL;
+ }
+ }
+
switch (port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
@@ -1041,6 +1078,10 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
name = "DPDDC-D";
break;
+ case PORT_E:
+ intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
+ name = "DPDDC-E";
+ break;
default:
BUG();
}
@@ -1054,7 +1095,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
*
* Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
*/
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
intel_dp->aux.name = name;
@@ -1172,7 +1213,10 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
- if (IS_SKYLAKE(dev)) {
+ if (IS_BROXTON(dev)) {
+ *source_rates = bxt_rates;
+ return ARRAY_SIZE(bxt_rates);
+ } else if (IS_SKYLAKE(dev)) {
*source_rates = skl_rates;
return ARRAY_SIZE(skl_rates);
} else if (IS_CHERRYVIEW(dev)) {
@@ -1374,7 +1418,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (INTEL_INFO(dev)->gen >= 9) {
int ret;
- ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
+ ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
}
@@ -1399,7 +1443,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
* bpc in between. */
bpp = pipe_config->pipe_bpp;
if (is_edp(intel_dp)) {
- if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+
+ /* Get bpp from VBT only for panels that don't have bpp in EDID */
+ if (intel_connector->base.display_info.bpc == 0 &&
+ (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
bpp = dev_priv->vbt.edp_bpp;
@@ -1699,8 +1746,10 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
control = I915_READ(_pp_ctrl_reg(intel_dp));
- control &= ~PANEL_UNLOCK_MASK;
- control |= PANEL_UNLOCK_REGS;
+ if (!IS_BROXTON(dev)) {
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ }
return control;
}
@@ -2612,7 +2661,7 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
- WARN(encoder->connectors_active,
+ WARN(encoder->base.crtc,
"stealing pipe %c power sequencer from active eDP port %c\n",
pipe_name(pipe), port_name(port));
@@ -3414,92 +3463,6 @@ gen7_edp_signal_levels(uint8_t train_set)
}
}
-/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
-static uint32_t
-hsw_signal_levels(uint8_t train_set)
-{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return DDI_BUF_TRANS_SELECT(0);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return DDI_BUF_TRANS_SELECT(1);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- return DDI_BUF_TRANS_SELECT(2);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
- return DDI_BUF_TRANS_SELECT(3);
-
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return DDI_BUF_TRANS_SELECT(4);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return DDI_BUF_TRANS_SELECT(5);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- return DDI_BUF_TRANS_SELECT(6);
-
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return DDI_BUF_TRANS_SELECT(7);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return DDI_BUF_TRANS_SELECT(8);
-
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return DDI_BUF_TRANS_SELECT(9);
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return DDI_BUF_TRANS_SELECT(0);
- }
-}
-
-static void bxt_signal_levels(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- enum port port = dport->port;
- struct drm_device *dev = dport->base.base.dev;
- struct intel_encoder *encoder = &dport->base;
- uint8_t train_set = intel_dp->train_set[0];
- uint32_t level = 0;
-
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 0;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 1;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- level = 2;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
- level = 3;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 4;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 5;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- level = 6;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 7;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 8;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 9;
- break;
- }
-
- bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
-}
-
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -3507,22 +3470,20 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
- uint32_t signal_levels, mask;
+ uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_BROXTON(dev)) {
- signal_levels = 0;
- bxt_signal_levels(intel_dp);
- mask = 0;
- } else if (HAS_DDI(dev)) {
- signal_levels = hsw_signal_levels(train_set);
- mask = DDI_BUF_EMP_MASK;
+ if (HAS_DDI(dev)) {
+ signal_levels = ddi_signal_levels(intel_dp);
+
+ if (IS_BROXTON(dev))
+ signal_levels = 0;
+ else
+ mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
signal_levels = chv_signal_levels(intel_dp);
- mask = 0;
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = vlv_signal_levels(intel_dp);
- mask = 0;
} else if (IS_GEN7(dev) && port == PORT_A) {
signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
@@ -4034,43 +3995,67 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
return intel_dp->is_mst;
}
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
- int test_crc_count;
- int attempts = 6;
- int ret = 0;
- hsw_disable_ips(intel_crtc);
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto out;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
+ DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+ return;
}
- if (!(buf & DP_TEST_CRC_SUPPORTED)) {
- ret = -ENOTTY;
- goto out;
- }
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+ buf & ~DP_TEST_SINK_START) < 0)
+ DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
- ret = -EIO;
- goto out;
- }
+ hsw_enable_ips(intel_crtc);
+}
+
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u8 buf;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
+ return -EIO;
+
+ if (!(buf & DP_TEST_CRC_SUPPORTED))
+ return -ENOTTY;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+ return -EIO;
+
+ hsw_disable_ips(intel_crtc);
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf | DP_TEST_SINK_START) < 0) {
- ret = -EIO;
- goto out;
+ buf | DP_TEST_SINK_START) < 0) {
+ hsw_enable_ips(intel_crtc);
+ return -EIO;
}
+ return 0;
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u8 buf;
+ int test_crc_count;
+ int attempts = 6;
+ int ret;
+
+ ret = intel_dp_sink_crc_start(intel_dp);
+ if (ret)
+ return ret;
+
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
ret = -EIO;
- goto out;
+ goto stop;
}
test_crc_count = buf & DP_TEST_COUNT_MASK;
@@ -4079,7 +4064,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
ret = -EIO;
- goto out;
+ goto stop;
}
intel_wait_for_vblank(dev, intel_crtc->pipe);
} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
@@ -4087,25 +4072,13 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
if (attempts == 0) {
DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
ret = -ETIMEDOUT;
- goto out;
+ goto stop;
}
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
ret = -EIO;
- goto out;
- }
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
- ret = -EIO;
- goto out;
- }
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf & ~DP_TEST_SINK_START) < 0) {
- ret = -EIO;
- goto out;
- }
-out:
- hsw_enable_ips(intel_crtc);
+stop:
+ intel_dp_sink_crc_stop(intel_dp);
return ret;
}
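Splitting out intel_dp_sink_crc_start()/_stop() lets every failure after the sink has been started funnel through the stop path, so TEST_SINK is always cleared and IPS re-enabled. A sketch of a consumer (a debugfs-style dump; names illustrative, not part of this patch):

	u8 crc[6];
	int ret = intel_dp_sink_crc(intel_dp, crc);

	if (ret == 0)
		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
	else
		seq_printf(m, "sink CRC failed: %d\n", ret);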
@@ -4166,9 +4139,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
intel_dp->aux.i2c_defer_count);
intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
+ struct edid *block = intel_connector->detect_edid;
+
+ /* We have to write the checksum
+ * of the last block read
+ */
+ block += intel_connector->detect_edid->extensions;
+
if (!drm_dp_dpcd_write(&intel_dp->aux,
DP_TEST_EDID_CHECKSUM,
- &intel_connector->detect_edid->checksum,
+ &block->checksum,
1))
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
@@ -4316,10 +4296,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- if (!intel_encoder->connectors_active)
- return;
-
- if (WARN_ON(!intel_encoder->base.crtc))
+ if (!intel_encoder->base.crtc)
return;
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
@@ -4900,7 +4877,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_detect,
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -4922,12 +4899,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-void
-intel_dp_hot_plug(struct intel_encoder *intel_encoder)
-{
- return;
-}
-
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
@@ -5095,8 +5066,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
- u32 pp_on, pp_off, pp_div, pp;
- int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+ u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+ int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -5104,7 +5075,16 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
if (final->t11_t12 != 0)
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_BROXTON(dev)) {
+ /*
+ * TODO: BXT has 2 sets of PPS registers.
+ * The correct register for Broxton needs to be identified
+ * using VBT; hardcoded for now.
+ */
+ pp_ctrl_reg = BXT_PP_CONTROL(0);
+ pp_on_reg = BXT_PP_ON_DELAYS(0);
+ pp_off_reg = BXT_PP_OFF_DELAYS(0);
+ } else if (HAS_PCH_SPLIT(dev)) {
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
@@ -5120,12 +5100,14 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
- pp = ironlake_get_pp_control(intel_dp);
- I915_WRITE(pp_ctrl_reg, pp);
+ pp_ctl = ironlake_get_pp_control(intel_dp);
pp_on = I915_READ(pp_on_reg);
pp_off = I915_READ(pp_off_reg);
- pp_div = I915_READ(pp_div_reg);
+ if (!IS_BROXTON(dev)) {
+ I915_WRITE(pp_ctrl_reg, pp_ctl);
+ pp_div = I915_READ(pp_div_reg);
+ }
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -5140,8 +5122,17 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ if (IS_BROXTON(dev)) {
+ u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+ BXT_POWER_CYCLE_DELAY_SHIFT;
+ if (tmp > 0)
+ cur.t11_t12 = (tmp - 1) * 1000;
+ else
+ cur.t11_t12 = 0;
+ } else {
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+ }
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
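On Broxton the panel power-cycle delay is kept in PP_CONTROL rather than a divisor register, apparently encoded in whole seconds with a +1 bias (reading this hunk together with the write-back below): t11_t12 = 5000 ms is stored as DIV_ROUND_UP(5000 + 1, 1000) = 6, and reading the field back yields (6 - 1) * 1000 = 5000 ms.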
@@ -5198,13 +5189,23 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
- int pp_on_reg, pp_off_reg, pp_div_reg;
+ int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_BROXTON(dev)) {
+ /*
+ * TODO: BXT has 2 sets of PPS registers.
+ * The correct register for Broxton needs to be identified
+ * using VBT; hardcoded for now.
+ */
+ pp_ctrl_reg = BXT_PP_CONTROL(0);
+ pp_on_reg = BXT_PP_ON_DELAYS(0);
+ pp_off_reg = BXT_PP_OFF_DELAYS(0);
+
+ } else if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
@@ -5230,9 +5231,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
- pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
- << PANEL_POWER_CYCLE_DELAY_SHIFT);
+ if (IS_BROXTON(dev)) {
+ pp_div = I915_READ(pp_ctrl_reg);
+ pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+ << BXT_POWER_CYCLE_DELAY_SHIFT);
+ } else {
+ pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
+ }
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
@@ -5249,11 +5257,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(pp_on_reg, pp_on);
I915_WRITE(pp_off_reg, pp_off);
- I915_WRITE(pp_div_reg, pp_div);
+ if (IS_BROXTON(dev))
+ I915_WRITE(pp_ctrl_reg, pp_div);
+ else
+ I915_WRITE(pp_div_reg, pp_div);
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(pp_on_reg),
I915_READ(pp_off_reg),
+ IS_BROXTON(dev) ?
+ (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(pp_div_reg));
}
@@ -5458,13 +5471,12 @@ unlock:
}
/**
- * intel_edp_drrs_invalidate - Invalidate DRRS
+ * intel_edp_drrs_invalidate - Disable Idleness DRRS
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
- * When there is a disturbance on screen (due to cursor movement/time
- * update etc), DRRS needs to be invalidated, i.e. need to switch to
- * high RR.
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
@@ -5489,26 +5501,27 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
- if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+
+ /* invalidate means busy screen hence upclock */
+ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv->dev,
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
- }
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
- dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->drrs.mutex);
}
/**
- * intel_edp_drrs_flush - Flush DRRS
+ * intel_edp_drrs_flush - Restart Idleness DRRS
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
- * When there is no movement on screen, DRRS work can be scheduled.
- * This DRRS work is responsible for setting relevant registers after a
- * timeout of 1 second.
+ * This function gets called every time rendering on the given planes has
+ * completed or a flip on a crtc has completed. So DRRS should be upclocked
+ * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
+ * other planes are dirty.
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
@@ -5532,10 +5545,21 @@ void intel_edp_drrs_flush(struct drm_device *dev,
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
- if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
- !dev_priv->drrs.busy_frontbuffer_bits)
+ /* flush means busy screen hence upclock */
+ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv->dev,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
+
+ /*
+ * flush also means no more activity hence schedule downclock, if all
+ * other fbs are quiescent too
+ */
+ if (!dev_priv->drrs.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->drrs.work,
msecs_to_jiffies(1000));
mutex_unlock(&dev_priv->drrs.mutex);
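Both DRRS hunks above now follow the same bookkeeping pattern; a condensed sketch of it, not part of the patch and with a made-up helper name:

static void drrs_track_frontbuffer(struct drm_i915_private *dev_priv,
				   enum pipe pipe,
				   unsigned int frontbuffer_bits,
				   bool invalidate)
{
	/* only bits belonging to the DRRS pipe are of interest */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	if (invalidate)
		dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	else
		dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* any activity on the pipe forces the high refresh rate */
	if (frontbuffer_bits &&
	    dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					dev_priv->drrs.dp->attached_connector->panel.
					fixed_mode->vrefresh);

	/* only a flush that leaves no busy bits behind re-arms the
	 * one second idleness timer that downclocks again */
	if (!invalidate && !dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
}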
@@ -5939,10 +5963,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
intel_encoder->cloneable = 0;
- intel_encoder->hot_plug = intel_dp_hot_plug;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hpd_irq_port[port] = intel_dig_port;
+ dev_priv->hotplug.irq_port[port] = intel_dig_port;
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
@@ -5958,7 +5981,7 @@ void intel_dp_mst_suspend(struct drm_device *dev)
/* disable MST */
for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
if (!intel_dig_port)
continue;
@@ -5977,7 +6000,7 @@ void intel_dp_mst_resume(struct drm_device *dev)
int i;
for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
if (!intel_dig_port)
continue;
if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6e4cc5334f47..369f8b6b804f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -328,7 +328,7 @@ intel_dp_mst_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+
+ return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
+ .atomic_best_encoder = intel_mst_atomic_best_encoder,
.best_encoder = intel_mst_best_encoder,
};
@@ -395,7 +406,7 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
@@ -403,7 +414,7 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
@@ -441,10 +452,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
- drm_reinit_primary_mode_group(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
drm_connector_register(&intel_connector->base);
return connector;
}
@@ -454,19 +464,28 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+
/* need to nuke the connector */
- mutex_lock(&dev->mode_config.mutex);
- intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (connector->state->crtc) {
+ struct drm_mode_set set;
+ int ret;
+
+ memset(&set, 0, sizeof(set));
+ set.crtc = connector->state->crtc;
+
+ ret = drm_atomic_helper_set_config(&set);
+
+ WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+ }
+ drm_modeset_unlock_all(dev);
intel_connector->unregister(intel_connector);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
- mutex_unlock(&dev->mode_config.mutex);
-
- drm_reinit_primary_mode_group(dev);
+ drm_modeset_unlock_all(dev);
kfree(intel_connector);
DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 105928382e21..93008fbb815d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -130,15 +130,9 @@ struct intel_fbdev {
struct intel_encoder {
struct drm_encoder base;
- /*
- * The new crtc this encoder will be driven from. Only differs from
- * base->crtc while a modeset is in progress.
- */
- struct intel_crtc *new_crtc;
enum intel_output_type type;
unsigned int cloneable;
- bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *);
@@ -182,6 +176,10 @@ struct intel_panel {
bool enabled;
bool combination_mode; /* gen 2/4 only */
bool active_low_pwm;
+
+ /* PWM chip */
+ struct pwm_device *pwm;
+
struct backlight_device *device;
} backlight;
@@ -195,12 +193,6 @@ struct intel_connector {
*/
struct intel_encoder *encoder;
- /*
- * The new encoder this connector will be driven. Only differs from
- * encoder while a modeset is in progress.
- */
- struct intel_encoder *new_encoder;
-
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
@@ -241,6 +233,14 @@ typedef struct dpll {
int p;
} intel_clock_t;
+struct intel_atomic_state {
+ struct drm_atomic_state base;
+
+ unsigned int cdclk;
+ bool dpll_set;
+ struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+};
+
struct intel_plane_state {
struct drm_plane_state base;
struct drm_rect src;
@@ -256,7 +256,7 @@ struct intel_plane_state {
* plane requiring a scaler:
* - During check_plane, its bit is set in
* crtc_state->scaler_state.scaler_users by calling helper function
- * update_scaler_users.
+ * update_scaler_plane.
* - scaler_id indicates the scaler it got assigned.
*
* plane doesn't require a scaler:
@@ -264,9 +264,11 @@ struct intel_plane_state {
* got disabled.
* - During check_plane, corresponding bit is reset in
* crtc_state->scaler_state.scaler_users by calling helper function
- * update_scaler_users.
+ * update_scaler_plane.
*/
int scaler_id;
+
+ struct drm_intel_sprite_colorkey ckey;
};
struct intel_initial_plane_config {
@@ -286,7 +288,6 @@ struct intel_initial_plane_config {
#define SKL_MAX_DST_H 4096
struct intel_scaler {
- int id;
int in_use;
uint32_t mode;
};
@@ -319,6 +320,9 @@ struct intel_crtc_scaler_state {
int scaler_id;
};
+/* drm_mode->private_flags */
+#define I915_MODE_FLAG_INHERITED 1
+
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -331,7 +335,6 @@ struct intel_crtc_state {
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
-#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
/* Pipe source size (ie. panel fitter input size)
@@ -447,6 +450,18 @@ struct intel_crtc_state {
int pbn;
struct intel_crtc_scaler_state scaler_state;
+
+ /* w/a for waiting 2 vblanks during crtc enable */
+ enum pipe hsw_workaround_pipe;
+};
+
+struct vlv_wm_state {
+ struct vlv_pipe_wm wm[3];
+ struct vlv_sr_wm sr[3];
+ uint8_t num_active_planes;
+ uint8_t num_levels;
+ uint8_t level;
+ bool cxsr;
};
struct intel_pipe_wm {
@@ -478,16 +493,13 @@ struct skl_pipe_wm {
* and thus can't be run with interrupts disabled.
*/
struct intel_crtc_atomic_commit {
- /* vblank evasion */
- bool evade;
- unsigned start_vbl_count;
-
/* Sleepable operations to perform before commit */
bool wait_for_flips;
bool disable_fbc;
bool disable_ips;
+ bool disable_cxsr;
bool pre_disable_primary;
- bool update_wm;
+ bool update_wm_pre, update_wm_post;
unsigned disabled_planes;
/* Sleepable operations to perform after commit */
@@ -527,9 +539,7 @@ struct intel_crtc {
uint32_t cursor_size;
uint32_t cursor_base;
- struct intel_initial_plane_config plane_config;
struct intel_crtc_state *config;
- bool new_enabled;
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
@@ -544,14 +554,19 @@ struct intel_crtc {
struct intel_pipe_wm active;
/* SKL wm values currently in use */
struct skl_pipe_wm skl_active;
+ /* allow CxSR on this pipe */
+ bool cxsr_allowed;
} wm;
int scanline_offset;
+ unsigned start_vbl_count;
struct intel_crtc_atomic_commit atomic;
/* scalers available on this crtc */
int num_scalers;
+
+ struct vlv_wm_state wm_state;
};
struct intel_plane_wm_parameters {
@@ -570,6 +585,7 @@ struct intel_plane_wm_parameters {
bool scaled;
u64 tiling;
unsigned int rotation;
+ uint16_t fifo_size;
};
struct intel_plane {
@@ -578,9 +594,7 @@ struct intel_plane {
enum pipe pipe;
bool can_scale;
int max_downscale;
-
- /* FIXME convert to properties */
- struct drm_intel_sprite_colorkey ckey;
+ uint32_t frontbuffer_bit;
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
@@ -603,8 +617,9 @@ struct intel_plane {
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
- struct drm_crtc *crtc, bool force);
+ struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
+ struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
@@ -629,6 +644,7 @@ struct cxsr_latency {
unsigned long cursor_hpll_disable;
};
+#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
@@ -940,43 +956,23 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
- enum port port, int type);
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits);
-/**
- * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. This is for
- * synchronous plane updates which will happen on the next vblank and which will
- * not get delayed by pending gpu rendering.
- *
- * Can be called without any locks held.
- */
-static inline
void intel_frontbuffer_flip(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
+ unsigned frontbuffer_bits);
unsigned int intel_fb_align_height(struct drm_device *dev,
unsigned int height,
uint32_t pixel_format,
uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
-
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+ enum fb_op_origin origin);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format);
@@ -994,15 +990,11 @@ int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
-void intel_crtc_control(struct drm_crtc *crtc, bool enable);
-void intel_crtc_reset(struct intel_crtc *crtc);
-void intel_crtc_update_dpms(struct drm_crtc *crtc);
+int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
-void intel_connector_dpms(struct drm_connector *, int mode);
bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_modeset_check_state(struct drm_device *dev);
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -1035,7 +1027,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
- struct intel_engine_cs *pipelined);
+ struct intel_engine_cs *pipelined,
+ struct drm_i915_gem_request **pipelined_request);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1058,6 +1051,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state);
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
@@ -1072,9 +1067,6 @@ intel_rotation_90_or_270(unsigned int rotation)
void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane);
-bool intel_wm_need_update(struct drm_plane *plane,
- struct drm_plane_state *state);
-
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -1084,7 +1076,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
-void intel_put_shared_dpll(struct intel_crtc *crtc);
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
@@ -1104,7 +1095,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
@@ -1114,7 +1106,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_device *dev);
void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_set_cdclk(struct drm_device *dev, int frequency);
void broxton_ddi_phy_init(struct drm_device *dev);
void broxton_ddi_phy_uninit(struct drm_device *dev);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
@@ -1130,6 +1121,8 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock);
+int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
@@ -1139,10 +1132,8 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
-void skl_detach_scalers(struct intel_crtc *intel_crtc);
-int skl_update_scaler_users(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
- struct intel_plane_state *plane_state, int force_detach);
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
@@ -1207,7 +1198,7 @@ void intel_dvo_init(struct drm_device *dev);
/* legacy fbdev emulation in intel_fbdev.c */
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_fini(struct drm_device *dev);
@@ -1238,15 +1229,18 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
#endif
/* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_fbc_update(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
+void intel_fbc_update(struct drm_i915_private *dev_priv);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_disable(struct drm_device *dev);
+void intel_fbc_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
+ unsigned int frontbuffer_bits, enum fb_op_origin origin);
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1314,11 +1308,13 @@ void intel_backlight_unregister(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits);
+ unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits);
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev);
+void intel_psr_single_frame_update(struct drm_device *dev,
+ unsigned frontbuffer_bits);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -1372,11 +1368,12 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_device *dev,
struct drm_i915_gem_request *req);
+void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
-
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
@@ -1384,10 +1381,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
/* intel_sprite.c */
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-bool intel_pipe_update_start(struct intel_crtc *crtc,
+void intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
@@ -1395,11 +1391,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
void intel_tv_init(struct drm_device *dev);
/* intel_atomic.c */
-int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state);
-int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool async);
int intel_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
@@ -1407,6 +1398,11 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *);
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
+
static inline struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b5a5558ecd63..4a601cf90f16 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -31,6 +31,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_mipi_dsi.h>
#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
@@ -261,11 +262,6 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
-static void intel_dsi_hot_plug(struct intel_encoder *encoder)
-{
- DRM_DEBUG_KMS("\n");
-}
-
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
@@ -401,6 +397,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
intel_dsi_port_enable(encoder);
}
+
+ intel_panel_enable_backlight(intel_dsi->attached_connector);
}
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
@@ -415,15 +413,21 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
+ /* Panel Enable over CRC PMIC */
+ if (intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+
+ msleep(intel_dsi->panel_on_delay);
+
/* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled */
tmp = I915_READ(DPLL(pipe));
- tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+ tmp |= DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
/* update the hw state for DPLL */
- intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
- DPLL_REFA_CLK_ENABLE_VLV;
+ intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
@@ -432,8 +436,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
/* put device in ready state */
intel_dsi_device_ready(encoder);
- msleep(intel_dsi->panel_on_delay);
-
drm_panel_prepare(intel_dsi->panel);
for_each_dsi_port(port, intel_dsi->ports)
@@ -461,6 +463,8 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
+ intel_panel_disable_backlight(intel_dsi->attached_connector);
+
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
for_each_dsi_port(port, intel_dsi->ports)
@@ -576,6 +580,10 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
+
+ /* Panel Disable over CRC PMIC */
+ if (intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -955,6 +963,11 @@ static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
/* XXX: Logically this call belongs in the panel driver. */
drm_panel_remove(intel_dsi->panel);
}
+
+ /* dispose of the gpios */
+ if (intel_dsi->gpio_panel)
+ gpiod_put(intel_dsi->gpio_panel);
+
intel_encoder_destroy(encoder);
}
@@ -969,7 +982,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
};
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dsi_detect,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1022,7 +1035,6 @@ void intel_dsi_init(struct drm_device *dev)
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
/* XXX: very likely not all of these are needed */
- intel_encoder->hot_plug = intel_dsi_hot_plug;
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1071,6 +1083,20 @@ void intel_dsi_init(struct drm_device *dev)
goto err;
}
+ /*
+ * In case of BYT with a CRC PMIC, we need to use a GPIO for
+ * panel control.
+ */
+ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ intel_dsi->gpio_panel =
+ gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
+
+ if (IS_ERR(intel_dsi->gpio_panel)) {
+ DRM_ERROR("Failed to own gpio for panel control\n");
+ intel_dsi->gpio_panel = NULL;
+ }
+ }
+
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
@@ -1104,6 +1130,7 @@ void intel_dsi_init(struct drm_device *dev)
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
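Taken together, the intel_dsi.c hunks above give the CRC PMIC panel GPIO a complete lifecycle; summarised here for convenience (editorial summary, this comment is not in the patch):

/*
 * intel_dsi_init():            gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH)
 *                              when the VBT reports PPS_BLC_PMIC
 * intel_dsi_pre_enable():      gpiod_set_value_cansleep(gpio_panel, 1),
 *                              then wait panel_on_delay
 * intel_dsi_post_disable():    after the panel off / power cycle delays,
 *                              gpiod_set_value_cansleep(gpio_panel, 0)
 * intel_dsi_encoder_destroy(): gpiod_put(gpio_panel)
 */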
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 2784ac442368..42a68593e32a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -42,6 +42,9 @@ struct intel_dsi {
struct drm_panel *panel;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
+ /* GPIO Desc for CRC based Panel control */
+ struct gpio_desc *gpio_panel;
+
struct intel_connector *attached_connector;
/* bit mask of ports being driven */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index d20cf37b6901..c6a8975b128f 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -38,6 +38,27 @@
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4
+static int dsi_pixel_format_bpp(int pixel_format)
+{
+ int bpp;
+
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
+ }
+
+ return bpp;
+}
+
struct dsi_mnp {
u32 dsi_pll_ctrl;
u32 dsi_pll_div;
@@ -46,8 +67,8 @@ struct dsi_mnp {
static const u32 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
- 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
- 71, 35 /* 91 - 92 */
+ 106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};
#ifdef DSI_CLK_FROM_RR
@@ -65,19 +86,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
u32 dsi_bit_clock_hz;
u32 dsi_clk;
- switch (pixel_format) {
- default:
- case VID_MODE_FORMAT_RGB888:
- case VID_MODE_FORMAT_RGB666_LOOSE:
- bpp = 24;
- break;
- case VID_MODE_FORMAT_RGB666:
- bpp = 18;
- break;
- case VID_MODE_FORMAT_RGB565:
- bpp = 16;
- break;
- }
+ bpp = dsi_pixel_format_bpp(pixel_format);
hactive = mode->hdisplay;
vactive = mode->vdisplay;
@@ -137,21 +146,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
- u32 bpp;
-
- switch (pixel_format) {
- default:
- case VID_MODE_FORMAT_RGB888:
- case VID_MODE_FORMAT_RGB666_LOOSE:
- bpp = 24;
- break;
- case VID_MODE_FORMAT_RGB666:
- bpp = 18;
- break;
- case VID_MODE_FORMAT_RGB565:
- bpp = 16;
- break;
- }
+ u32 bpp = dsi_pixel_format_bpp(pixel_format);
/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */
@@ -162,11 +157,13 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
#endif
-static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
+static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+ struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
unsigned int calc_m = 0, calc_p = 0;
- unsigned int m, n = 1, p;
- int ref_clk = 25000;
+ unsigned int m_min, m_max, p_min = 2, p_max = 6;
+ unsigned int m, n, p;
+ int ref_clk;
int delta = target_dsi_clk;
u32 m_seed;
@@ -176,8 +173,20 @@ static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
return -ECHRNG;
}
- for (m = 62; m <= 92 && delta; m++) {
- for (p = 2; p <= 6 && delta; p++) {
+ if (IS_CHERRYVIEW(dev_priv)) {
+ ref_clk = 100000;
+ n = 4;
+ m_min = 70;
+ m_max = 96;
+ } else {
+ ref_clk = 25000;
+ n = 1;
+ m_min = 62;
+ m_max = 92;
+ }
+
+ for (m = m_min; m <= m_max && delta; m++) {
+ for (p = p_min; p <= p_max && delta; p++) {
/*
* Find the optimal m and p divisors with minimal delta
* +/- the required clock
@@ -217,7 +226,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+ ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
return;
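The reworked dsi_calc_mnp() keeps the same search loop and only switches its PLL parameters per platform; collected from the hunk above for quick reference:

/*
 *        ref_clk     n     m range     p range
 * VLV      25000     1     62..92      2..6
 * CHV     100000     4     70..96      2..6
 */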
@@ -286,21 +295,7 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
{
- int bpp;
-
- switch (pixel_format) {
- default:
- case VID_MODE_FORMAT_RGB888:
- case VID_MODE_FORMAT_RGB666_LOOSE:
- bpp = 24;
- break;
- case VID_MODE_FORMAT_RGB666:
- bpp = 18;
- break;
- case VID_MODE_FORMAT_RGB565:
- bpp = 16;
- break;
- }
+ int bpp = dsi_pixel_format_bpp(pixel_format);
WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ece5bd754f85..dc532bb61d22 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -196,50 +196,6 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_dvo_dpms(struct drm_connector *connector, int mode)
-{
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- struct drm_crtc *crtc;
- struct intel_crtc_state *config;
-
- /* dvo supports only 2 dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- crtc = intel_dvo->base.base.crtc;
- if (!crtc) {
- intel_dvo->base.connectors_active = false;
- return;
- }
-
- /* We call connector dpms manually below in case pipe dpms doesn't
- * change due to cloning. */
- if (mode == DRM_MODE_DPMS_ON) {
- config = to_intel_crtc(crtc)->config;
-
- intel_dvo->base.connectors_active = true;
-
- intel_crtc_update_dpms(crtc);
-
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
- } else {
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
-
- intel_dvo->base.connectors_active = false;
-
- intel_crtc_update_dpms(crtc);
- }
-
- intel_modeset_check_state(connector->dev);
-}
-
static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -387,7 +343,7 @@ static void intel_dvo_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
- .dpms = intel_dvo_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 6abb83432d4d..1f97fb548c2a 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -41,9 +41,8 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static void i8xx_fbc_disable(struct drm_device *dev)
+static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
dev_priv->fbc.enabled = false;
@@ -65,13 +64,11 @@ static void i8xx_fbc_disable(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static void i8xx_fbc_enable(struct drm_crtc *crtc)
+static void i8xx_fbc_enable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
u32 fbc_ctl;
@@ -84,7 +81,7 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
cfb_pitch = fb->pitches[0];
/* FBC_CTL wants 32B or 64B units */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -93,66 +90,61 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
- if (IS_GEN4(dev)) {
+ if (IS_GEN4(dev_priv)) {
u32 fbc_ctl2;
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+ fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
}
/* enable it... */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
+ if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
- cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+ cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}
-static bool i8xx_fbc_enabled(struct drm_device *dev)
+static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
-static void g4x_fbc_enable(struct drm_crtc *crtc)
+static void g4x_fbc_enable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dev_priv->fbc.enabled = true;
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+ dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
-static void g4x_fbc_disable(struct drm_device *dev)
+static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
dev_priv->fbc.enabled = false;
@@ -167,10 +159,8 @@ static void g4x_fbc_disable(struct drm_device *dev)
}
}
-static bool g4x_fbc_enabled(struct drm_device *dev)
+static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
@@ -180,22 +170,21 @@ static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
POSTING_READ(MSG_FBC_REND_STATE);
}
-static void ilk_fbc_enable(struct drm_crtc *crtc)
+static void ilk_fbc_enable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ int threshold = dev_priv->fbc.threshold;
dev_priv->fbc.enabled = true;
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+ dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
+ threshold++;
- switch (dev_priv->fbc.threshold) {
+ switch (threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -208,28 +197,27 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
break;
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
dpfc_ctl |= obj->fence_reg;
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
}
intel_fbc_nuke(dev_priv);
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
-static void ilk_fbc_disable(struct drm_device *dev)
+static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
dev_priv->fbc.enabled = false;
@@ -244,29 +232,29 @@ static void ilk_fbc_disable(struct drm_device *dev)
}
}
-static bool ilk_fbc_enabled(struct drm_device *dev)
+static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void gen7_fbc_enable(struct drm_crtc *crtc)
+static void gen7_fbc_enable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ int threshold = dev_priv->fbc.threshold;
dev_priv->fbc.enabled = true;
- dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+ dpfc_ctl = 0;
+ if (IS_IVYBRIDGE(dev_priv))
+ dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
+
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
+ threshold++;
- switch (dev_priv->fbc.threshold) {
+ switch (threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -286,39 +274,37 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
- I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+ I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
+ I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
HSW_FBCQ_DIS);
}
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
intel_fbc_nuke(dev_priv);
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
/**
* intel_fbc_enabled - Is FBC enabled?
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
* FIXME: This should be tracked in the plane config eventually
* instead of queried at runtime for most callers.
*/
-bool intel_fbc_enabled(struct drm_device *dev)
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return dev_priv->fbc.enabled;
}
@@ -327,31 +313,33 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
+ struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->fbc.lock);
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
- if (work->crtc->primary->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc);
+ if (crtc_fb == work->fb) {
+ dev_priv->fbc.enable_fbc(work->crtc);
- dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
- dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
- dev_priv->fbc.y = work->crtc->y;
+ dev_priv->fbc.crtc = work->crtc;
+ dev_priv->fbc.fb_id = crtc_fb->base.id;
+ dev_priv->fbc.y = work->crtc->base.y;
}
dev_priv->fbc.fbc_work = NULL;
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->fbc.lock);
kfree(work);
}
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
if (dev_priv->fbc.fbc_work == NULL)
return;
@@ -373,26 +361,24 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
dev_priv->fbc.fbc_work = NULL;
}
-static void intel_fbc_enable(struct drm_crtc *crtc)
+static void intel_fbc_enable(struct intel_crtc *crtc)
{
struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (!dev_priv->display.enable_fbc)
- return;
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
intel_fbc_cancel_work(dev_priv);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
- dev_priv->display.enable_fbc(crtc);
+ dev_priv->fbc.enable_fbc(crtc);
return;
}
work->crtc = crtc;
- work->fb = crtc->primary->fb;
+ work->fb = crtc->base.primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc.fbc_work = work;
@@ -413,75 +399,274 @@ static void intel_fbc_enable(struct drm_crtc *crtc)
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+ intel_fbc_cancel_work(dev_priv);
+
+ dev_priv->fbc.disable_fbc(dev_priv);
+ dev_priv->fbc.crtc = NULL;
+}
+
/**
* intel_fbc_disable - disable FBC
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
*
* This function disables FBC.
*/
-void intel_fbc_disable(struct drm_device *dev)
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!dev_priv->fbc.enable_fbc)
+ return;
- intel_fbc_cancel_work(dev_priv);
+ mutex_lock(&dev_priv->fbc.lock);
+ __intel_fbc_disable(dev_priv);
+ mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/*
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (!dev_priv->display.disable_fbc)
+ if (!dev_priv->fbc.enable_fbc)
return;
- dev_priv->display.disable_fbc(dev);
- dev_priv->fbc.crtc = NULL;
+ mutex_lock(&dev_priv->fbc.lock);
+ if (dev_priv->fbc.crtc == crtc)
+ __intel_fbc_disable(dev_priv);
+ mutex_unlock(&dev_priv->fbc.lock);
}
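The public/__-prefixed split used throughout the rest of this file follows one locking pattern; a schematic sketch (intel_fbc_example_op is a stand-in name for disable/update/cleanup_cfb, not a real symbol):

/* internal helper: caller must already hold fbc.lock */
static void __intel_fbc_example_op(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
	/* ... actual work ... */
}

/* public entry point: checks the vfunc, then takes fbc.lock */
void intel_fbc_example_op(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_example_op(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}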
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
+{
+ switch (reason) {
+ case FBC_OK:
+ return "FBC enabled but currently disabled in hardware";
+ case FBC_UNSUPPORTED:
+ return "unsupported by this chipset";
+ case FBC_NO_OUTPUT:
+ return "no output";
+ case FBC_STOLEN_TOO_SMALL:
+ return "not enough stolen memory";
+ case FBC_UNSUPPORTED_MODE:
+ return "mode incompatible with compression";
+ case FBC_MODE_TOO_LARGE:
+ return "mode too large for compression";
+ case FBC_BAD_PLANE:
+ return "FBC unsupported on plane";
+ case FBC_NOT_TILED:
+ return "framebuffer not tiled or fenced";
+ case FBC_MULTIPLE_PIPES:
+ return "more than one pipe active";
+ case FBC_MODULE_PARAM:
+ return "disabled per module param";
+ case FBC_CHIP_DEFAULT:
+ return "disabled per chip default";
+ case FBC_ROTATION:
+ return "rotation unsupported";
+ case FBC_IN_DBG_MASTER:
+ return "Kernel debugger is active";
+ default:
+ MISSING_CASE(reason);
+ return "unknown reason";
+ }
+}
+
+static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
- return false;
+ return;
dev_priv->fbc.no_fbc_reason = reason;
- return true;
+ DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
}
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc = NULL, *tmp_crtc;
enum pipe pipe;
- bool pipe_a_only = false, one_pipe_only = false;
+ bool pipe_a_only = false;
if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
pipe_a_only = true;
- else if (INTEL_INFO(dev_priv)->gen <= 4)
- one_pipe_only = true;
for_each_pipe(dev_priv, pipe) {
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
if (intel_crtc_active(tmp_crtc) &&
- to_intel_plane_state(tmp_crtc->primary->state)->visible) {
- if (one_pipe_only && crtc) {
- if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- return NULL;
- }
+ to_intel_plane_state(tmp_crtc->primary->state)->visible)
crtc = tmp_crtc;
- }
if (pipe_a_only)
break;
}
- if (!crtc || crtc->primary->fb == NULL) {
- if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
- DRM_DEBUG_KMS("no output, disabling\n");
+ if (!crtc || crtc->primary->fb == NULL)
return NULL;
- }
return crtc;
}
+static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+ int n_pipes = 0;
+ struct drm_crtc *crtc;
+
+ if (INTEL_INFO(dev_priv)->gen > 4)
+ return true;
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (intel_crtc_active(crtc) &&
+ to_intel_plane_state(crtc->primary->state)->visible)
+ n_pipes++;
+ }
+
+ return (n_pipes < 2);
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node,
+ int size,
+ int fb_cpp)
+{
+ int compression_threshold = 1;
+ int ret;
+
+ /* HACK: This code depends on what we will do in *_enable_fbc. If that
+ * code changes, this code needs to change as well.
+ *
+ * The enable_fbc code will attempt to use one of our 2 compression
+ * thresholds, so in that case we only have one fallback left.
+ */
+
+ /* Try to over-allocate to reduce reallocations and fragmentation. */
+ ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
+ if (ret == 0)
+ return compression_threshold;
+
+again:
+ /* HW's ability to limit the CFB is 1:4 */
+ if (compression_threshold > 4 ||
+ (fb_cpp == 2 && compression_threshold == 2))
+ return 0;
+
+ ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
+ if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+ return 0;
+ } else if (ret) {
+ compression_threshold <<= 1;
+ goto again;
+ } else {
+ return compression_threshold;
+ }
+}
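An illustrative walk-through of the search above (editorial, the numbers are made up): for an 8 MiB framebuffer it first tries a 16 MiB stolen node (over-allocation, threshold 1), then 8 MiB (still threshold 1), 4 MiB (threshold 2) and 2 MiB (threshold 4); on gen <= 4 the first failed retry gives up, and otherwise the search returns 0 once the 1:4 limit (1:2 for 16bpp framebuffers) is exceeded.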
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
+ int fb_cpp)
+{
+ struct drm_mm_node *uninitialized_var(compressed_llb);
+ int ret;
+
+ ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+ size, fb_cpp);
+ if (!ret)
+ goto err_llb;
+ else if (ret > 1) {
+ DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+
+ }
+
+ dev_priv->fbc.threshold = ret;
+
+ if (INTEL_INFO(dev_priv)->gen >= 5)
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+ else if (IS_GM45(dev_priv)) {
+ I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+ } else {
+ compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+ if (!compressed_llb)
+ goto err_fb;
+
+ ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+ 4096, 4096);
+ if (ret)
+ goto err_fb;
+
+ dev_priv->fbc.compressed_llb = compressed_llb;
+
+ I915_WRITE(FBC_CFB_BASE,
+ dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+ I915_WRITE(FBC_LL_BASE,
+ dev_priv->mm.stolen_base + compressed_llb->start);
+ }
+
+ dev_priv->fbc.uncompressed_size = size;
+
+ DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+ size);
+
+ return 0;
+
+err_fb:
+ kfree(compressed_llb);
+ i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+err_llb:
+ pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ return -ENOSPC;
+}
+
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc.uncompressed_size == 0)
+ return;
+
+ i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+
+ if (dev_priv->fbc.compressed_llb) {
+ i915_gem_stolen_remove_node(dev_priv,
+ dev_priv->fbc.compressed_llb);
+ kfree(dev_priv->fbc.compressed_llb);
+ }
+
+ dev_priv->fbc.uncompressed_size = 0;
+}
+
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->fbc.enable_fbc)
+ return;
+
+ mutex_lock(&dev_priv->fbc.lock);
+ __intel_fbc_cleanup_cfb(dev_priv);
+ mutex_unlock(&dev_priv->fbc.lock);
+}
+
+static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
+ int fb_cpp)
+{
+ if (size <= dev_priv->fbc.uncompressed_size)
+ return 0;
+
+ /* Release any current block */
+ __intel_fbc_cleanup_cfb(dev_priv);
+
+ return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
+}
+
/**
- * intel_fbc_update - enable/disable FBC as needed
- * @dev: the drm_device
+ * __intel_fbc_update - enable/disable FBC as needed, unlocked
+ * @dev_priv: i915 device instance
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
@@ -498,9 +683,8 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
*
* We need to enable/disable FBC on a global basis.
*/
-void intel_fbc_update(struct drm_device *dev)
+static void __intel_fbc_update(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
@@ -508,22 +692,19 @@ void intel_fbc_update(struct drm_device *dev)
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
- if (!HAS_FBC(dev))
- return;
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
/* disable framebuffer compression in vGPU */
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv->dev))
i915.enable_fbc = 0;
if (i915.enable_fbc < 0) {
- if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
- DRM_DEBUG_KMS("disabled per chip default\n");
+ set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
goto out_disable;
}
if (!i915.enable_fbc) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
+ set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
goto out_disable;
}
@@ -537,8 +718,15 @@ void intel_fbc_update(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
crtc = intel_fbc_find_crtc(dev_priv);
- if (!crtc)
+ if (!crtc) {
+ set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
goto out_disable;
+ }
+
+ if (!multiple_pipes_ok(dev_priv)) {
+ set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
+ goto out_disable;
+ }
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
@@ -547,16 +735,14 @@ void intel_fbc_update(struct drm_device *dev)
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
+ set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
goto out_disable;
}
- if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
max_width = 4096;
max_height = 4096;
- } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
@@ -565,14 +751,12 @@ void intel_fbc_update(struct drm_device *dev)
}
if (intel_crtc->config->pipe_src_w > max_width ||
intel_crtc->config->pipe_src_h > max_height) {
- if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
goto out_disable;
}
- if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+ if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
intel_crtc->plane != PLANE_A) {
- if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
- DRM_DEBUG_KMS("plane not A, disabling compression\n");
+ set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
goto out_disable;
}
@@ -581,25 +765,24 @@ void intel_fbc_update(struct drm_device *dev)
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
- if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
goto out_disable;
}
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+ set_no_fbc_reason(dev_priv, FBC_ROTATION);
goto out_disable;
}
/* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
+ if (in_dbg_master()) {
+ set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
goto out_disable;
+ }
- if (i915_gem_stolen_setup_compression(dev, obj->base.size,
- drm_format_plane_cpp(fb->pixel_format, 0))) {
- if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
- DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
+ set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
goto out_disable;
}
@@ -613,7 +796,7 @@ void intel_fbc_update(struct drm_device *dev)
dev_priv->fbc.y == crtc->y)
return;
- if (intel_fbc_enabled(dev)) {
+ if (intel_fbc_enabled(dev_priv)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
@@ -638,58 +821,87 @@ void intel_fbc_update(struct drm_device *dev)
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_fbc_disable(dev);
+ __intel_fbc_disable(dev_priv);
}
- intel_fbc_enable(crtc);
+ intel_fbc_enable(intel_crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
out_disable:
/* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
+ if (intel_fbc_enabled(dev_priv)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_fbc_disable(dev);
+ __intel_fbc_disable(dev_priv);
}
- i915_gem_stolen_cleanup_compression(dev);
+ __intel_fbc_cleanup_cfb(dev_priv);
+}
+
+/*
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev_priv: i915 device instance
+ *
+ * This function reevaluates the overall state and enables or disables FBC.
+ */
+void intel_fbc_update(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->fbc.enable_fbc)
+ return;
+
+ mutex_lock(&dev_priv->fbc.lock);
+ __intel_fbc_update(dev_priv);
+ mutex_unlock(&dev_priv->fbc.lock);
}
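This locked wrapper around __intel_fbc_update() shows the locking pattern used throughout the patch: the public entry point takes fbc.lock, while the double-underscore worker asserts the lock is held so callers that already own it (invalidate/flush below) can call it directly. A hedged sketch of the pattern with pthreads (names are illustrative):

	#include <pthread.h>

	static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
	static int state;

	/* Unlocked worker: callers must already hold state_lock
	 * (the kernel version uses WARN_ON(!mutex_is_locked(...))). */
	static void __state_update(int v)
	{
		state = v;
	}

	/* Public entry point: takes the lock, then calls the worker. */
	void state_update(int v)
	{
		pthread_mutex_lock(&state_lock);
		__state_update(v);
		pthread_mutex_unlock(&state_lock);
	}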
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct drm_device *dev = dev_priv->dev;
unsigned int fbc_bits;
+ if (!dev_priv->fbc.enable_fbc)
+ return;
+
if (origin == ORIGIN_GTT)
return;
+ mutex_lock(&dev_priv->fbc.lock);
+
if (dev_priv->fbc.enabled)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
else if (dev_priv->fbc.fbc_work)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
- to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+ dev_priv->fbc.fbc_work->crtc->pipe);
else
fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
if (dev_priv->fbc.busy_bits)
- intel_fbc_disable(dev);
+ __intel_fbc_disable(dev_priv);
+
+ mutex_unlock(&dev_priv->fbc.lock);
}
void intel_fbc_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits)
+ unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_device *dev = dev_priv->dev;
+ if (!dev_priv->fbc.enable_fbc)
+ return;
- if (!dev_priv->fbc.busy_bits)
+ if (origin == ORIGIN_GTT)
return;
+ mutex_lock(&dev_priv->fbc.lock);
+
dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
- if (!dev_priv->fbc.busy_bits)
- intel_fbc_update(dev);
+ if (!dev_priv->fbc.busy_bits) {
+ __intel_fbc_disable(dev_priv);
+ __intel_fbc_update(dev_priv);
+ }
+
+ mutex_unlock(&dev_priv->fbc.lock);
}
/**
@@ -702,6 +914,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
+ mutex_init(&dev_priv->fbc.lock);
+
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
@@ -717,25 +931,25 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}
if (INTEL_INFO(dev_priv)->gen >= 7) {
- dev_priv->display.fbc_enabled = ilk_fbc_enabled;
- dev_priv->display.enable_fbc = gen7_fbc_enable;
- dev_priv->display.disable_fbc = ilk_fbc_disable;
+ dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->fbc.enable_fbc = gen7_fbc_enable;
+ dev_priv->fbc.disable_fbc = ilk_fbc_disable;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
- dev_priv->display.fbc_enabled = ilk_fbc_enabled;
- dev_priv->display.enable_fbc = ilk_fbc_enable;
- dev_priv->display.disable_fbc = ilk_fbc_disable;
+ dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->fbc.enable_fbc = ilk_fbc_enable;
+ dev_priv->fbc.disable_fbc = ilk_fbc_disable;
} else if (IS_GM45(dev_priv)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_fbc_enable;
- dev_priv->display.disable_fbc = g4x_fbc_disable;
+ dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->fbc.enable_fbc = g4x_fbc_enable;
+ dev_priv->fbc.disable_fbc = g4x_fbc_disable;
} else {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_fbc_enable;
- dev_priv->display.disable_fbc = i8xx_fbc_disable;
+ dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
+ dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
- dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+ dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6372cfc7d053..8c6a6fa46005 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -55,16 +55,8 @@ static int intel_fbdev_set_par(struct fb_info *info)
ret = drm_fb_helper_set_par(info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
- ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
- true);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
@@ -81,15 +73,8 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
ret = drm_fb_helper_blank(blank, info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
- intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
@@ -107,15 +92,8 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
ret = drm_fb_helper_pan_display(var, info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
- intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
@@ -126,9 +104,9 @@ static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = intel_fbdev_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -177,7 +155,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
}
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
@@ -237,9 +215,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
obj = intel_fb->obj;
size = obj->base.size;
- info = framebuffer_alloc(0, &dev->pdev->dev);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unpin;
}
@@ -248,24 +226,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
fb = &ifbdev->fb->base;
ifbdev->helper.fb = fb;
- ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unpin;
- }
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unpin;
- }
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -277,7 +244,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
size);
if (!info->screen_base) {
ret = -ENOSPC;
- goto out_unpin;
+ goto out_destroy_fbi;
}
info->screen_size = size;
@@ -304,6 +271,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -484,18 +453,13 @@ retry:
* IMPORTANT: We want to use the adjusted mode (i.e.
* after the panel fitter upscaling) as the initial
* config, not the input mode, which is what crtc->mode
- * usually contains. But since our current fastboot
+ * usually contains. But since our current
* code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly. We don't
- * use hwmode anywhere right now, so use it for this
- * since the fb helper layer wants a pointer to
- * something we own.
+ * into crtc->mode this works out correctly.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
- intel_mode_from_pipe_config(&encoder->crtc->hwmode,
- to_intel_crtc(encoder->crtc)->config);
- modes[i] = &encoder->crtc->hwmode;
+ modes[i] = &encoder->crtc->mode;
}
crtcs[i] = new_crtc;
@@ -550,16 +514,9 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
- if (ifbdev->helper.fbdev) {
- struct fb_info *info = ifbdev->helper.fbdev;
- unregister_framebuffer(info);
- iounmap(info->screen_base);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&ifbdev->helper);
+ drm_fb_helper_release_fbi(&ifbdev->helper);
drm_fb_helper_fini(&ifbdev->helper);
@@ -582,7 +539,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- struct intel_initial_plane_config *plane_config = NULL;
unsigned int max_size = 0;
if (!i915.fastboot)
@@ -590,20 +546,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
/* Find the largest fb */
for_each_crtc(dev, crtc) {
+ struct drm_i915_gem_object *obj =
+ intel_fb_obj(crtc->primary->state->fb);
intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->active || !crtc->primary->fb) {
+ if (!intel_crtc->active || !obj) {
DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
- if (intel_crtc->plane_config.size > max_size) {
+ if (obj->base.size > max_size) {
DRM_DEBUG_KMS("found possible fb from plane %c\n",
pipe_name(intel_crtc->pipe));
- plane_config = &intel_crtc->plane_config;
- fb = to_intel_framebuffer(crtc->primary->fb);
- max_size = plane_config->size;
+ fb = to_intel_framebuffer(crtc->primary->state->fb);
+ max_size = obj->base.size;
}
}
@@ -638,7 +595,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, fb->base.pitches[0]);
- plane_config = NULL;
fb = NULL;
break;
}
@@ -659,7 +615,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, max_size);
- plane_config = NULL;
fb = NULL;
break;
}
@@ -810,7 +765,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
memset_io(info->screen_base, 0, info->screen_size);
- fb_set_suspend(info, state);
+ drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
}
@@ -825,11 +780,20 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
{
int ret;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct drm_fb_helper *fb_helper;
- if (!dev_priv->fbdev)
+ if (!ifbdev)
return;
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
- if (ret)
+ fb_helper = &ifbdev->helper;
+
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+ if (ret) {
DRM_DEBUG("failed to restore crtc mode\n");
+ } else {
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 57095f54c1f2..ac85357010b4 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -65,84 +65,29 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static void intel_increase_pllclock(struct drm_device *dev,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_reg = DPLL(pipe);
- int dpll;
-
- if (!HAS_GMCH_DISPLAY(dev))
- return;
-
- if (!dev_priv->lvds_downclock_avail)
- return;
-
- dpll = I915_READ(dpll_reg);
- if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
- DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
- assert_panel_unlocked(dev_priv, pipe);
-
- dpll &= ~DISPLAY_RATE_SELECT_FPA1;
- I915_WRITE(dpll_reg, dpll);
- intel_wait_for_vblank(dev, pipe);
-
- dpll = I915_READ(dpll_reg);
- if (dpll & DISPLAY_RATE_SELECT_FPA1)
- DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
- }
-}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
- unsigned frontbuffer_bits,
- struct intel_engine_cs *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
- continue;
-
- intel_increase_pllclock(dev, pipe);
- }
-}
-
/**
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
* @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
if (!obj->frontbuffer_bits)
return;
- if (ring) {
+ if (origin == ORIGIN_CS) {
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.busy_bits
|= obj->frontbuffer_bits;
@@ -151,8 +96,6 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
mutex_unlock(&dev_priv->fb_tracking.lock);
}
- intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
@@ -162,6 +105,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
* intel_frontbuffer_flush - flush frontbuffer
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
*
* This function gets called every time rendering on the given planes has
* completed and frontbuffer caching can be started again. Flushes will get
@@ -169,37 +113,40 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
+static void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Delay flushing when rings are still busy.*/
mutex_lock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
- intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+ if (!frontbuffer_bits)
+ return;
intel_edp_drrs_flush(dev, frontbuffer_bits);
- intel_psr_flush(dev, frontbuffer_bits);
- intel_fbc_flush(dev_priv, frontbuffer_bits);
+ intel_psr_flush(dev, frontbuffer_bits, origin);
+ intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
}
/**
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
* @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again. If @retire is true
* then any delayed flushes will be unblocked.
*/
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire)
+ bool retire, enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned frontbuffer_bits;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -218,7 +165,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
mutex_unlock(&dev_priv->fb_tracking.lock);
}
- intel_frontbuffer_flush(dev, frontbuffer_bits);
+ intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
}
/**
@@ -236,7 +183,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
@@ -244,7 +191,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
- intel_psr_single_frame_update(dev);
+ intel_psr_single_frame_update(dev, frontbuffer_bits);
}
/**
@@ -260,7 +207,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev_priv->fb_tracking.lock);
/* Mask any cancelled flips. */
@@ -268,5 +215,29 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
- intel_frontbuffer_flush(dev, frontbuffer_bits);
+ intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given in
+ * @frontbuffer_bits. This is for synchronous plane updates which will happen
+ * on the next vblank and which will not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Remove stale busy bits due to the old buffer. */
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
new file mode 100644
index 000000000000..18d7f20936c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef _INTEL_GUC_FWIF_H
+#define _INTEL_GUC_FWIF_H
+
+/*
+ * This file is partially autogenerated, although currently with some manual
+ * fixups afterwards. In future, it should be entirely autogenerated, in order
+ * to ensure that the definitions herein remain in sync with those used by the
+ * GuC's own firmware.
+ *
+ * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
+ */
+
+#define GFXCORE_FAMILY_GEN8 11
+#define GFXCORE_FAMILY_GEN9 12
+#define GFXCORE_FAMILY_FORCE_ULONG 0x7fffffff
+
+#define GUC_CTX_PRIORITY_CRITICAL 0
+#define GUC_CTX_PRIORITY_HIGH 1
+#define GUC_CTX_PRIORITY_NORMAL 2
+#define GUC_CTX_PRIORITY_LOW 3
+
+#define GUC_MAX_GPU_CONTEXTS 1024
+#define GUC_INVALID_CTX_ID (GUC_MAX_GPU_CONTEXTS + 1)
+
+/* Work queue item header definitions */
+#define WQ_STATUS_ACTIVE 1
+#define WQ_STATUS_SUSPENDED 2
+#define WQ_STATUS_CMD_ERROR 3
+#define WQ_STATUS_ENGINE_ID_NOT_USED 4
+#define WQ_STATUS_SUSPENDED_FROM_RESET 5
+#define WQ_TYPE_SHIFT 0
+#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT)
+#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT)
+#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT)
+#define WQ_TARGET_SHIFT 10
+#define WQ_LEN_SHIFT 16
+#define WQ_NO_WCFLUSH_WAIT (1 << 27)
+#define WQ_PRESENT_WORKLOAD (1 << 28)
+#define WQ_WORKLOAD_SHIFT 29
+#define WQ_WORKLOAD_GENERAL (0 << WQ_WORKLOAD_SHIFT)
+#define WQ_WORKLOAD_GPGPU (1 << WQ_WORKLOAD_SHIFT)
+#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
+
+#define WQ_RING_TAIL_SHIFT 20
+#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT)
+
+#define GUC_DOORBELL_ENABLED 1
+#define GUC_DOORBELL_DISABLED 0
+
+#define GUC_CTX_DESC_ATTR_ACTIVE (1 << 0)
+#define GUC_CTX_DESC_ATTR_PENDING_DB (1 << 1)
+#define GUC_CTX_DESC_ATTR_KERNEL (1 << 2)
+#define GUC_CTX_DESC_ATTR_PREEMPT (1 << 3)
+#define GUC_CTX_DESC_ATTR_RESET (1 << 4)
+#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5)
+#define GUC_CTX_DESC_ATTR_PCH (1 << 6)
+
+/* The guc control data is 10 DWORDs */
+#define GUC_CTL_CTXINFO 0
+#define GUC_CTL_CTXNUM_IN16_SHIFT 0
+#define GUC_CTL_BASE_ADDR_SHIFT 12
+#define GUC_CTL_ARAT_HIGH 1
+#define GUC_CTL_ARAT_LOW 2
+#define GUC_CTL_DEVICE_INFO 3
+#define GUC_CTL_GTTYPE_SHIFT 0
+#define GUC_CTL_COREFAMILY_SHIFT 7
+#define GUC_CTL_LOG_PARAMS 4
+#define GUC_LOG_VALID (1 << 0)
+#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
+#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
+#define GUC_LOG_CRASH_PAGES 1
+#define GUC_LOG_CRASH_SHIFT 4
+#define GUC_LOG_DPC_PAGES 3
+#define GUC_LOG_DPC_SHIFT 6
+#define GUC_LOG_ISR_PAGES 3
+#define GUC_LOG_ISR_SHIFT 9
+#define GUC_LOG_BUF_ADDR_SHIFT 12
+#define GUC_CTL_PAGE_FAULT_CONTROL 5
+#define GUC_CTL_WA 6
+#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
+#define GUC_CTL_FEATURE 7
+#define GUC_CTL_VCS2_ENABLED (1 << 0)
+#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
+#define GUC_CTL_FEATURE2 (1 << 2)
+#define GUC_CTL_POWER_GATING (1 << 3)
+#define GUC_CTL_DISABLE_SCHEDULER (1 << 4)
+#define GUC_CTL_PREEMPTION_LOG (1 << 5)
+#define GUC_CTL_ENABLE_SLPC (1 << 7)
+#define GUC_CTL_DEBUG 8
+#define GUC_LOG_VERBOSITY_SHIFT 0
+#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT)
+/* Verbosity range-check limits, without the shift */
+#define GUC_LOG_VERBOSITY_MIN 0
+#define GUC_LOG_VERBOSITY_MAX 3
+
+#define GUC_CTL_MAX_DWORDS (GUC_CTL_DEBUG + 1)
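The GUC_CTL_* values above are indices into the array of boot-parameter DWORDs handed to the GuC, and the flag/shift definitions listed under each index are combined into that slot. A hedged sketch of filling such an array (guc_params_example is an illustrative helper; the loader code that actually does this is not part of this header):

	static void guc_params_example(u32 params[GUC_CTL_MAX_DWORDS])
	{
		/* Illustrative values only. */
		params[GUC_CTL_DEBUG]      = GUC_LOG_VERBOSITY_LOW;
		params[GUC_CTL_FEATURE]    = GUC_CTL_DISABLE_SCHEDULER;
		params[GUC_CTL_LOG_PARAMS] = GUC_LOG_VALID |
					     GUC_LOG_ALLOC_IN_MEGABYTE;
	}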
+
+struct guc_doorbell_info {
+ u32 db_status;
+ u32 cookie;
+ u32 reserved[14];
+} __packed;
+
+union guc_doorbell_qw {
+ struct {
+ u32 db_status;
+ u32 cookie;
+ };
+ u64 value_qw;
+} __packed;
+
+#define GUC_MAX_DOORBELLS 256
+#define GUC_INVALID_DOORBELL_ID (GUC_MAX_DOORBELLS)
+
+#define GUC_DB_SIZE (PAGE_SIZE)
+#define GUC_WQ_SIZE (PAGE_SIZE * 2)
+
+/* Work item for submitting workloads into work queue of GuC. */
+struct guc_wq_item {
+ u32 header;
+ u32 context_desc;
+ u32 ring_tail;
+ u32 fence_id;
+} __packed;
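The header DWORD of a work item is assembled from the WQ_* fields defined above: the type in the low bits, the target engine at WQ_TARGET_SHIFT, the item length at WQ_LEN_SHIFT, plus flag bits such as WQ_NO_WCFLUSH_WAIT. A hedged packing example (guc_wq_item_header is an illustrative helper, not part of the patch):

	static u32 guc_wq_item_header(u32 engine, u32 len_dwords)
	{
		/* Illustrative packing based on the WQ_* definitions above. */
		return WQ_TYPE_INORDER |
		       WQ_NO_WCFLUSH_WAIT |
		       (engine << WQ_TARGET_SHIFT) |
		       (len_dwords << WQ_LEN_SHIFT);
	}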
+
+struct guc_process_desc {
+ u32 context_id;
+ u64 db_base_addr;
+ u32 head;
+ u32 tail;
+ u32 error_offset;
+ u64 wq_base_addr;
+ u32 wq_size_bytes;
+ u32 wq_status;
+ u32 engine_presence;
+ u32 priority;
+ u32 reserved[30];
+} __packed;
+
+/* engine id and context id are packed into guc_execlist_context.context_id */
+#define GUC_ELC_CTXID_OFFSET 0
+#define GUC_ELC_ENGINE_OFFSET 29
+
+/* The execlist context including software and HW information */
+struct guc_execlist_context {
+ u32 context_desc;
+ u32 context_id;
+ u32 ring_status;
+ u32 ring_lcra;
+ u32 ring_begin;
+ u32 ring_end;
+ u32 ring_next_free_location;
+ u32 ring_current_tail_pointer_value;
+ u8 engine_state_submit_value;
+ u8 engine_state_wait_value;
+ u16 pagefault_count;
+ u16 engine_submit_queue_count;
+} __packed;
+
+/* Context descriptor for communicating between uKernel and Driver */
+struct guc_context_desc {
+ u32 sched_common_area;
+ u32 context_id;
+ u32 pas_id;
+ u8 engines_used;
+ u64 db_trigger_cpu;
+ u32 db_trigger_uk;
+ u64 db_trigger_phy;
+ u16 db_id;
+
+ struct guc_execlist_context lrc[I915_NUM_RINGS];
+
+ u8 attribute;
+
+ u32 priority;
+
+ u32 wq_sampled_tail_offset;
+ u32 wq_total_submit_enqueues;
+
+ u32 process_desc;
+ u32 wq_addr;
+ u32 wq_size;
+
+ u32 engine_presence;
+
+ u32 reserved0[1];
+ u64 reserved1[1];
+
+ u64 desc_private;
+} __packed;
+
+/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
+enum host2guc_action {
+ HOST2GUC_ACTION_DEFAULT = 0x0,
+ HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+ HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+ HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+ HOST2GUC_ACTION_LIMIT
+};
+
+/*
+ * The GuC sends its response to a command by overwriting the
+ * command in SS0. The response is distinguishable from a command
+ * by the fact that all the MASK bits are set. The remaining bits
+ * give more detail.
+ */
+#define GUC2HOST_RESPONSE_MASK ((u32)0xF0000000)
+#define GUC2HOST_IS_RESPONSE(x) ((u32)(x) >= GUC2HOST_RESPONSE_MASK)
+#define GUC2HOST_STATUS(x) (GUC2HOST_RESPONSE_MASK | (x))
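Because every status value has all of the GUC2HOST_RESPONSE_MASK bits set, a single unsigned comparison tells a pending command apart from a GuC reply. A small standalone illustration (the macros are repeated so the snippet compiles on its own):

	#include <stdint.h>
	#include <stdio.h>

	#define GUC2HOST_RESPONSE_MASK	((uint32_t)0xF0000000)
	#define GUC2HOST_IS_RESPONSE(x)	((uint32_t)(x) >= GUC2HOST_RESPONSE_MASK)
	#define GUC2HOST_STATUS(x)	(GUC2HOST_RESPONSE_MASK | (x))

	int main(void)
	{
		uint32_t cmd = 0x10;			/* a host2guc action value */
		uint32_t rsp = GUC2HOST_STATUS(0x0);	/* 0xF0000000, i.e. success */

		printf("%d %d\n",
		       GUC2HOST_IS_RESPONSE(cmd),	/* 0: still a command */
		       GUC2HOST_IS_RESPONSE(rsp));	/* 1: the GuC has replied */
		return 0;
	}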
+
+/* GUC will return status back to SOFT_SCRATCH_O_REG */
+enum guc2host_status {
+ GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
+ GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
+ GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
+ GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+};
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e97731aab6dc..51cbea8247fe 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -174,10 +174,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
- if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
- return val & VIDEO_DIP_ENABLE;
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return false;
- return false;
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ return false;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -227,10 +231,15 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
- return val & VIDEO_DIP_ENABLE;
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return false;
+
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ return false;
- return false;
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -282,7 +291,12 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- return val & VIDEO_DIP_ENABLE;
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return false;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -332,10 +346,15 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
- return val & VIDEO_DIP_ENABLE;
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return false;
- return false;
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ return false;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -383,8 +402,9 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
- return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
- VIDEO_DIP_ENABLE_VS_HSW);
+ return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+ VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
}
/*
@@ -514,7 +534,13 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
- val &= ~VIDEO_DIP_ENABLE;
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
+ return;
+ }
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
@@ -522,16 +548,17 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
- val &= ~VIDEO_DIP_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
+ return;
}
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
val |= VIDEO_DIP_ENABLE;
- val &= ~VIDEO_DIP_ENABLE_VENDOR;
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -541,6 +568,97 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
+static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ /*
+ * HDMI cloning is only supported on g4x which doesn't
+ * support deep color or GCP infoframes anyway so no
+ * need to worry about multiple HDMI sinks here.
+ */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return connector->display_info.bpc > 8;
+
+ return false;
+}
+
+/*
+ * Determine if default_phase=1 can be indicated in the GCP infoframe.
+ *
+ * From HDMI specification 1.4a:
+ * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
+ * - The first pixel following each Video Data Period shall have a pixel packing phase of 0
+ * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
+ * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
+ * phase of 0
+ */
+static bool gcp_default_phase_possible(int pipe_bpp,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pixels_per_group;
+
+ switch (pipe_bpp) {
+ case 30:
+ /* 4 pixels in 5 clocks */
+ pixels_per_group = 4;
+ break;
+ case 36:
+ /* 2 pixels in 3 clocks */
+ pixels_per_group = 2;
+ break;
+ case 48:
+ /* 1 pixel in 2 clocks */
+ pixels_per_group = 1;
+ break;
+ default:
+ /* phase information not relevant for 8bpc */
+ return false;
+ }
+
+ return mode->crtc_hdisplay % pixels_per_group == 0 &&
+ mode->crtc_htotal % pixels_per_group == 0 &&
+ mode->crtc_hblank_start % pixels_per_group == 0 &&
+ mode->crtc_hblank_end % pixels_per_group == 0 &&
+ mode->crtc_hsync_start % pixels_per_group == 0 &&
+ mode->crtc_hsync_end % pixels_per_group == 0 &&
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
+ mode->crtc_htotal/2 % pixels_per_group == 0);
+}
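As a worked example of the rule above: for the common 1920x1080 CEA mode (hdisplay 1920, hsync 2008-2052, htotal 2200) every horizontal timing is divisible by 4, so default_phase can be signalled even for the 30 bpp packing (4 pixels per group); a hypothetical mode with hsync_start 2006 would fail the 30 bpp check (2006 % 4 != 0) but still pass at 36 bpp, where the group size is 2.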
+
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ u32 reg, val = 0;
+
+ if (HAS_DDI(dev_priv))
+ reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+ else if (IS_VALLEYVIEW(dev_priv))
+ reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+ else if (HAS_PCH_SPLIT(dev_priv->dev))
+ reg = TVIDEO_DIP_GCP(crtc->pipe);
+ else
+ return false;
+
+ /* Indicate color depth whenever the sink supports deep color */
+ if (hdmi_sink_is_deep_color(encoder))
+ val |= GCP_COLOR_INDICATION;
+
+ /* Enable default_phase whenever the display mode is suitably aligned */
+ if (gcp_default_phase_possible(crtc->config->pipe_bpp,
+ &crtc->config->base.adjusted_mode))
+ val |= GCP_DEFAULT_PHASE_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ return val != 0;
+}
+
static void ibx_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
@@ -561,25 +679,29 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
- val &= ~VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- if (val & VIDEO_DIP_ENABLE) {
- val &= ~VIDEO_DIP_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
- }
+ WARN(val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
val |= VIDEO_DIP_ENABLE;
- val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
- VIDEO_DIP_ENABLE_GCP);
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder))
+ val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -607,7 +729,9 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
- val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
@@ -616,7 +740,10 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
/* Set both together, unset both together: see the spec. */
val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
- VIDEO_DIP_ENABLE_GCP);
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder))
+ val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -646,25 +773,29 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
- val &= ~VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- if (val & VIDEO_DIP_ENABLE) {
- val &= ~VIDEO_DIP_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
- }
+ WARN(val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
val |= VIDEO_DIP_ENABLE;
- val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
- VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder))
+ val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -686,14 +817,18 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
assert_hdmi_port_disabled(intel_hdmi);
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+ VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
if (!enable) {
- I915_WRITE(reg, 0);
+ I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
- val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
- VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+ if (intel_hdmi_set_gcp_infoframe(encoder))
+ val |= VIDEO_DIP_ENABLE_GCP_HSW;
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -808,58 +943,146 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
else
dotclock = pipe_config->port_clock;
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
-static void intel_enable_hdmi(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+{
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ WARN_ON(!crtc->config->has_hdmi_sink);
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ intel_audio_codec_enable(encoder);
+}
+
+static void g4x_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
- u32 enable_bits = SDVO_ENABLE;
- if (intel_crtc->config->has_audio)
- enable_bits |= SDVO_AUDIO_ENABLE;
+ temp = I915_READ(intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (crtc->config->has_audio)
+ temp |= SDVO_AUDIO_ENABLE;
+
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ if (crtc->config->has_audio)
+ intel_enable_hdmi_audio(encoder);
+}
+
+static void ibx_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
- /* HW workaround for IBX, we need to move the port to transcoder A
- * before disabling it, so restore the transcoder select bit here. */
- if (HAS_PCH_IBX(dev))
- enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ temp |= SDVO_ENABLE;
+ if (crtc->config->has_audio)
+ temp |= SDVO_AUDIO_ENABLE;
+
+ /*
+ * HW workaround, need to write this twice for issue
+ * that may result in first write getting masked.
+ */
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
- /* HW workaround, need to toggle enable bit off and on for 12bpc, but
- * we do this anyway which shows more stable in testing.
+ /*
+ * HW workaround, need to toggle enable bit off and on
+ * for 12bpc with pixel repeat.
+ *
+ * FIXME: BSpec says this should be done at the end of
+ * the modeset sequence, so not sure if this isn't too soon.
*/
- if (HAS_PCH_SPLIT(dev)) {
+ if (crtc->config->pipe_bpp > 24 &&
+ crtc->config->pixel_multiplier > 1) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
+
+ /*
+ * HW workaround, need to write this twice for issue
+ * that may result in first write getting masked.
+ */
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
}
- temp |= enable_bits;
+ if (crtc->config->has_audio)
+ intel_enable_hdmi_audio(encoder);
+}
+
+static void cpt_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ enum pipe pipe = crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (crtc->config->has_audio)
+ temp |= SDVO_AUDIO_ENABLE;
+
+ /*
+ * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+ *
+ * The procedure for 12bpc is as follows:
+ * 1. disable HDMI clock gating
+ * 2. enable HDMI with 8bpc
+ * 3. enable HDMI with 12bpc
+ * 4. enable HDMI clock gating
+ */
+
+ if (crtc->config->pipe_bpp > 24) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ I915_READ(TRANS_CHICKEN1(pipe)) |
+ TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+ temp &= ~SDVO_COLOR_FORMAT_MASK;
+ temp |= SDVO_COLOR_FORMAT_8bpc;
+ }
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- /* HW workaround, need to write this twice for issue that may result
- * in first write getting masked.
- */
- if (HAS_PCH_SPLIT(dev)) {
+ if (crtc->config->pipe_bpp > 24) {
+ temp &= ~SDVO_COLOR_FORMAT_MASK;
+ temp |= HDMI_COLOR_FORMAT_12bpc;
+
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- }
- if (intel_crtc->config->has_audio) {
- WARN_ON(!intel_crtc->config->has_hdmi_sink);
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(intel_crtc->pipe));
- intel_audio_codec_enable(encoder);
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ I915_READ(TRANS_CHICKEN1(pipe)) &
+ ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
+
+ if (crtc->config->has_audio)
+ intel_enable_hdmi_audio(encoder);
}
static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -901,6 +1124,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
+
+ intel_hdmi->set_infoframes(&encoder->base, false, NULL);
}
static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -926,7 +1151,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
intel_disable_hdmi(encoder);
}
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
@@ -939,24 +1164,51 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
}
static enum drm_mode_status
+hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+ int clock, bool respect_dvi_limit)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+ if (clock < 25000)
+ return MODE_CLOCK_LOW;
+ if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+ return MODE_CLOCK_HIGH;
+
+ /* BXT DPLL can't generate 223-240 MHz */
+ if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+ return MODE_CLOCK_RANGE;
+
+ /* CHV DPLL can't generate 216-240 MHz */
+ if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- int clock = mode->clock;
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ enum drm_mode_status status;
+ int clock;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ clock = mode->clock;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
- if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
- true))
- return MODE_CLOCK_HIGH;
- if (clock < 20000)
- return MODE_CLOCK_LOW;
+ /* check if we can do 8bpc */
+ status = hdmi_port_clock_valid(hdmi, clock, true);
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
+ /* if we can't do 8bpc we may still be able to do 12bpc */
+ if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+ status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
- return MODE_OK;
+ return status;
}
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
@@ -997,8 +1249,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
- int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
+ int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+ int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
@@ -1017,6 +1269,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
pipe_config->pixel_multiplier = 2;
+ clock_8bpc *= 2;
+ clock_12bpc *= 2;
}
if (intel_hdmi->color_range)
@@ -1035,9 +1289,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
- clock_12bpc <= portclock_limit &&
- hdmi_12bpc_possible(pipe_config) &&
- 0 /* FIXME 12bpc support totally broken */) {
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+ hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1046,6 +1299,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
} else {
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
+
+ pipe_config->port_clock = clock_8bpc;
}
if (!pipe_config->bw_constrained) {
@@ -1053,8 +1308,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = desired_bpp;
}
- if (adjusted_mode->crtc_clock > portclock_limit) {
- DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+ if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
+ false) != MODE_OK) {
+ DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
return false;
}
@@ -1323,7 +1579,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
- intel_enable_hdmi(encoder);
+ g4x_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
@@ -1640,7 +1896,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
- intel_enable_hdmi(encoder);
+ g4x_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
@@ -1653,7 +1909,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_hdmi_detect,
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1827,7 +2083,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- intel_encoder->enable = intel_enable_hdmi;
+ if (HAS_PCH_CPT(dev))
+ intel_encoder->enable = cpt_enable_hdmi;
+ else if (HAS_PCH_IBX(dev))
+ intel_encoder->enable = ibx_enable_hdmi;
+ else
+ intel_encoder->enable = g4x_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
new file mode 100644
index 000000000000..032a0bf75f3b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled in many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, the userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of the
+ * number of interrupts per hotplug pin over a period of time, and if the number
+ * of interrupts exceeds a certain threshold, the interrupt is disabled for a
+ * while before being re-enabled. The intention is to mitigate issues arising
+ * from broken hardware triggering massive amounts of interrupts and grinding
+ * the system to a halt.
+ *
+ * The current implementation expects that a hotplug interrupt storm will not
+ * be seen when a DisplayPort sink is connected, hence on platforms whose DP
+ * callback is handled by i915_digport_work_func() the re-enabling of HPD is
+ * not performed (it was never expected to be disabled in the first place ;) ).
+ * This is specific to DP sinks handled by that routine; any other display,
+ * such as HDMI or DVI, enabled on the same port will have proper logic since
+ * it will use i915_hotplug_work_func(), where this logic is handled.
+
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
+{
+ switch (pin) {
+ case HPD_PORT_A:
+ *port = PORT_A;
+ return true;
+ case HPD_PORT_B:
+ *port = PORT_B;
+ return true;
+ case HPD_PORT_C:
+ *port = PORT_C;
+ return true;
+ case HPD_PORT_D:
+ *port = PORT_D;
+ return true;
+ default:
+ return false; /* no hpd */
+ }
+}
+
+#define HPD_STORM_DETECT_PERIOD 1000
+#define HPD_STORM_THRESHOLD 5
+#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
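With these defaults, interrupts on a pin are counted inside a 1000 ms window; once the count exceeds HPD_STORM_THRESHOLD the pin is flagged HPD_MARK_DISABLED, the connector is switched over to polling, and re-enabling of HPD is scheduled HPD_STORM_REENABLE_DELAY, i.e. roughly two minutes, later.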
+
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ *
+ * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * storms. Only the pin specific stats and state are changed; the caller is
+ * responsible for further action.
+ *
+ * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
+ * otherwise it's considered an irq storm, and the irq state is set to
+ * @HPD_MARK_DISABLED.
+ *
+ * Return true if an irq storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+ enum hpd_pin pin)
+{
+ unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+ unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+ bool storm = false;
+
+ if (!time_in_range(jiffies, start, end)) {
+ dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
+ dev_priv->hotplug.stats[pin].count = 0;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
+ } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
+ dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+ DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+ storm = true;
+ } else {
+ dev_priv->hotplug.stats[pin].count++;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+ dev_priv->hotplug.stats[pin].count);
+ }
+
+ return storm;
+}
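
The storm detection above is just a windowed counter keyed off jiffies: restart
the window when the last interrupt is too old, otherwise count, and flag the pin
once the count passes the threshold. As a rough standalone sketch of the same
idea (plain millisecond timestamps and made-up names, not the driver's types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STORM_PERIOD_MS  1000   /* mirrors HPD_STORM_DETECT_PERIOD */
#define STORM_THRESHOLD  5      /* mirrors HPD_STORM_THRESHOLD */

struct pin_stats {
    uint64_t window_start_ms;
    int count;
    bool disabled;
};

/* Returns true when the pin has seen too many irqs inside one window. */
static bool storm_detect(struct pin_stats *s, uint64_t now_ms)
{
    if (now_ms - s->window_start_ms > STORM_PERIOD_MS) {
        s->window_start_ms = now_ms;    /* start a new window */
        s->count = 0;
        return false;
    }
    if (s->count > STORM_THRESHOLD) {
        s->disabled = true;             /* caller switches to polling */
        return true;
    }
    s->count++;
    return false;
}

int main(void)
{
    struct pin_stats s = { 0 };
    uint64_t t;

    for (t = 0; t < 7; t++)             /* 7 irqs inside one 1000 ms window */
        if (storm_detect(&s, t))
            printf("storm detected at irq %llu\n", (unsigned long long)t);
    return 0;
}
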
+
+static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_connector *intel_connector;
+ struct intel_encoder *intel_encoder;
+ struct drm_connector *connector;
+ enum hpd_pin pin;
+ bool hpd_disabled = false;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ intel_connector = to_intel_connector(connector);
+ intel_encoder = intel_connector->encoder;
+ if (!intel_encoder)
+ continue;
+
+ pin = intel_encoder->hpd_pin;
+ if (pin == HPD_NONE ||
+ dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ continue;
+
+ DRM_INFO("HPD interrupt storm detected on connector %s: "
+ "switching from hotplug detection to polling\n",
+ connector->name);
+
+ dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+ hpd_disabled = true;
+ }
+
+ /* Enable polling and queue hotplug re-enabling. */
+ if (hpd_disabled) {
+ drm_kms_helper_poll_enable(dev);
+ mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+ msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+ }
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ hotplug.reenable_work.work);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ int i;
+
+ intel_runtime_pm_get(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ for_each_hpd_pin(i) {
+ struct drm_connector *connector;
+
+ if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+ continue;
+
+ dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (intel_connector->encoder->hpd_pin == i) {
+ if (connector->polled != intel_connector->polled)
+ DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+ connector->name);
+ connector->polled = intel_connector->polled;
+ if (!connector->polled)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+ }
+ }
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static bool intel_hpd_irq_event(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ enum drm_connector_status old_status;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ if (old_status == connector->status)
+ return false;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id,
+ connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+
+ return true;
+}
+
+static void i915_digport_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+ u32 long_port_mask, short_port_mask;
+ struct intel_digital_port *intel_dig_port;
+ int i;
+ u32 old_bits = 0;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ long_port_mask = dev_priv->hotplug.long_port_mask;
+ dev_priv->hotplug.long_port_mask = 0;
+ short_port_mask = dev_priv->hotplug.short_port_mask;
+ dev_priv->hotplug.short_port_mask = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ bool valid = false;
+ bool long_hpd = false;
+ intel_dig_port = dev_priv->hotplug.irq_port[i];
+ if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ continue;
+
+ if (long_port_mask & (1 << i)) {
+ valid = true;
+ long_hpd = true;
+ } else if (short_port_mask & (1 << i))
+ valid = true;
+
+ if (valid) {
+ enum irqreturn ret;
+
+ ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
+ old_bits |= (1 << intel_dig_port->base.hpd_pin);
+ }
+ }
+ }
+
+ if (old_bits) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->hotplug.event_bits |= old_bits;
+ spin_unlock_irq(&dev_priv->irq_lock);
+ schedule_work(&dev_priv->hotplug.hotplug_work);
+ }
+}
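
The work function above follows a common bottom-half shape: snapshot and clear
the shared port masks under the spinlock, then do the slower per-port processing
with the lock dropped, and only fall back to the regular hotplug path for ports
whose pulse handler did nothing. A hedged userspace sketch of that locking shape
(a pthread mutex stands in for the irq spinlock; all names are invented):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pending_long, pending_short;    /* set by the "irq" side */

static void process_ports(void)
{
    uint32_t long_mask, short_mask;
    int i;

    pthread_mutex_lock(&lock);
    long_mask = pending_long;       /* take private copies ... */
    short_mask = pending_short;
    pending_long = 0;               /* ... and clear the shared state */
    pending_short = 0;
    pthread_mutex_unlock(&lock);

    /* Work on the snapshot with the lock released. */
    for (i = 0; i < 32; i++) {
        if (long_mask & (1u << i))
            printf("port %d: long pulse\n", i);
        else if (short_mask & (1u << i))
            printf("port %d: short pulse\n", i);
    }
}

int main(void)
{
    pthread_mutex_lock(&lock);
    pending_long = 1u << 2;         /* pretend the irq side flagged ports */
    pending_short = 1u << 0;
    pthread_mutex_unlock(&lock);

    process_ports();
    return 0;
}
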
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_connector *intel_connector;
+ struct intel_encoder *intel_encoder;
+ struct drm_connector *connector;
+ bool changed = false;
+ u32 hpd_event_bits;
+
+ mutex_lock(&mode_config->mutex);
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ hpd_event_bits = dev_priv->hotplug.event_bits;
+ dev_priv->hotplug.event_bits = 0;
+
+ /* Disable hotplug on connectors that hit an irq storm. */
+ intel_hpd_irq_storm_disable(dev_priv);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
+ intel_encoder = intel_connector->encoder;
+ if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+ connector->name, intel_encoder->hpd_pin);
+ if (intel_encoder->hot_plug)
+ intel_encoder->hot_plug(intel_encoder);
+ if (intel_hpd_irq_event(dev, connector))
+ changed = true;
+ }
+ }
+ mutex_unlock(&mode_config->mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+}
+
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev: drm device
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform specific
+ * irq handlers call the platform specific hotplug irq handlers, which read and
+ * decode the appropriate registers into bitmasks about hpd pins that have
+ * triggered (@pin_mask), and which of those pins may be long pulses
+ * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
+ * is not a digital port.
+ *
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_device *dev,
+ u32 pin_mask, u32 long_mask)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ enum port port;
+ bool storm_detected = false;
+ bool queue_dig = false, queue_hp = false;
+ bool is_dig_port;
+
+ if (!pin_mask)
+ return;
+
+ spin_lock(&dev_priv->irq_lock);
+ for_each_hpd_pin(i) {
+ if (!(BIT(i) & pin_mask))
+ continue;
+
+ is_dig_port = intel_hpd_pin_to_port(i, &port) &&
+ dev_priv->hotplug.irq_port[port];
+
+ if (is_dig_port) {
+ bool long_hpd = long_mask & BIT(i);
+
+ DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+ long_hpd ? "long" : "short");
+ /*
+ * For long HPD pulses we want to have the digital queue happen,
+ * but we still want HPD storm detection to function.
+ */
+ queue_dig = true;
+ if (long_hpd) {
+ dev_priv->hotplug.long_port_mask |= (1 << port);
+ } else {
+ /* for short HPD just trigger the digital queue */
+ dev_priv->hotplug.short_port_mask |= (1 << port);
+ continue;
+ }
+ }
+
+ if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+ /*
+ * On GMCH platforms the interrupt mask bits only
+ * prevent irq generation, not the setting of the
+ * hotplug bits themselves. So only WARN about unexpected
+ * interrupts on saner platforms.
+ */
+ WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+ "Received HPD interrupt on pin %d although disabled\n", i);
+ continue;
+ }
+
+ if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+ continue;
+
+ if (!is_dig_port) {
+ dev_priv->hotplug.event_bits |= BIT(i);
+ queue_hp = true;
+ }
+
+ if (intel_hpd_irq_storm_detect(dev_priv, i)) {
+ dev_priv->hotplug.event_bits &= ~BIT(i);
+ storm_detected = true;
+ }
+ }
+
+ if (storm_detected)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock(&dev_priv->irq_lock);
+
+ /*
+ * Our hotplug handler can grab modeset locks (by calling down into the
+ * fb helpers). Hence it must not be run on our own dev-priv->wq work
+ * queue for otherwise the flush_work in the pageflip code will
+ * deadlock.
+ */
+ if (queue_dig)
+ queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+ if (queue_hp)
+ schedule_work(&dev_priv->hotplug.hotplug_work);
+}
+
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll request can run concurrently to other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_hpd_pin(i) {
+ dev_priv->hotplug.stats[i].count = 0;
+ dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+ }
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ connector->polled = intel_connector->polled;
+ if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ if (intel_connector->mst_port)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked checks happy.
+ */
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+ INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+ intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ dev_priv->hotplug.long_port_mask = 0;
+ dev_priv->hotplug.short_port_mask = 0;
+ dev_priv->hotplug.event_bits = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ cancel_work_sync(&dev_priv->hotplug.dig_port_work);
+ cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+ cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b74ffae5f5a..5bc0ce1347ce 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -135,6 +135,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_mocs.h"
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
@@ -190,9 +191,7 @@
#define GEN8_CTX_PRIVILEGE (1<<8)
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
- const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \
- ppgtt->pdp.page_directory[n]->daddr : \
- ppgtt->scratch_pd->daddr; \
+ const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
}
@@ -211,9 +210,9 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
+#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -263,10 +262,11 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
return lrca >> 12;
}
-static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
{
+ struct intel_engine_cs *ring = rq->ring;
struct drm_device *dev = ring->dev;
+ struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
@@ -294,55 +294,59 @@ static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
return desc;
}
-static void execlists_elsp_write(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *ctx_obj0,
- struct drm_i915_gem_object *ctx_obj1)
+static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
+ struct drm_i915_gem_request *rq1)
{
+
+ struct intel_engine_cs *ring = rq0->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint64_t temp = 0;
- uint32_t desc[4];
+ uint64_t desc[2];
- /* XXX: You must always write both descriptors in the order below. */
- if (ctx_obj1)
- temp = execlists_ctx_descriptor(ring, ctx_obj1);
- else
- temp = 0;
- desc[1] = (u32)(temp >> 32);
- desc[0] = (u32)temp;
+ if (rq1) {
+ desc[1] = execlists_ctx_descriptor(rq1);
+ rq1->elsp_submitted++;
+ } else {
+ desc[1] = 0;
+ }
- temp = execlists_ctx_descriptor(ring, ctx_obj0);
- desc[3] = (u32)(temp >> 32);
- desc[2] = (u32)temp;
+ desc[0] = execlists_ctx_descriptor(rq0);
+ rq0->elsp_submitted++;
+ /* You must always write both descriptors in the order below. */
spin_lock(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
- I915_WRITE_FW(RING_ELSP(ring), desc[1]);
- I915_WRITE_FW(RING_ELSP(ring), desc[0]);
- I915_WRITE_FW(RING_ELSP(ring), desc[3]);
+ I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
/* The context is automatically loaded after the following */
- I915_WRITE_FW(RING_ELSP(ring), desc[2]);
+ I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
- /* ELSP is a wo register, so use another nearby reg for posting instead */
+ /* ELSP is a wo register, use another nearby reg for posting */
POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
spin_unlock(&dev_priv->uncore.lock);
}
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
- struct drm_i915_gem_object *ring_obj,
- struct i915_hw_ppgtt *ppgtt,
- u32 tail)
+static int execlists_update_context(struct drm_i915_gem_request *rq)
{
+ struct intel_engine_cs *ring = rq->ring;
+ struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+ struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
struct page *page;
uint32_t *reg_state;
+ BUG_ON(!ctx_obj);
+ WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+ WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
page = i915_gem_object_get_page(ctx_obj, 1);
reg_state = kmap_atomic(page);
- reg_state[CTX_RING_TAIL+1] = tail;
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+ reg_state[CTX_RING_TAIL+1] = rq->tail;
+ reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
/* True PPGTT with dynamic page allocation: update PDP registers and
* point the unallocated PDPs to the scratch page
@@ -359,32 +363,15 @@ static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
return 0;
}
-static void execlists_submit_contexts(struct intel_engine_cs *ring,
- struct intel_context *to0, u32 tail0,
- struct intel_context *to1, u32 tail1)
+static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
+ struct drm_i915_gem_request *rq1)
{
- struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
- struct drm_i915_gem_object *ctx_obj1 = NULL;
- struct intel_ringbuffer *ringbuf1 = NULL;
-
- BUG_ON(!ctx_obj0);
- WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
- WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
-
- execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
+ execlists_update_context(rq0);
- if (to1) {
- ringbuf1 = to1->engine[ring->id].ringbuf;
- ctx_obj1 = to1->engine[ring->id].state;
- BUG_ON(!ctx_obj1);
- WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
- WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
+ if (rq1)
+ execlists_update_context(rq1);
- execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
- }
-
- execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+ execlists_elsp_write(rq0, rq1);
}
static void execlists_context_unqueue(struct intel_engine_cs *ring)
@@ -444,13 +431,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
WARN_ON(req1 && req1->elsp_submitted);
- execlists_submit_contexts(ring, req0->ctx, req0->tail,
- req1 ? req1->ctx : NULL,
- req1 ? req1->tail : 0);
-
- req0->elsp_submitted++;
- if (req1)
- req1->elsp_submitted++;
+ execlists_submit_requests(req0, req1);
}
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
@@ -516,6 +497,9 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8 + 4);
+ if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+ continue;
+
if (status & GEN8_CTX_STATUS_PREEMPTED) {
if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
if (execlists_check_remove_request(ring, status_id))
@@ -540,37 +524,21 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
ring->next_context_status_buffer = write_pointer % 6;
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- ((u32)ring->next_context_status_buffer & 0x07) << 8);
+ _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
}
-static int execlists_context_queue(struct intel_engine_cs *ring,
- struct intel_context *to,
- u32 tail,
- struct drm_i915_gem_request *request)
+static int execlists_context_queue(struct drm_i915_gem_request *request)
{
+ struct intel_engine_cs *ring = request->ring;
struct drm_i915_gem_request *cursor;
int num_elements = 0;
- if (to != ring->default_context)
- intel_lr_context_pin(ring, to);
+ if (request->ctx != ring->default_context)
+ intel_lr_context_pin(request);
- if (!request) {
- /*
- * If there isn't a request associated with this submission,
- * create one as a temporary holder.
- */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
- request->ring = ring;
- request->ctx = to;
- kref_init(&request->ref);
- i915_gem_context_reference(request->ctx);
- } else {
- i915_gem_request_reference(request);
- WARN_ON(to != request->ctx);
- }
- request->tail = tail;
+ i915_gem_request_reference(request);
+
+ request->tail = request->ringbuf->tail;
spin_lock_irq(&ring->execlist_lock);
@@ -585,7 +553,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
struct drm_i915_gem_request,
execlist_link);
- if (to == tail_req->ctx) {
+ if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
@@ -603,10 +571,9 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return 0;
}
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx)
+static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = ringbuf->ring;
+ struct intel_engine_cs *ring = req->ring;
uint32_t flush_domains;
int ret;
@@ -614,8 +581,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->emit_flush(ringbuf, ctx,
- I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
@@ -623,12 +589,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- struct intel_engine_cs *ring = ringbuf->ring;
- const unsigned other_rings = ~intel_ring_flag(ring);
+ const unsigned other_rings = ~intel_ring_flag(req->ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -638,7 +602,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring, &req);
if (ret)
return ret;
}
@@ -655,59 +619,59 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(ringbuf, ctx);
+ return logical_ring_invalidate_all_caches(req);
}
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
- struct intel_context *ctx)
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
int ret;
- if (ctx != request->ring->default_context) {
- ret = intel_lr_context_pin(request->ring, ctx);
+ request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+
+ if (request->ctx != request->ring->default_context) {
+ ret = intel_lr_context_pin(request);
if (ret)
return ret;
}
- request->ringbuf = ctx->engine[request->ring->id].ringbuf;
- request->ctx = ctx;
- i915_gem_context_reference(request->ctx);
-
return 0;
}
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
int bytes)
{
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_i915_gem_request *request;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_engine_cs *ring = req->ring;
+ struct drm_i915_gem_request *target;
unsigned space;
int ret;
if (intel_ring_space(ringbuf) >= bytes)
return 0;
- list_for_each_entry(request, &ring->request_list, list) {
+ /* The whole point of reserving space is to not wait! */
+ WARN_ON(ringbuf->reserved_in_use);
+
+ list_for_each_entry(target, &ring->request_list, list) {
/*
* The request queue is per-engine, so can contain requests
* from multiple ringbuffers. Here, we must ignore any that
* aren't from the ringbuffer we're considering.
*/
- if (request->ringbuf != ringbuf)
+ if (target->ringbuf != ringbuf)
continue;
/* Would completion of this request free enough space? */
- space = __intel_ring_space(request->postfix, ringbuf->tail,
+ space = __intel_ring_space(target->postfix, ringbuf->tail,
ringbuf->size);
if (space >= bytes)
break;
}
- if (WARN_ON(&request->list == &ring->request_list))
+ if (WARN_ON(&target->list == &ring->request_list))
return -ENOSPC;
- ret = i915_wait_request(request);
+ ret = i915_wait_request(target);
if (ret)
return ret;
@@ -717,7 +681,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
+ * @request: Request to advance the logical ringbuffer of.
*
* The tail is updated in our logical ringbuffer struct, not in the actual context. What
* really happens during submission is that the context and current tail will be placed
@@ -725,33 +689,23 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
* point, the tail *inside* the context is updated and the ELSP written to.
*/
static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request)
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
- struct intel_engine_cs *ring = ringbuf->ring;
+ struct intel_engine_cs *ring = request->ring;
- intel_logical_ring_advance(ringbuf);
+ intel_logical_ring_advance(request->ringbuf);
if (intel_ring_stopped(ring))
return;
- execlists_context_queue(ring, ctx, ringbuf->tail, request);
+ execlists_context_queue(request);
}
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
- if (ringbuf->space < rem) {
- int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-
- if (ret)
- return ret;
- }
-
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
@@ -759,25 +713,50 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
ringbuf->tail = 0;
intel_ring_update_space(ringbuf);
-
- return 0;
}
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx, int bytes)
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
{
- int ret;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int remain_actual = ringbuf->size - ringbuf->tail;
+ int ret, total_bytes, wait_bytes = 0;
+ bool need_wrap = false;
+
+ if (ringbuf->reserved_in_use)
+ total_bytes = bytes;
+ else
+ total_bytes = bytes + ringbuf->reserved_size;
- if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = logical_ring_wrap_buffer(ringbuf, ctx);
- if (unlikely(ret))
- return ret;
+ if (unlikely(bytes > remain_usable)) {
+ /*
+ * Not enough space for the basic request. So need to flush
+ * out the remainder and then wait for base + reserved.
+ */
+ wait_bytes = remain_actual + total_bytes;
+ need_wrap = true;
+ } else {
+ if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we only need to wait for the
+ * reserved size after flushing out the remainder.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
+ need_wrap = true;
+ } else if (total_bytes > ringbuf->space) {
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
+ }
}
- if (unlikely(ringbuf->space < bytes)) {
- ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+ if (wait_bytes) {
+ ret = logical_ring_wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
+
+ if (need_wrap)
+ __wrap_ring_buffer(ringbuf);
}
return 0;
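
The wait/wrap decision above reduces to three cases on the space left before the
effective end of the ring: the request itself doesn't fit (wrap, then wait for
base plus reserved), the request fits but the reserved tail doesn't (wrap, then
wait for the reserved size), or everything fits and we only wait if current free
space is short. A standalone sketch with one worked set of numbers (struct and
field names are simplified stand-ins, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct ring {
    int size, effective_size, tail, space, reserved_size;
};

static int prepare(const struct ring *r, int bytes, bool *need_wrap)
{
    int remain_usable = r->effective_size - r->tail;
    int remain_actual = r->size - r->tail;
    int total_bytes = bytes + r->reserved_size;
    int wait_bytes = 0;

    *need_wrap = false;
    if (bytes > remain_usable) {
        /* base request doesn't fit: flush to the end, wait for base + reserved */
        wait_bytes = remain_actual + total_bytes;
        *need_wrap = true;
    } else if (total_bytes > remain_usable) {
        /* base fits, but the reserved space falls off the end */
        wait_bytes = remain_actual + r->reserved_size;
        *need_wrap = true;
    } else if (total_bytes > r->space) {
        wait_bytes = total_bytes;   /* no wrap, just wait */
    }
    return wait_bytes;
}

int main(void)
{
    struct ring r = { .size = 4096, .effective_size = 4064,
                      .tail = 4000, .space = 96, .reserved_size = 160 };
    bool wrap;
    int wait = prepare(&r, 32, &wrap);

    /* 32 bytes fit in the 64 usable bytes left, but 32 + 160 reserved do not:
     * wait for 96 bytes to the real end plus the 160 reserved = 256, then wrap. */
    printf("wait=%d wrap=%d\n", wait, (int)wrap);
    return 0;
}
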
@@ -786,7 +765,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
/**
* intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
*
- * @ringbuf: Logical ringbuffer.
+ * @request: The request to start some new work for
* @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
*
* The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -796,32 +776,42 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
*
* Return: non-zero if the ringbuffer is not ready to be written to.
*/
-static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx, int num_dwords)
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv;
int ret;
+ WARN_ON(req == NULL);
+ dev_priv = req->ring->dev->dev_private;
+
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
- ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
-
- /* Preallocate the olr before touching the ring */
- ret = i915_gem_request_alloc(ring, ctx);
+ ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
- ringbuf->space -= num_dwords * sizeof(uint32_t);
+ req->ringbuf->space -= num_dwords * sizeof(uint32_t);
return 0;
}
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+ /*
+ * The first call merely notes the reserve request and is common for
+ * all back ends. The subsequent localised _begin() call actually
+ * ensures that the reservation is available. Without the begin, if
+ * the request creator immediately submitted the request without
+ * adding any commands to it then there might not actually be
+ * sufficient room for the submission commands.
+ */
+ intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+ return intel_logical_ring_begin(request, 0);
+}
+
/**
* execlists_submission() - submit a batchbuffer for execution, Execlists style
* @dev: DRM device.
@@ -839,16 +829,15 @@ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
*
* Return: non-zero if the submission fails.
*/
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 dispatch_flags)
+ struct list_head *vmas)
{
+ struct drm_device *dev = params->dev;
+ struct intel_engine_cs *ring = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+ u64 exec_start;
int instp_mode;
u32 instp_mask;
int ret;
@@ -899,13 +888,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return -EINVAL;
}
- ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
+ ret = execlists_move_to_gpu(params->request, vmas);
if (ret)
return ret;
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+ ret = intel_logical_ring_begin(params->request, 4);
if (ret)
return ret;
@@ -918,14 +907,17 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode;
}
- ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
+ exec_start = params->batch_obj_vm_offset +
+ args->batch_start_offset;
+
+ ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
if (ret)
return ret;
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+ trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
- i915_gem_execbuffer_move_to_active(vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+ i915_gem_execbuffer_move_to_active(vmas, params->request);
+ i915_gem_execbuffer_retire_commands(params);
return 0;
}
@@ -950,7 +942,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
ctx->engine[ring->id].state;
if (ctx_obj && (ctx != ring->default_context))
- intel_lr_context_unpin(ring, ctx);
+ intel_lr_context_unpin(req);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
@@ -978,16 +970,15 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx)
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = ringbuf->ring;
+ struct intel_engine_cs *ring = req->ring;
int ret;
if (!ring->gpu_caches_dirty)
return 0;
- ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
+ ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -995,15 +986,15 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct intel_engine_cs *ring = rq->ring;
+ struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = rq->ringbuf;
int ret = 0;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (ctx->engine[ring->id].pin_count++ == 0) {
+ if (rq->ctx->engine[ring->id].pin_count++ == 0) {
ret = i915_gem_obj_ggtt_pin(ctx_obj,
GEN8_LR_CONTEXT_ALIGN, 0);
if (ret)
@@ -1019,31 +1010,31 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
reset_pin_count:
- ctx->engine[ring->id].pin_count = 0;
+ rq->ctx->engine[ring->id].pin_count = 0;
return ret;
}
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct intel_engine_cs *ring = rq->ring;
+ struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = rq->ringbuf;
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (--ctx->engine[ring->id].pin_count == 0) {
+ if (--rq->ctx->engine[ring->id].pin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
}
}
-static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct intel_engine_cs *ring = req->ring;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
@@ -1052,11 +1043,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf, ctx);
+ ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
- ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
+ ret = intel_logical_ring_begin(req, w->count * 2 + 2);
if (ret)
return ret;
@@ -1070,13 +1061,361 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
intel_logical_ring_advance(ringbuf);
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf, ctx);
+ ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
return 0;
}
+#define wa_ctx_emit(batch, index, cmd) \
+ do { \
+ int __index = (index)++; \
+ if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
+ return -ENOSPC; \
+ } \
+ batch[__index] = (cmd); \
+ } while (0)
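
wa_ctx_emit() is essentially a bounds-checked append into a one-page array of
dwords that bails out of the calling function when the page would overflow. A
minimal sketch of the same macro shape against an ordinary fixed-size buffer
(buffer size, values and names here are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define BATCH_DWORDS 8

#define emit(batch, index, cmd) \
    do { \
        uint32_t __index = (index)++; \
        if (__index >= BATCH_DWORDS) { \
            fprintf(stderr, "batch overflow at %u\n", __index); \
            return -1; \
        } \
        (batch)[__index] = (cmd); \
    } while (0)

static int fill_batch(uint32_t *batch)
{
    uint32_t index = 0;

    emit(batch, index, 0x10400000); /* arbitrary example dwords */
    emit(batch, index, 0x00000000);
    return (int)index;              /* number of dwords written */
}

int main(void)
{
    uint32_t batch[BATCH_DWORDS];

    printf("wrote %d dwords\n", fill_batch(batch));
    return 0;
}
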
+
+
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
+ * PIPE_CONTROL instruction. This is required for the flush to happen correctly
+ * but there is a slight complication as this is applied in a WA batch where the
+ * values are only initialized once, so we cannot take the register value at the
+ * beginning and reuse it further; hence we save its value to memory, upload a
+ * constant value with bit21 set and then we restore it back with the saved value.
+ * To simplify the WA, a constant value is formed by using the default value
+ * of this register. This shouldn't be a problem because we are only modifying
+ * it for a short period and this batch is non-preemptible. We can of course
+ * use additional instructions that read the actual value of the register
+ * at that time and set our bit of interest but it makes the WA complicated.
+ *
+ * This WA is also required for Gen9 so extracting as a function avoids
+ * code duplication.
+ */
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+ uint32_t *const batch,
+ uint32_t index)
+{
+ uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * WaDisableLSQCROPERFforOCL:skl
+ * This WA is implemented in skl_init_clock_gating() but since
+ * this batch updates GEN8_L3SQCREG4 with default value we need to
+ * set this bit here to retain the WA during flush.
+ */
+ if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+ l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
+
+ wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT));
+ wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+ wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, 0);
+
+ wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+ wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+ wa_ctx_emit(batch, index, l3sqc4_flush);
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE));
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+
+ wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT));
+ wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+ wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, 0);
+
+ return index;
+}
+
+static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t offset,
+ uint32_t start_alignment)
+{
+ return wa_ctx->offset = ALIGN(offset, start_alignment);
+}
+
+static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t offset,
+ uint32_t size_alignment)
+{
+ wa_ctx->size = offset - wa_ctx->offset;
+
+ WARN(wa_ctx->size % size_alignment,
+ "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
+ wa_ctx->size, size_alignment);
+ return 0;
+}
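
wa_ctx_start()/wa_ctx_end() only do alignment and size bookkeeping: round the
current dword index up to a cacheline boundary to get the batch's start, and
afterwards record how many dwords it consumed, warning if that size is not a
whole multiple of the required alignment. A worked example of that arithmetic
(the constants here are assumptions for illustration, not taken from the
driver's headers):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define CACHELINE_DWORDS    16  /* 64-byte cacheline / 4-byte dwords */

int main(void)
{
    uint32_t offset = 70;                                  /* dwords already used */
    uint32_t start = ALIGN_UP(offset, CACHELINE_DWORDS);   /* -> 80 */
    uint32_t end = 112;                                    /* index after emitting */
    uint32_t size = end - start;                           /* -> 32 dwords */

    printf("start=%u size=%u aligned=%s\n", start, size,
           (size % CACHELINE_DWORDS) ? "no" : "yes");
    return 0;
}
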
+
+/**
+ * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ * @batch: page in which WA are loaded
+ * @offset: specifies the start of the batch; it should be cache-aligned,
+ * otherwise it is adjusted accordingly. The number of DWORDs written is
+ * returned through this field.
+ * Typically we only have one indirect_ctx and one per_ctx batch buffer which
+ * are initialized at the beginning and shared across all contexts, but this
+ * field helps us to have multiple batches at different offsets and select them
+ * based on some criteria. At the moment this batch always starts at the
+ * beginning of the page and at this point we don't have multiple wa_ctx batch
+ * buffers.
+ *
+ * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
+ * so it adds NOOPs as padding to make it cacheline aligned.
+ * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
+ * make a complete batch buffer.
+ *
+ * Return: non-zero if we exceed the PAGE_SIZE limit.
+ */
+
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+ struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t *const batch,
+ uint32_t *offset)
+{
+ uint32_t scratch_addr;
+ uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+ /* WaDisableCtxRestoreArbitration:bdw,chv */
+ wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+ if (IS_BROADWELL(ring->dev)) {
+ index = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+ if (index < 0)
+ return index;
+ }
+
+ /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+ /* Actual scratch location is at 128 bytes offset */
+ scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ wa_ctx_emit(batch, index, scratch_addr);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+
+ /* Pad to end of cacheline */
+ while (index % CACHELINE_DWORDS)
+ wa_ctx_emit(batch, index, MI_NOOP);
+
+ /*
+ * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
+ * execution depends on the length specified in terms of cache lines
+ * in the register CTX_RCS_INDIRECT_CTX
+ */
+
+ return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+/**
+ * gen8_init_perctx_bb() - initialize per ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ * @batch: page in which WA are loaded
+ * @offset: specifies the start of this batch; the number of DWORDs written is
+ * returned through this field.
+ * This batch is started immediately after the indirect_ctx batch. Since we
+ * ensure that indirect_ctx ends on a cacheline, this batch is aligned
+ * automatically.
+ *
+ * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
+ * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
+ */
+static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+ struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t *const batch,
+ uint32_t *offset)
+{
+ uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+ /* WaDisableCtxRestoreArbitration:bdw,chv */
+ wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+ wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+ return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+ struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t *const batch,
+ uint32_t *offset)
+{
+ int ret;
+ struct drm_device *dev = ring->dev;
+ uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+ /* WaDisableCtxRestoreArbitration:skl,bxt */
+ if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+ (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+ wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
+ ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+ if (ret < 0)
+ return ret;
+ index = ret;
+
+ /* Pad to end of cacheline */
+ while (index % CACHELINE_DWORDS)
+ wa_ctx_emit(batch, index, MI_NOOP);
+
+ return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+ struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t *const batch,
+ uint32_t *offset)
+{
+ struct drm_device *dev = ring->dev;
+ uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+ if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
+ (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+ wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+ wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+ wa_ctx_emit(batch, index,
+ _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
+ wa_ctx_emit(batch, index, MI_NOOP);
+ }
+
+ /* WaDisableCtxRestoreArbitration:skl,bxt */
+ if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+ (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+ wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+ wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+ return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+{
+ int ret;
+
+ ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
+ if (!ring->wa_ctx.obj) {
+ DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
+ ret);
+ drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+{
+ if (ring->wa_ctx.obj) {
+ i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
+ drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+ ring->wa_ctx.obj = NULL;
+ }
+}
+
+static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+{
+ int ret;
+ uint32_t *batch;
+ uint32_t offset;
+ struct page *page;
+ struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+
+ WARN_ON(ring->id != RCS);
+
+ /* update this when WA for higher Gen are added */
+ if (INTEL_INFO(ring->dev)->gen > 9) {
+ DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
+ INTEL_INFO(ring->dev)->gen);
+ return 0;
+ }
+
+ /* some WA perform writes to scratch page, ensure it is valid */
+ if (ring->scratch.obj == NULL) {
+ DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+ return -EINVAL;
+ }
+
+ ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+ return ret;
+ }
+
+ page = i915_gem_object_get_page(wa_ctx->obj, 0);
+ batch = kmap_atomic(page);
+ offset = 0;
+
+ if (INTEL_INFO(ring->dev)->gen == 8) {
+ ret = gen8_init_indirectctx_bb(ring,
+ &wa_ctx->indirect_ctx,
+ batch,
+ &offset);
+ if (ret)
+ goto out;
+
+ ret = gen8_init_perctx_bb(ring,
+ &wa_ctx->per_ctx,
+ batch,
+ &offset);
+ if (ret)
+ goto out;
+ } else if (INTEL_INFO(ring->dev)->gen == 9) {
+ ret = gen9_init_indirectctx_bb(ring,
+ &wa_ctx->indirect_ctx,
+ batch,
+ &offset);
+ if (ret)
+ goto out;
+
+ ret = gen9_init_perctx_bb(ring,
+ &wa_ctx->per_ctx,
+ batch,
+ &offset);
+ if (ret)
+ goto out;
+ }
+
+out:
+ kunmap_atomic(batch);
+ if (ret)
+ lrc_destroy_wa_ctx_obj(ring);
+
+ return ret;
+}
+
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -1137,19 +1476,64 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
return init_workarounds_ring(ring);
}
-static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+{
+ struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+ struct intel_engine_cs *ring = req->ring;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
+ int i, ret;
+
+ ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+ if (ret)
+ return ret;
+
+ intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+ for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+ intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+ intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+ intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+ }
+
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
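
Each PDP load in the loop above becomes a pair of 32-bit register writes built
from a single 64-bit page-directory address, highest PDP first. A small sketch
of that upper/lower split with made-up addresses:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PDPS 4

int main(void)
{
    uint64_t pd_addr[NUM_PDPS] = {
        0x100000000ull, 0x1ff001000ull, 0x200002000ull, 0x23ffff000ull,
    };
    int i;

    /* highest PDP first, one UDW/LDW register pair per entry */
    for (i = NUM_PDPS - 1; i >= 0; i--) {
        printf("PDP%d_UDW <- 0x%08" PRIx32 "\n", i, (uint32_t)(pd_addr[i] >> 32));
        printf("PDP%d_LDW <- 0x%08" PRIx32 "\n", i, (uint32_t)pd_addr[i]);
    }
    return 0;
}
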
+
+static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags)
{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
- ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+ /* Don't rely on hw updating PDPs, especially in lite-restore.
+ * Ideally, we should set Force PD Restore in ctx descriptor,
+ * but we can't. Force Restore would be a second option, but
+ * it is unsafe in case of lite-restore (because the ctx is
+ * not idle). */
+ if (req->ctx->ppgtt &&
+ (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+ ret = intel_logical_ring_emit_pdps(req);
+ if (ret)
+ return ret;
+
+ req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+ }
+
+ ret = intel_logical_ring_begin(req, 4);
if (ret)
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+ intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+ (ppgtt<<8) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -1191,18 +1575,18 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int gen8_emit_flush(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 unused)
{
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+ ret = intel_logical_ring_begin(request, 4);
if (ret)
return ret;
@@ -1232,11 +1616,11 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa;
@@ -1268,7 +1652,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
- ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
+ ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
return ret;
@@ -1302,9 +1686,9 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
- struct drm_i915_gem_request *request)
+static int gen8_emit_request(struct drm_i915_gem_request *request)
{
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
@@ -1314,7 +1698,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
- ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
+ ret = intel_logical_ring_begin(request, 8);
if (ret)
return ret;
@@ -1326,11 +1710,10 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+ intel_logical_ring_advance_and_submit(request);
/*
* Here we add two extra NOOPs as padding to avoid
@@ -1343,49 +1726,53 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct render_state so;
- struct drm_i915_file_private *file_priv = ctx->file_priv;
- struct drm_file *file = file_priv ? file_priv->file : NULL;
int ret;
- ret = i915_gem_render_state_prepare(ring, &so);
+ ret = i915_gem_render_state_prepare(req->ring, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
- ret = ring->emit_bb_start(ringbuf,
- ctx,
- so.ggtt_offset,
- I915_DISPATCH_SECURE);
+ ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+ I915_DISPATCH_SECURE);
if (ret)
goto out;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+ ret = req->ring->emit_bb_start(req,
+ (so.ggtt_offset + so.aux_batch_offset),
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
- ret = __i915_add_request(ring, file, so.obj);
- /* intel_logical_ring_add_request moves object to inactive if it
- * fails */
out:
i915_gem_render_state_fini(&so);
return ret;
}
-static int gen8_init_rcs_context(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
- ret = intel_logical_ring_workarounds_emit(ring, ctx);
+ ret = intel_logical_ring_workarounds_emit(req);
if (ret)
return ret;
- return intel_lr_context_render_state_init(ring, ctx);
+ ret = intel_rcs_context_init_mocs(req);
+ /*
+ * Failing to program the MOCS is non-fatal. The system will not
+ * run at peak performance. So generate an error and carry on.
+ */
+ if (ret)
+ DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+
+ return intel_lr_context_render_state_init(req);
}
/**
@@ -1405,7 +1792,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
- i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -1417,6 +1803,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
kunmap(sg_page(ring->status_page.obj->pages->sgl));
ring->status_page.obj = NULL;
}
+
+ lrc_destroy_wa_ctx_obj(ring);
}
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@@ -1476,11 +1864,28 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->emit_bb_start = gen8_emit_bb_start;
ring->dev = dev;
- ret = logical_ring_init(dev, ring);
+
+ ret = intel_init_pipe_control(ring);
if (ret)
return ret;
- return intel_init_pipe_control(ring);
+ ret = intel_init_workaround_bb(ring);
+ if (ret) {
+ /*
+ * We continue even if we fail to initialize WA batch
+ * because we only expect rare glitches but nothing
+ * critical to prevent us from using GPU
+ */
+ DRM_ERROR("WA batch buffer initialization failed: %d\n",
+ ret);
+ }
+
+ ret = logical_ring_init(dev, ring);
+ if (ret) {
+ lrc_destroy_wa_ctx_obj(ring);
+ }
+
+ return ret;
}
static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1735,7 +2140,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1760,15 +2166,27 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
reg_state[CTX_SECOND_BB_STATE+1] = 0;
if (ring->id == RCS) {
- /* TODO: according to BSpec, the register state context
- * for CHV does not have these. OTOH, these registers do
- * exist in CHV. I'm waiting for a clarification */
reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+ if (ring->wa_ctx.obj) {
+ struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+ uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+
+ reg_state[CTX_RCS_INDIRECT_CTX+1] =
+ (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
+ (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
+
+ reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
+ CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+
+ reg_state[CTX_BB_PER_CTX_PTR+1] =
+ (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
+ 0x01;
+ }
}
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
@@ -1973,13 +2391,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
lrc_setup_hardware_status_page(ring, ctx_obj);
else if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
- ret = ring->init_context(ring, ctx);
+ struct drm_i915_gem_request *req;
+
+ ret = i915_gem_request_alloc(ring, ctx, &req);
+ if (ret)
+ return ret;
+
+ ret = ring->init_context(req);
if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
+ i915_gem_request_cancel(req);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
goto error;
}
+
+ i915_add_request_no_flush(req);
}
ctx->rcs_initialized = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 04d3a6d8b207..64f89f9982a2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -32,18 +32,19 @@
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
+#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
/* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
- struct intel_context *ctx);
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx);
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
@@ -70,20 +71,16 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+void intel_lr_context_unpin(struct drm_i915_gem_request *req);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+struct i915_execbuffer_params;
+int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 dispatch_flags);
+ struct list_head *vmas);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 161ab26f81fb..881b5d13592e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -239,8 +239,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- struct intel_connector *intel_connector =
- &lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
@@ -252,8 +250,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
stat_reg = PP_STATUS;
}
- intel_panel_disable_backlight(intel_connector);
-
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
@@ -262,6 +258,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
POSTING_READ(lvds_encoder->reg);
}
+static void gmch_disable_lvds(struct intel_encoder *encoder)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+
+ intel_panel_disable_backlight(intel_connector);
+
+ intel_disable_lvds(encoder);
+}
+
+static void pch_disable_lvds(struct intel_encoder *encoder)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+
+ intel_panel_disable_backlight(intel_connector);
+}
+
+static void pch_post_disable_lvds(struct intel_encoder *encoder)
+{
+ intel_disable_lvds(encoder);
+}
+
static enum drm_mode_status
intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -452,7 +473,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
if (!HAS_PCH_SPLIT(dev)) {
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
+ intel_display_resume(dev);
drm_modeset_unlock_all(dev);
}
@@ -528,7 +549,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -942,12 +963,6 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- pin = GMBUS_PIN_PANEL;
- if (!lvds_is_present_in_vbt(dev, &pin)) {
- DRM_DEBUG_KMS("LVDS is not present in VBT\n");
- return;
- }
-
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
@@ -957,6 +972,16 @@ void intel_lvds_init(struct drm_device *dev)
}
}
+ pin = GMBUS_PIN_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS;
+ if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
+ }
+ DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+ }
+
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return;
@@ -988,7 +1013,12 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
- intel_encoder->disable = intel_disable_lvds;
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_encoder->disable = pch_disable_lvds;
+ intel_encoder->post_disable = pch_post_disable_lvds;
+ } else {
+ intel_encoder->disable = gmch_disable_lvds;
+ }
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1068,24 +1098,8 @@ void intel_lvds_init(struct drm_device *dev)
drm_mode_debug_printmodeline(scan);
fixed_mode = drm_mode_duplicate(dev, scan);
- if (fixed_mode) {
- downclock_mode =
- intel_find_panel_downclock(dev,
- fixed_mode, connector);
- if (downclock_mode != NULL &&
- i915.lvds_downclock) {
- /* We found the downclock for LVDS. */
- dev_priv->lvds_downclock_avail = true;
- dev_priv->lvds_downclock =
- downclock_mode->clock;
- DRM_DEBUG_KMS("LVDS downclock is found"
- " in EDID. Normal clock %dKhz, "
- "downclock %dKhz\n",
- fixed_mode->clock,
- dev_priv->lvds_downclock);
- }
+ if (fixed_mode)
goto out;
- }
}
}
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
new file mode 100644
index 000000000000..6d3c6c0a5c62
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_mocs.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+/* structures required */
+struct drm_i915_mocs_entry {
+ u32 control_value;
+ u16 l3cc_value;
+};
+
+struct drm_i915_mocs_table {
+ u32 size;
+ const struct drm_i915_mocs_entry *table;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define LE_CACHEABILITY(value) ((value) << 0)
+#define LE_TGT_CACHE(value) ((value) << 2)
+#define LE_LRUM(value) ((value) << 4)
+#define LE_AOM(value) ((value) << 6)
+#define LE_RSC(value) ((value) << 7)
+#define LE_SCC(value) ((value) << 8)
+#define LE_PFM(value) ((value) << 11)
+#define LE_SCF(value) ((value) << 14)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value) ((value) << 0)
+#define L3_SCC(value) ((value) << 1)
+#define L3_CACHEABILITY(value) ((value) << 4)
+
+/* Helper defines */
+#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */
+
+/* (e)LLC caching options */
+#define LE_PAGETABLE 0
+#define LE_UC 1
+#define LE_WT 2
+#define LE_WB 3
+
+/* L3 caching options */
+#define L3_DIRECT 0
+#define L3_UC 1
+#define L3_RESERVED 2
+#define L3_WB 3
+
+/* Target cache */
+#define ELLC 0
+#define LLC 1
+#define LLC_ELLC 2
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc_values are only programmed to the
+ * LNCFCMOCS0 - LNCFCMOCS32 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * platforms. However some of the fields are not applicable to all of
+ * them.
+ *
+ * Entries not part of the following tables are undefined as far as
+ * userspace is concerned and shouldn't be relied upon. For the time
+ * being they will be implicitly initialized to the strictest caching
+ * configuration (uncached) to guarantee forwards compatibility with
+ * userspace programs written against more recent kernels providing
+ * additional MOCS entries.
+ *
+ * NOTE: These tables MUST start with being uncached and the length
+ * MUST be less than 63 as the last two registers are reserved
+ * by the hardware. These tables are part of the kernel ABI and
+ * may only be updated incrementally by adding entries at the
+ * end.
+ */
+static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+ /* { 0x00000009, 0x0010 } */
+ { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+ /* { 0x00000038, 0x0030 } */
+ { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+ /* { 0x0000003b, 0x0030 } */
+ { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
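A minimal standalone sketch showing how the hex values quoted in the comments above fall out of the LE_* field macros; it re-defines the relevant shifts locally and is illustrative only, not part of the patch.

#include <stdio.h>

/* Same field layout as the LE_* macros in this file (illustrative copy). */
#define LE_CACHEABILITY(v) ((v) << 0)
#define LE_TGT_CACHE(v)    ((v) << 2)
#define LE_LRUM(v)         ((v) << 4)

int main(void)
{
	/* LE_WB = 3, LLC_ELLC = 2, LRUM = 3 -> expected 0x0000003b */
	unsigned int control = LE_CACHEABILITY(3) | LE_TGT_CACHE(2) | LE_LRUM(3);

	printf("control value = 0x%08x\n", control); /* prints 0x0000003b */
	return 0;
}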
+
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
+static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+ /* { 0x00000009, 0x0010 } */
+ { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+ /* { 0x00000038, 0x0030 } */
+ { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+ /* { 0x0000003b, 0x0030 } */
+ { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
+/**
+ * get_mocs_settings()
+ * @dev: DRM device.
+ * @table: Output table that will be made to point at appropriate
+ * MOCS values for the device.
+ *
+ * This function looks up the MOCS table that needs to be programmed
+ * for the platform and reports whether there is one to program at all.
+ *
+ * Return: true if there are applicable MOCS settings for the device.
+ */
+static bool get_mocs_settings(struct drm_device *dev,
+ struct drm_i915_mocs_table *table)
+{
+ bool result = false;
+
+ if (IS_SKYLAKE(dev)) {
+ table->size = ARRAY_SIZE(skylake_mocs_table);
+ table->table = skylake_mocs_table;
+ result = true;
+ } else if (IS_BROXTON(dev)) {
+ table->size = ARRAY_SIZE(broxton_mocs_table);
+ table->table = broxton_mocs_table;
+ result = true;
+ } else {
+ WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+ "Platform that should have a MOCS table does not.\n");
+ }
+
+ return result;
+}
+
+/**
+ * emit_mocs_control_table() - emit the mocs control table
+ * @req: Request to set up the MOCS table for.
+ * @table: The values to program into the control regs.
+ * @reg_base: The base for the engine that needs to be programmed.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+ const struct drm_i915_mocs_table *table,
+ u32 reg_base)
+{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ unsigned int index;
+ int ret;
+
+ if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+ return -ENODEV;
+
+ ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ if (ret) {
+ DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ return ret;
+ }
+
+ intel_logical_ring_emit(ringbuf,
+ MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+
+ for (index = 0; index < table->size; index++) {
+ intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+ intel_logical_ring_emit(ringbuf,
+ table->table[index].control_value);
+ }
+
+ /*
+ * Ok, now set the unused entries to uncached. These entries
+ * are officially undefined and no contract for the contents
+ * or settings is given for them.
+ *
+ * Entry 0 in the table is uncached - so we are just writing
+ * that value to all the remaining unused entries.
+ */
+ for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+ intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+ intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+ }
+
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
+
+/**
+ * emit_mocs_l3cc_table() - emit the mocs l3cc table
+ * @req: Request to set up the MOCS table for.
+ * @table: The values to program into the control regs.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table into the LNCFCMOCS registers. This register set is
+ * programmed in pairs.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+ const struct drm_i915_mocs_table *table)
+{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ unsigned int count;
+ unsigned int i;
+ u32 value;
+ u32 filler = (table->table[0].l3cc_value & 0xffff) |
+ ((table->table[0].l3cc_value & 0xffff) << 16);
+ int ret;
+
+ if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+ return -ENODEV;
+
+ ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ if (ret) {
+ DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ return ret;
+ }
+
+ intel_logical_ring_emit(ringbuf,
+ MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+
+ for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
+ value = (table->table[count].l3cc_value & 0xffff) |
+ ((table->table[count + 1].l3cc_value & 0xffff) << 16);
+
+ intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+ intel_logical_ring_emit(ringbuf, value);
+ }
+
+ if (table->size & 0x01) {
+ /* Odd table size - 1 left over */
+ value = (table->table[count].l3cc_value & 0xffff) |
+ ((table->table[0].l3cc_value & 0xffff) << 16);
+ } else
+ value = filler;
+
+ /*
+ * Now set the rest of the table to uncached - use entry 0 as
+ * this will be uncached. Leave the last pair uninitialised as
+ * they are reserved by the hardware.
+ */
+ for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+ intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+ intel_logical_ring_emit(ringbuf, value);
+
+ value = filler;
+ }
+
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
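A small sketch of the 16-bit pairing used when writing the LNCFCMOCS registers above: two l3cc entries share one 32-bit register, with the even index in the low half and the odd index in the high half. The values below are the entry-0 and entry-1 l3cc values quoted in the tables in this file; the sketch is illustrative, not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Pack two 16-bit l3cc values into one LNCFCMOCS register word. */
static uint32_t pack_l3cc(uint16_t even_entry, uint16_t odd_entry)
{
	return (even_entry & 0xffff) | ((uint32_t)(odd_entry & 0xffff) << 16);
}

int main(void)
{
	/* Entries 0 and 1 from the tables above: 0x0010 and 0x0030. */
	printf("LNCFCMOCS0 = 0x%08x\n", pack_l3cc(0x0010, 0x0030)); /* 0x00300010 */
	return 0;
}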
+
+/**
+ * intel_rcs_context_init_mocs() - program the MOCS register.
+ * @req: Request to set up the MOCS tables for.
+ *
+ * This function will emit a batch buffer with the values required for
+ * programming the MOCS register values for all the currently supported
+ * rings.
+ *
+ * These registers are partially stored in the RCS context, so they are
+ * emitted at the same time as the context, ensuring they are set up when
+ * a context is created. They have to be emitted at the start of the
+ * context as setting the ELSP will re-init some of these registers back
+ * to the hw values.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+{
+ struct drm_i915_mocs_table t;
+ int ret;
+
+ if (get_mocs_settings(req->ring->dev, &t)) {
+ /* Program the control registers */
+ ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
+ if (ret)
+ return ret;
+
+ ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
+ if (ret)
+ return ret;
+
+ ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
+ if (ret)
+ return ret;
+
+ ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
+ if (ret)
+ return ret;
+
+ ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
+ if (ret)
+ return ret;
+
+ /* Now program the l3cc registers */
+ ret = emit_mocs_l3cc_table(req, &t);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
new file mode 100644
index 000000000000..76e45b1748b3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INTEL_MOCS_H
+#define INTEL_MOCS_H
+
+/**
+ * DOC: Memory Objects Control State (MOCS)
+ *
+ * Motivation:
+ * In previous gens the MOCS setting was a value supplied by userland as
+ * part of the batch. In Gen9 this has changed to be a single table (per ring)
+ * that all batches now reference by index instead of programming the MOCS
+ * directly.
+ *
+ * The one wrinkle in this is that only PART of the MOCS tables is included
+ * in the context (the GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 -
+ * LNCFCMOCS32 registers); the settings for the other rings are not.
+ *
+ * This table needs to be set at system start-up because of the way the table
+ * interacts with the contexts and the GmmLib interface.
+ *
+ * Implementation:
+ *
+ * The tables (one per supported platform) are defined in intel_mocs.c
+ * and are programmed in the first batch after the context is loaded
+ * (with the hardware workarounds). This will then let the usual
+ * context handling keep the MOCS in step.
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 481337436f72..cb1c65739425 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,8 +25,6 @@
*
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/acpi.h>
#include <acpi/video.h>
@@ -53,6 +51,7 @@
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)
+#define MBOX_ASLE_EXT (1<<4)
struct opregion_header {
u8 signature[16];
@@ -62,7 +61,10 @@ struct opregion_header {
u8 vbios_ver[16];
u8 driver_ver[16];
u32 mboxes;
- u8 reserved[164];
+ u32 driver_model;
+ u32 pcon;
+ u8 dver[32];
+ u8 rsvd[124];
} __packed;
/* OpRegion mailbox #1: public ACPI methods */
@@ -84,7 +86,9 @@ struct opregion_acpi {
u32 evts; /* ASL supported events */
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
- u8 rsvd2[60];
+ u32 did2[7]; /* extended supported display devices ID list */
+ u32 cpd2[7]; /* extended attached display devices list */
+ u8 rsvd2[4];
} __packed;
/* OpRegion mailbox #2: SWSCI */
@@ -113,7 +117,10 @@ struct opregion_asle {
u32 pcft; /* power conservation features */
u32 srot; /* supported rotation angles */
u32 iuer; /* IUER events */
- u8 rsvd[86];
+ u64 fdss;
+ u32 fdsp;
+ u32 stat;
+ u8 rsvd[70];
} __packed;
/* Driver readiness indicator */
@@ -611,6 +618,38 @@ static struct notifier_block intel_opregion_notifier = {
* (version 3)
*/
+static u32 get_did(struct intel_opregion *opregion, int i)
+{
+ u32 did;
+
+ if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+ did = ioread32(&opregion->acpi->didl[i]);
+ } else {
+ i -= ARRAY_SIZE(opregion->acpi->didl);
+
+ if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+ return 0;
+
+ did = ioread32(&opregion->acpi->did2[i]);
+ }
+
+ return did;
+}
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+ if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+ iowrite32(val, &opregion->acpi->didl[i]);
+ } else {
+ i -= ARRAY_SIZE(opregion->acpi->didl);
+
+ if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+ return;
+
+ iowrite32(val, &opregion->acpi->did2[i]);
+ }
+}
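A tiny sketch of the index split the get_did()/set_did() helpers above implement: indices 0..7 land in the legacy didl array, and the rest in the did2 extension added by this patch. The array sizes match the opregion ACPI layout above; the loop itself is only illustrative.

#include <stdio.h>

#define DIDL_ENTRIES 8	/* legacy supported-display list */
#define DID2_ENTRIES 7	/* extended list added by this patch */

int main(void)
{
	int i;

	for (i = 0; i < DIDL_ENTRIES + DID2_ENTRIES; i++) {
		if (i < DIDL_ENTRIES)
			printf("output %2d -> didl[%d]\n", i, i);
		else
			printf("output %2d -> did2[%d]\n", i, i - DIDL_ENTRIES);
	}
	return 0;
}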
+
static void intel_didl_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -620,7 +659,7 @@ static void intel_didl_outputs(struct drm_device *dev)
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
- u32 temp;
+ u32 temp, max_outputs;
int i = 0;
handle = ACPI_HANDLE(&dev->pdev->dev);
@@ -639,41 +678,50 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- pr_warn("No ACPI video bus found\n");
+ DRM_ERROR("No ACPI video bus found\n");
return;
}
+ /*
+ * In theory, did2, the extended didl, gets added at opregion version
+ * 3.0. In practice, however, we're supposed to set it for earlier
+ * versions as well, since a BIOS that doesn't understand did2 should
+ * not look at it anyway. Use a variable so we can tweak this if a need
+ * arises later.
+ */
+ max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+ ARRAY_SIZE(opregion->acpi->did2);
+
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
- if (i >= 8) {
- dev_dbg(&dev->pdev->dev,
- "More than 8 outputs detected via ACPI\n");
+ if (i >= max_outputs) {
+ DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
+ max_outputs);
return;
}
- status =
- acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
- NULL, &device_id);
+ status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
- iowrite32((u32)(device_id & 0x0f0f),
- &opregion->acpi->didl[i]);
- i++;
+ set_did(opregion, i++, (u32)(device_id & 0x0f0f));
}
}
end:
- /* If fewer than 8 outputs, the list must be null terminated */
- if (i < 8)
- iowrite32(0, &opregion->acpi->didl[i]);
+ DRM_DEBUG_KMS("%d outputs detected\n", i);
+
+ /* If fewer than max outputs, the list must be null terminated */
+ if (i < max_outputs)
+ set_did(opregion, i, 0);
return;
blind_set:
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
- if (i >= 8) {
- dev_dbg(&dev->pdev->dev,
- "More than 8 outputs in connector list\n");
+ if (i >= max_outputs) {
+ DRM_DEBUG_KMS("More than %u outputs in connector list\n",
+ max_outputs);
return;
}
switch (connector->connector_type) {
@@ -698,9 +746,8 @@ blind_set:
output_type = ACPI_LVDS_OUTPUT;
break;
}
- temp = ioread32(&opregion->acpi->didl[i]);
- iowrite32(temp | (1<<31) | output_type | i,
- &opregion->acpi->didl[i]);
+ temp = get_did(opregion, i);
+ set_did(opregion, i, temp | (1 << 31) | output_type | i);
i++;
}
goto end;
@@ -720,7 +767,7 @@ static void intel_setup_cadls(struct drm_device *dev)
* display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
* there are less than eight devices. */
do {
- disp_id = ioread32(&opregion->acpi->didl[i]);
+ disp_id = get_did(opregion, i);
iowrite32(disp_id, &opregion->acpi->cadl[i]);
} while (++i < 8 && disp_id != 0);
}
@@ -852,6 +899,11 @@ int intel_opregion_setup(struct drm_device *dev)
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
+ BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 25c8ec697da1..444542696a2c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -210,19 +210,14 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
}
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ struct drm_i915_gem_request *req,
void (*tail)(struct intel_overlay *))
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
WARN_ON(overlay->last_flip_req);
- i915_gem_request_assign(&overlay->last_flip_req,
- ring->outstanding_lazy_request);
- ret = i915_add_request(ring);
- if (ret)
- return ret;
+ i915_gem_request_assign(&overlay->last_flip_req, req);
+ i915_add_request(req);
overlay->flip_tail = tail;
ret = i915_wait_request(overlay->last_flip_req);
@@ -239,15 +234,22 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- ret = intel_ring_begin(ring, 4);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
+ ret = intel_ring_begin(req, 4);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
+
overlay->active = true;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -256,7 +258,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- return intel_overlay_do_wait_request(overlay, NULL);
+ return intel_overlay_do_wait_request(overlay, req, NULL);
}
/* overlay needs to be enabled in OCMD reg */
@@ -266,6 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
@@ -280,18 +283,25 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- ret = intel_ring_begin(ring, 2);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
+ ret = intel_ring_begin(req, 2);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
+
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
WARN_ON(overlay->last_flip_req);
- i915_gem_request_assign(&overlay->last_flip_req,
- ring->outstanding_lazy_request);
- return i915_add_request(ring);
+ i915_gem_request_assign(&overlay->last_flip_req, req);
+ i915_add_request(req);
+
+ return 0;
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -327,6 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
int ret;
@@ -338,10 +349,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- ret = intel_ring_begin(ring, 6);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
+ ret = intel_ring_begin(req, 6);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
+
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
@@ -360,7 +377,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
}
intel_ring_advance(ring);
- return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+ return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
@@ -404,15 +421,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
- ret = intel_ring_begin(ring, 2);
+ struct drm_i915_gem_request *req;
+
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
+ ret = intel_ring_begin(req, 2);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
+
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- ret = intel_overlay_do_wait_request(overlay,
+ ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@@ -724,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
&i915_ggtt_view_normal);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 55aad2322e10..e2ab3f6ed022 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -32,8 +32,11 @@
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/pwm.h>
#include "intel_drv.h"
+#define CRC_PMIC_PWM_PERIOD_NS 21333
+
void
intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -544,6 +547,15 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
return I915_READ(BXT_BLC_PWM_DUTY1);
}
+static u32 pwm_get_backlight(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+ int duty_ns;
+
+ duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
+ return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+}
+
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -632,6 +644,14 @@ static void bxt_set_backlight(struct intel_connector *connector, u32 level)
I915_WRITE(BXT_BLC_PWM_DUTY1, level);
}
+static void pwm_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct intel_panel *panel = &connector->panel;
+ int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+ pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+}
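A standalone sketch of the percentage/duty-cycle conversion used by the CRC PMIC PWM backlight hooks above, using the 21333 ns period from this patch; the requested level is a made-up example, and the local DIV_ROUND_UP mirrors the kernel macro.

#include <stdio.h>

#define CRC_PMIC_PWM_PERIOD_NS 21333

/* Round-up division, equivalent to the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int level = 50;	/* requested backlight, percent (hypothetical) */
	int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);

	printf("pwm_set_backlight(%d%%) -> duty_cycle = %d ns\n", level, duty_ns);

	/* readback direction used by pwm_get_backlight(); rounding up in
	 * both directions can overshoot the original level by one percent */
	printf("duty_cycle %d ns -> level = %d%%\n",
	       duty_ns, DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS));
	return 0;
}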
+
static void
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
{
@@ -769,6 +789,16 @@ static void bxt_disable_backlight(struct intel_connector *connector)
I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
}
+static void pwm_disable_backlight(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ /* Disable the backlight */
+ pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+ usleep_range(2000, 3000);
+ pwm_disable(panel->backlight.pwm);
+}
+
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -1010,6 +1040,14 @@ static void bxt_enable_backlight(struct intel_connector *connector)
I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
}
+static void pwm_enable_backlight(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ pwm_enable(panel->backlight.pwm);
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
void intel_panel_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -1386,6 +1424,40 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
return 0;
}
+static int pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct intel_panel *panel = &connector->panel;
+ int retval;
+
+ /* Get the PWM chip for backlight control */
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+ if (IS_ERR(panel->backlight.pwm)) {
+ DRM_ERROR("Failed to own the pwm chip\n");
+ panel->backlight.pwm = NULL;
+ return -ENODEV;
+ }
+
+ retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
+ CRC_PMIC_PWM_PERIOD_NS);
+ if (retval < 0) {
+ DRM_ERROR("Failed to configure the pwm chip\n");
+ pwm_put(panel->backlight.pwm);
+ panel->backlight.pwm = NULL;
+ return retval;
+ }
+
+ panel->backlight.min = 0; /* 0% */
+ panel->backlight.max = 100; /* 100% */
+ panel->backlight.level = DIV_ROUND_UP(
+ pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+ CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
@@ -1429,6 +1501,10 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
+ /* dispose of the pwm */
+ if (panel->backlight.pwm)
+ pwm_put(panel->backlight.pwm);
+
panel->backlight.present = false;
}
@@ -1456,11 +1532,19 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
dev_priv->display.set_backlight = pch_set_backlight;
dev_priv->display.get_backlight = pch_get_backlight;
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.setup_backlight = vlv_setup_backlight;
- dev_priv->display.enable_backlight = vlv_enable_backlight;
- dev_priv->display.disable_backlight = vlv_disable_backlight;
- dev_priv->display.set_backlight = vlv_set_backlight;
- dev_priv->display.get_backlight = vlv_get_backlight;
+ if (dev_priv->vbt.has_mipi) {
+ dev_priv->display.setup_backlight = pwm_setup_backlight;
+ dev_priv->display.enable_backlight = pwm_enable_backlight;
+ dev_priv->display.disable_backlight = pwm_disable_backlight;
+ dev_priv->display.set_backlight = pwm_set_backlight;
+ dev_priv->display.get_backlight = pwm_get_backlight;
+ } else {
+ dev_priv->display.setup_backlight = vlv_setup_backlight;
+ dev_priv->display.enable_backlight = vlv_enable_backlight;
+ dev_priv->display.disable_backlight = vlv_disable_backlight;
+ dev_priv->display.set_backlight = vlv_set_backlight;
+ dev_priv->display.get_backlight = vlv_get_backlight;
+ }
} else if (IS_GEN4(dev)) {
dev_priv->display.setup_backlight = i965_setup_backlight;
dev_priv->display.enable_backlight = i965_enable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eadc15cddbeb..fff0c22682ee 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -59,6 +59,10 @@ static void gen9_init_clock_gating(struct drm_device *dev)
/* WaEnableLbsSlaRetryTimerDecrement:skl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaDisableKillLogic:bxt,skl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ ECOCHK_DIS_TLB);
}
static void skl_init_clock_gating(struct drm_device *dev)
@@ -91,10 +95,19 @@ static void skl_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
}
+ /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+ * involving this register should also be added to WA batch as required.
+ */
if (INTEL_REVID(dev) <= SKL_REVID_E0)
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
+
+ /* WaEnableGapsTsvCreditFix:skl */
+ if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+ }
}
static void bxt_init_clock_gating(struct drm_device *dev)
@@ -334,22 +347,26 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
if (IS_VALLEYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- if (IS_CHERRYVIEW(dev))
- chv_set_memory_pm5(dev_priv, enable);
+ POSTING_READ(FW_BLC_SELF_VLV);
+ dev_priv->wm.vlv.cxsr = enable;
} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ POSTING_READ(FW_BLC_SELF);
} else if (IS_PINEVIEW(dev)) {
val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
+ POSTING_READ(DSPFW3);
} else if (IS_I945G(dev) || IS_I945GM(dev)) {
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
+ POSTING_READ(FW_BLC_SELF);
} else if (IS_I915GM(dev)) {
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
I915_WRITE(INSTPM, val);
+ POSTING_READ(INSTPM);
} else {
return;
}
@@ -923,223 +940,484 @@ static void vlv_write_wm_values(struct intel_crtc *crtc,
FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
}
- POSTING_READ(DSPFW1);
+ /* zero (unused) WM1 watermarks */
+ I915_WRITE(DSPFW4, 0);
+ I915_WRITE(DSPFW5, 0);
+ I915_WRITE(DSPFW6, 0);
+ I915_WRITE(DSPHOWM1, 0);
- dev_priv->wm.vlv = *wm;
+ POSTING_READ(DSPFW1);
}
#undef FW_WM_VLV
-static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
- struct drm_plane *plane)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int entries, prec_mult, drain_latency, pixel_size;
- int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
- const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
-
- /*
- * FIXME the plane might have an fb
- * but be invisible (eg. due to clipping)
- */
- if (!intel_crtc->active || !plane->state->fb)
- return 0;
+enum vlv_wm_level {
+ VLV_WM_LEVEL_PM2,
+ VLV_WM_LEVEL_PM5,
+ VLV_WM_LEVEL_DDR_DVFS,
+ CHV_WM_NUM_LEVELS,
+ VLV_WM_NUM_LEVELS = 1,
+};
- if (WARN(clock == 0, "Pixel clock is zero!\n"))
- return 0;
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+ unsigned int pipe_htotal,
+ unsigned int horiz_pixels,
+ unsigned int bytes_per_pixel,
+ unsigned int latency)
+{
+ unsigned int ret;
- pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
+ ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+ ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+ ret = DIV_ROUND_UP(ret, 64);
- if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
- return 0;
+ return ret;
+}
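A worked, standalone copy of the method-2 watermark formula above. The pipe parameters (pixel rate, htotal, width, bytes per pixel, latency) are made up for the example and do not describe any real platform.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* latency in 0.1 us units, mirroring vlv_wm_method2() above */
static unsigned int wm_method2(unsigned int pixel_rate, unsigned int pipe_htotal,
			       unsigned int horiz_pixels, unsigned int bytes_per_pixel,
			       unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	return DIV_ROUND_UP(ret, 64);
}

int main(void)
{
	/* hypothetical 1920x1080 pipe: 148500 kHz clock, htotal 2200,
	 * 4 bytes/pixel, PM2 latency 3 us (30 in 0.1 us units) */
	printf("wm = %u (64-byte FIFO blocks)\n",
	       wm_method2(148500, 2200, 1920, 4, 30)); /* prints 120 */
	return 0;
}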
- entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+static void vlv_setup_wm_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- prec_mult = high_precision;
- drain_latency = 64 * prec_mult * 4 / entries;
+ /* all latencies in usec */
+ dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- if (drain_latency > DRAIN_LATENCY_MASK) {
- prec_mult /= 2;
- drain_latency = 64 * prec_mult * 4 / entries;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
}
-
- if (drain_latency > DRAIN_LATENCY_MASK)
- drain_latency = DRAIN_LATENCY_MASK;
-
- return drain_latency | (prec_mult == high_precision ?
- DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
}
-static int vlv_compute_wm(struct intel_crtc *crtc,
- struct intel_plane *plane,
- int fifo_size)
+static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
+ struct intel_crtc *crtc,
+ const struct intel_plane_state *state,
+ int level)
{
- int clock, entries, pixel_size;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ int clock, htotal, pixel_size, width, wm;
- /*
- * FIXME the plane might have an fb
- * but be invisible (eg. due to clipping)
- */
- if (!crtc->active || !plane->base.state->fb)
+ if (dev_priv->wm.pri_latency[level] == 0)
+ return USHRT_MAX;
+
+ if (!state->visible)
return 0;
- pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+ pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
clock = crtc->config->base.adjusted_mode.crtc_clock;
+ htotal = crtc->config->base.adjusted_mode.crtc_htotal;
+ width = crtc->config->pipe_src_w;
+ if (WARN_ON(htotal == 0))
+ htotal = 1;
- entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+ /*
+ * FIXME the formula gives values that are
+ * too big for the cursor FIFO, and hence we
+ * would never be able to use cursors. For
+ * now just hardcode the watermark.
+ */
+ wm = 63;
+ } else {
+ wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+ dev_priv->wm.pri_latency[level] * 10);
+ }
- /*
- * Set up the watermark such that we don't start issuing memory
- * requests until we are within PND's max deadline value (256us).
- * Idea being to be idle as long as possible while still taking
- * advatange of PND's deadline scheduling. The limit of 8
- * cachelines (used when the FIFO will anyway drain in less time
- * than 256us) should match what we would be done if trickle
- * feed were enabled.
- */
- return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
+ return min_t(int, wm, USHRT_MAX);
}
-static bool vlv_compute_sr_wm(struct drm_device *dev,
- struct vlv_wm_values *wm)
+static void vlv_compute_fifo(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
- enum pipe pipe = INVALID_PIPE;
- int num_planes = 0;
- int fifo_size = 0;
+ struct drm_device *dev = crtc->base.dev;
+ struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
+ unsigned int total_rate = 0;
+ const int fifo_size = 512 - 1;
+ int fifo_extra, fifo_left = fifo_size;
- wm->sr.cursor = wm->sr.plane = 0;
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ struct intel_plane_state *state =
+ to_intel_plane_state(plane->base.state);
- crtc = single_enabled_crtc(dev);
- /* maxfifo not supported on pipe C */
- if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
- pipe = to_intel_crtc(crtc)->pipe;
- num_planes = !!wm->pipe[pipe].primary +
- !!wm->pipe[pipe].sprite[0] +
- !!wm->pipe[pipe].sprite[1];
- fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (state->visible) {
+ wm_state->num_active_planes++;
+ total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ }
}
- if (fifo_size == 0 || num_planes > 1)
- return false;
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ struct intel_plane_state *state =
+ to_intel_plane_state(plane->base.state);
+ unsigned int rate;
+
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+ plane->wm.fifo_size = 63;
+ continue;
+ }
+
+ if (!state->visible) {
+ plane->wm.fifo_size = 0;
+ continue;
+ }
+
+ rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ plane->wm.fifo_size = fifo_size * rate / total_rate;
+ fifo_left -= plane->wm.fifo_size;
+ }
+
+ fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
- wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
- to_intel_plane(crtc->cursor), 0x3f);
+ /* spread the remainder evenly */
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ int plane_extra;
+
+ if (fifo_left == 0)
+ break;
- list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
continue;
- if (plane->pipe != pipe)
+ /* give it all to the first plane if none are active */
+ if (plane->wm.fifo_size == 0 &&
+ wm_state->num_active_planes)
continue;
- wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
- plane, fifo_size);
- if (wm->sr.plane != 0)
+ plane_extra = min(fifo_extra, fifo_left);
+ plane->wm.fifo_size += plane_extra;
+ fifo_left -= plane_extra;
+ }
+
+ WARN_ON(fifo_left != 0);
+}
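A standalone sketch of the proportional FIFO split done by vlv_compute_fifo() above: the 511 usable entries are divided between visible planes by bytes per pixel, and the rounding remainder is then spread one chunk per plane. The plane count and cpp values are made up for the example.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const int fifo_size = 512 - 1;	/* usable DSPARB entries per pipe */
	int cpp[] = { 4, 2 };		/* visible planes: primary + one sprite */
	int nplanes = 2;
	int alloc[2];
	int total_rate = 0, fifo_left = fifo_size, fifo_extra, i;

	for (i = 0; i < nplanes; i++)
		total_rate += cpp[i];

	/* proportional split by plane bandwidth (bytes per pixel) */
	for (i = 0; i < nplanes; i++) {
		alloc[i] = fifo_size * cpp[i] / total_rate;
		fifo_left -= alloc[i];
	}

	/* spread the rounding remainder, one chunk per plane */
	fifo_extra = DIV_ROUND_UP(fifo_left, nplanes);
	for (i = 0; i < nplanes && fifo_left; i++) {
		int extra = fifo_extra < fifo_left ? fifo_extra : fifo_left;

		alloc[i] += extra;
		fifo_left -= extra;
	}

	for (i = 0; i < nplanes; i++)
		printf("plane %d: %d FIFO entries\n", i, alloc[i]); /* 341 / 170 */
	return 0;
}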
+
+static void vlv_invert_wms(struct intel_crtc *crtc)
+{
+ struct vlv_wm_state *wm_state = &crtc->wm_state;
+ int level;
+
+ for (level = 0; level < wm_state->num_levels; level++) {
+ struct drm_device *dev = crtc->base.dev;
+ const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ struct intel_plane *plane;
+
+ wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
+ wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
+
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ switch (plane->base.type) {
+ int sprite;
+ case DRM_PLANE_TYPE_CURSOR:
+ wm_state->wm[level].cursor = plane->wm.fifo_size -
+ wm_state->wm[level].cursor;
+ break;
+ case DRM_PLANE_TYPE_PRIMARY:
+ wm_state->wm[level].primary = plane->wm.fifo_size -
+ wm_state->wm[level].primary;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ sprite = plane->plane;
+ wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
+ wm_state->wm[level].sprite[sprite];
+ break;
+ }
+ }
+ }
+}
+
+static void vlv_compute_wm(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct vlv_wm_state *wm_state = &crtc->wm_state;
+ struct intel_plane *plane;
+ int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ int level;
+
+ memset(wm_state, 0, sizeof(*wm_state));
+
+ wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
+ if (IS_CHERRYVIEW(dev))
+ wm_state->num_levels = CHV_WM_NUM_LEVELS;
+ else
+ wm_state->num_levels = VLV_WM_NUM_LEVELS;
+
+ wm_state->num_active_planes = 0;
+
+ vlv_compute_fifo(crtc);
+
+ if (wm_state->num_active_planes != 1)
+ wm_state->cxsr = false;
+
+ if (wm_state->cxsr) {
+ for (level = 0; level < wm_state->num_levels; level++) {
+ wm_state->sr[level].plane = sr_fifo_size;
+ wm_state->sr[level].cursor = 63;
+ }
+ }
+
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ struct intel_plane_state *state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!state->visible)
+ continue;
+
+ /* normal watermarks */
+ for (level = 0; level < wm_state->num_levels; level++) {
+ int wm = vlv_compute_wm_level(plane, crtc, state, level);
+ int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+
+ /* hack */
+ if (WARN_ON(level == 0 && wm > max_wm))
+ wm = max_wm;
+
+ if (wm > plane->wm.fifo_size)
+ break;
+
+ switch (plane->base.type) {
+ int sprite;
+ case DRM_PLANE_TYPE_CURSOR:
+ wm_state->wm[level].cursor = wm;
+ break;
+ case DRM_PLANE_TYPE_PRIMARY:
+ wm_state->wm[level].primary = wm;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ sprite = plane->plane;
+ wm_state->wm[level].sprite[sprite] = wm;
+ break;
+ }
+ }
+
+ wm_state->num_levels = level;
+
+ if (!wm_state->cxsr)
+ continue;
+
+ /* maxfifo watermarks */
+ switch (plane->base.type) {
+ int sprite, level;
+ case DRM_PLANE_TYPE_CURSOR:
+ for (level = 0; level < wm_state->num_levels; level++)
+ wm_state->sr[level].cursor =
+ wm_state->sr[level].cursor;
+ break;
+ case DRM_PLANE_TYPE_PRIMARY:
+ for (level = 0; level < wm_state->num_levels; level++)
+ wm_state->sr[level].plane =
+ min(wm_state->sr[level].plane,
+ wm_state->wm[level].primary);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ sprite = plane->plane;
+ for (level = 0; level < wm_state->num_levels; level++)
+ wm_state->sr[level].plane =
+ min(wm_state->sr[level].plane,
+ wm_state->wm[level].sprite[sprite]);
break;
+ }
}
- return true;
+ /* clear any (partially) filled invalid levels */
+ for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+ memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
+ memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
+ }
+
+ vlv_invert_wms(crtc);
}
-static void valleyview_update_wm(struct drm_crtc *crtc)
+#define VLV_FIFO(plane, value) \
+ (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
+static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- bool cxsr_enabled;
- struct vlv_wm_values wm = dev_priv->wm.vlv;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane;
+ int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
- wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
- wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
- to_intel_plane(crtc->primary),
- vlv_get_fifo_size(dev, pipe, 0));
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+ WARN_ON(plane->wm.fifo_size != 63);
+ continue;
+ }
- wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
- wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
- to_intel_plane(crtc->cursor),
- 0x3f);
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ sprite0_start = plane->wm.fifo_size;
+ else if (plane->plane == 0)
+ sprite1_start = sprite0_start + plane->wm.fifo_size;
+ else
+ fifo_size = sprite1_start + plane->wm.fifo_size;
+ }
- cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
+ WARN_ON(fifo_size != 512 - 1);
- if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
- return;
+ DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
+ pipe_name(crtc->pipe), sprite0_start,
+ sprite1_start, fifo_size);
- DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
- "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
- wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
- wm.sr.plane, wm.sr.cursor);
+ switch (crtc->pipe) {
+ uint32_t dsparb, dsparb2, dsparb3;
+ case PIPE_A:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
- /*
- * FIXME DDR DVFS introduces massive memory latencies which
- * are not known to system agent so any deadline specified
- * by the display may not be respected. To support DDR DVFS
- * the watermark code needs to be rewritten to essentially
- * bypass deadline mechanism and rely solely on the
- * watermarks. For now disable DDR DVFS.
- */
- if (IS_CHERRYVIEW(dev_priv))
- chv_set_memory_dvfs(dev_priv, false);
+ dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+ VLV_FIFO(SPRITEB, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+ VLV_FIFO(SPRITEB, sprite1_start));
- if (!cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, false);
+ dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+ VLV_FIFO(SPRITEB_HI, 0x1));
+ dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- vlv_write_wm_values(intel_crtc, &wm);
+ I915_WRITE(DSPARB, dsparb);
+ I915_WRITE(DSPARB2, dsparb2);
+ break;
+ case PIPE_B:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+ VLV_FIFO(SPRITED, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+ VLV_FIFO(SPRITED, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+ VLV_FIFO(SPRITED_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+ I915_WRITE(DSPARB, dsparb);
+ I915_WRITE(DSPARB2, dsparb2);
+ break;
+ case PIPE_C:
+ dsparb3 = I915_READ(DSPARB3);
+ dsparb2 = I915_READ(DSPARB2);
+
+ dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+ VLV_FIFO(SPRITEF, 0xff));
+ dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+ VLV_FIFO(SPRITEF, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+ VLV_FIFO(SPRITEF_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+ I915_WRITE(DSPARB3, dsparb3);
+ I915_WRITE(DSPARB2, dsparb2);
+ break;
+ default:
+ break;
+ }
}
-static void valleyview_update_sprite_wm(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width,
- uint32_t sprite_height,
- int pixel_size,
- bool enabled, bool scaled)
+#undef VLV_FIFO
+
+static void vlv_merge_wm(struct drm_device *dev,
+ struct vlv_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_crtcs = 0;
+
+ if (IS_CHERRYVIEW(dev))
+ wm->level = VLV_WM_LEVEL_DDR_DVFS;
+ else
+ wm->level = VLV_WM_LEVEL_PM2;
+ wm->cxsr = true;
+
+ for_each_intel_crtc(dev, crtc) {
+ const struct vlv_wm_state *wm_state = &crtc->wm_state;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+
+ num_active_crtcs++;
+ wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+ }
+
+ if (num_active_crtcs != 1)
+ wm->cxsr = false;
+
+ if (num_active_crtcs > 1)
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct vlv_wm_state *wm_state = &crtc->wm_state;
+ enum pipe pipe = crtc->pipe;
+
+ if (!crtc->active)
+ continue;
+
+ wm->pipe[pipe] = wm_state->wm[wm->level];
+ if (wm->cxsr)
+ wm->sr = wm_state->sr[wm->level];
+
+ wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+ }
+}
+
+static void vlv_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- int sprite = to_intel_plane(plane)->plane;
- bool cxsr_enabled;
- struct vlv_wm_values wm = dev_priv->wm.vlv;
+ struct vlv_wm_values wm = {};
- if (enabled) {
- wm.ddl[pipe].sprite[sprite] =
- vlv_compute_drain_latency(crtc, plane);
+ vlv_compute_wm(intel_crtc);
+ vlv_merge_wm(dev, &wm);
- wm.pipe[pipe].sprite[sprite] =
- vlv_compute_wm(intel_crtc,
- to_intel_plane(plane),
- vlv_get_fifo_size(dev, pipe, sprite+1));
- } else {
- wm.ddl[pipe].sprite[sprite] = 0;
- wm.pipe[pipe].sprite[sprite] = 0;
+ if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+ /* FIXME should be part of crtc atomic commit */
+ vlv_pipe_set_fifo_size(intel_crtc);
+ return;
}
- cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
-
- if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
- return;
+ if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
+ dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
+ chv_set_memory_dvfs(dev_priv, false);
- DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
- "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
- sprite_name(pipe, sprite),
- wm.pipe[pipe].sprite[sprite],
- wm.sr.plane, wm.sr.cursor);
+ if (wm.level < VLV_WM_LEVEL_PM5 &&
+ dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+ chv_set_memory_pm5(dev_priv, false);
- if (!cxsr_enabled)
+ if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
intel_set_memory_cxsr(dev_priv, false);
+ /* FIXME should be part of crtc atomic commit */
+ vlv_pipe_set_fifo_size(intel_crtc);
+
vlv_write_wm_values(intel_crtc, &wm);
- if (cxsr_enabled)
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+ "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
+ pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+ wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
+ wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+
+ if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
intel_set_memory_cxsr(dev_priv, true);
+
+ if (wm.level >= VLV_WM_LEVEL_PM5 &&
+ dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+ chv_set_memory_pm5(dev_priv, true);
+
+ if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
+ dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+ chv_set_memory_dvfs(dev_priv, true);
+
+ dev_priv->wm.vlv = wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1434,23 +1712,22 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
}
-static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
- struct drm_crtc *crtc)
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
- pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
+ pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
- if (intel_crtc->config->pch_pfit.enabled) {
+ if (pipe_config->pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
- uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
+ uint32_t pfit_size = pipe_config->pch_pfit.size;
+
+ pipe_w = pipe_config->pipe_src_w;
+ pipe_h = pipe_config->pipe_src_h;
- pipe_w = intel_crtc->config->pipe_src_w;
- pipe_h = intel_crtc->config->pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
@@ -1815,7 +2092,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
- dev_priv->display.get_display_clock_speed(dev_priv->dev));
+ dev_priv->cdclk_freq);
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
@@ -2066,7 +2343,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
p->active = true;
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
- p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+ p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
if (crtc->primary->state->fb)
p->pri.bytes_per_pixel =
@@ -2085,7 +2362,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
- drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ drm_for_each_legacy_plane(plane, dev) {
struct intel_plane *intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe) {
@@ -2215,6 +2492,7 @@ static void ilk_wm_merge(struct drm_device *dev,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int level, max_level = ilk_wm_max_level(dev);
int last_enabled_level = max_level;
@@ -2255,7 +2533,8 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+ if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+ intel_fbc_enabled(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3043,8 +3322,10 @@ skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
if (!to_intel_crtc(crtc)->active)
return 0;
- return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+ if (WARN_ON(p->pixel_rate == 0))
+ return 0;
+ return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
}
static void skl_compute_transition_wm(struct drm_crtc *crtc,
@@ -3685,6 +3966,139 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
}
}
+#define _FW_WM(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+ struct vlv_wm_values *wm)
+{
+ enum pipe pipe;
+ uint32_t tmp;
+
+ for_each_pipe(dev_priv, pipe) {
+ tmp = I915_READ(VLV_DDL(pipe));
+
+ wm->ddl[pipe].primary =
+ (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].cursor =
+ (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].sprite[0] =
+ (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].sprite[1] =
+ (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ }
+
+ tmp = I915_READ(DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
+ wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+
+ tmp = I915_READ(DSPFW2);
+ wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
+ wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+
+ tmp = I915_READ(DSPFW3);
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ tmp = I915_READ(DSPFW7_CHV);
+ wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = I915_READ(DSPFW8_CHV);
+ wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
+ wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+
+ tmp = I915_READ(DSPFW9_CHV);
+ wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
+ wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+
+ tmp = I915_READ(DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+ wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+ wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
+ wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+ } else {
+ tmp = I915_READ(DSPFW7);
+ wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = I915_READ(DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+ }
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
+void vlv_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+ struct intel_plane *plane;
+ enum pipe pipe;
+ u32 val;
+
+ vlv_read_wm_values(dev_priv, wm);
+
+ for_each_intel_plane(dev, plane) {
+ switch (plane->base.type) {
+ int sprite;
+ case DRM_PLANE_TYPE_CURSOR:
+ plane->wm.fifo_size = 63;
+ break;
+ case DRM_PLANE_TYPE_PRIMARY:
+ plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ sprite = plane->plane;
+ plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+ break;
+ }
+ }
+
+ wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ if (val & DSP_MAXFIFO_PM5_ENABLE)
+ wm->level = VLV_WM_LEVEL_PM5;
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+ wm->level = VLV_WM_LEVEL_DDR_DVFS;
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ }
+
+ for_each_pipe(dev_priv, pipe)
+ DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+ pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
+ wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+
+ DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4083,14 +4497,14 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
"Odd GPU freq value\n"))
val &= ~1;
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
if (val != dev_priv->rps.cur_freq) {
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (!IS_CHERRYVIEW(dev_priv))
gen6_set_rps_thresholds(dev_priv, val);
}
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
@@ -4250,12 +4664,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
- /* No RC6 before Ironlake */
- if (INTEL_INFO(dev)->gen < 5)
- return 0;
-
- /* RC6 is only on Ironlake mobile not on desktop */
- if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+ /* No RC6 before Ironlake and code is gone for ilk. */
+ if (INTEL_INFO(dev)->gen < 6)
return 0;
/* Respect the kernel parameter if it is set */
@@ -4275,10 +4685,6 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
return enable_rc6 & mask;
}
- /* Disable RC6 on Ironlake */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
-
if (IS_IVYBRIDGE(dev))
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
@@ -4297,25 +4703,26 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
u32 ddcc_status = 0;
int ret;
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
- dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
- if (IS_SKYLAKE(dev)) {
- /* Store the frequency values in 16.66 MHZ units, which is
- the natural hardware unit for SKL */
- dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+ if (IS_BROXTON(dev)) {
+ rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+ dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
+ } else {
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
}
+
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
@@ -4327,6 +4734,16 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
+ if (IS_SKYLAKE(dev)) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ the natural hardware unit for SKL */
+ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+ }
+
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
/* Preserve min/max settings in case of re-init */
@@ -4619,6 +5036,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
int min_freq = 15;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
+ unsigned int max_gpu_freq, min_gpu_freq;
int scaling_factor = 180;
struct cpufreq_policy *policy;
@@ -4643,17 +5061,31 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
+ if (IS_SKYLAKE(dev)) {
+ /* Convert GT frequency to 50 MHz units */
+ min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
+ max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+ } else {
+ min_gpu_freq = dev_priv->rps.min_freq;
+ max_gpu_freq = dev_priv->rps.max_freq;
+ }
+
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
- gpu_freq--) {
- int diff = dev_priv->rps.max_freq - gpu_freq;
+ for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
+ int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (IS_SKYLAKE(dev)) {
+ /*
+ * ring_freq = 2 * GT. ring_freq is in 100MHz units
+ * No floor required for ring frequency on SKL.
+ */
+ ring_freq = gpu_freq;
+ } else if (INTEL_INFO(dev)->gen >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
} else if (IS_HASWELL(dev)) {
@@ -4687,7 +5119,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+ if (!HAS_CORE_RING_FREQ(dev))
return;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -5802,7 +6234,8 @@ static void intel_gen6_powersave_work(struct work_struct *work)
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_enable_rc6(dev);
gen9_enable_rps(dev);
- __gen6_update_ring_freq(dev);
+ if (IS_SKYLAKE(dev))
+ __gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
@@ -6686,13 +7119,15 @@ void intel_init_pm(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
- dev_priv->display.update_wm = valleyview_update_wm;
- dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+ vlv_setup_wm_latency(dev);
+
+ dev_priv->display.update_wm = vlv_update_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.update_wm = valleyview_update_wm;
- dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+ vlv_setup_wm_latency(dev);
+
+ dev_priv->display.update_wm = vlv_update_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 5ee0fa57ed19..a04b4dc5ed9b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -254,10 +254,13 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
uint32_t max_sleep_time = 0x1f;
/* Lately it was identified that depending on panel idle frame count
* calculated at HW can be off by 1. So let's use what came
- * from VBT + 1 and at minimum 2 to be on the safe side.
+ * from VBT + 1.
+ * There are also other cases where the panel demands at least 4
+ * but VBT is not being set. To cover these 2 cases, let's use
+ * at least 5 when VBT isn't set, to be on the safe side.
*/
uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
- dev_priv->vbt.psr.idle_frames + 1 : 2;
+ dev_priv->vbt.psr.idle_frames + 1 : 5;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
@@ -400,7 +403,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+ EDP_PSR_DEBUG_MASK_HPD);
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
@@ -596,13 +599,15 @@ static void intel_psr_exit(struct drm_device *dev)
/**
* intel_psr_single_frame_update - Single Frame Update
* @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
*
* Some platforms support a single frame update feature that is used to
* send and update only one frame on Remote Frame Buffer.
* So far it is only implemented for Valleyview and Cherryview because
* hardware requires this to be done before a page flip.
*/
-void intel_psr_single_frame_update(struct drm_device *dev)
+void intel_psr_single_frame_update(struct drm_device *dev,
+ unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
@@ -624,14 +629,16 @@ void intel_psr_single_frame_update(struct drm_device *dev)
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
- val = I915_READ(VLV_PSRCTL(pipe));
- /*
- * We need to set this bit before writing registers for a flip.
- * This bit will be self-clear when it gets to the PSR active state.
- */
- I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+ if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
+ val = I915_READ(VLV_PSRCTL(pipe));
+ /*
+ * We need to set this bit before writing registers for a flip.
+ * This bit will be self-clear when it gets to the PSR active state.
+ */
+ I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+ }
mutex_unlock(&dev_priv->psr.lock);
}
@@ -648,7 +655,7 @@ void intel_psr_single_frame_update(struct drm_device *dev)
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
*/
void intel_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits)
+ unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
@@ -663,11 +670,12 @@ void intel_psr_invalidate(struct drm_device *dev,
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
- intel_psr_exit(dev);
-
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+ if (frontbuffer_bits)
+ intel_psr_exit(dev);
+
mutex_unlock(&dev_priv->psr.lock);
}
@@ -675,6 +683,7 @@ void intel_psr_invalidate(struct drm_device *dev,
* intel_psr_flush - Flush PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
@@ -684,11 +693,12 @@ void intel_psr_invalidate(struct drm_device *dev,
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
void intel_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
+ unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
+ int delay_ms = HAS_DDI(dev) ? 100 : 500;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
@@ -698,30 +708,33 @@ void intel_psr_flush(struct drm_device *dev,
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
- dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
- /*
- * On Haswell sprite plane updates don't result in a psr invalidating
- * signal in the hardware. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering.
- */
- if (IS_HASWELL(dev) &&
- (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
- intel_psr_exit(dev);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
- /*
- * On Valleyview and Cherryview we don't use hardware tracking so
- * any plane updates or cursor moves don't result in a PSR
- * invalidating. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering. */
- if (!HAS_DDI(dev))
- intel_psr_exit(dev);
+ if (HAS_DDI(dev)) {
+ /*
+ * By definition every flush should mean invalidate + flush,
+ * however on core platforms let's minimize the
+ * disable/re-enable so we can avoid the invalidate when flip
+ * originated the flush.
+ */
+ if (frontbuffer_bits && origin != ORIGIN_FLIP)
+ intel_psr_exit(dev);
+ } else {
+ /*
+ * On Valleyview and Cherryview we don't use hardware tracking
+ * so any plane updates or cursor moves don't result in a PSR
+ * invalidating. Which means we need to manually fake this in
+ * software for all flushes.
+ */
+ if (frontbuffer_bits)
+ intel_psr_exit(dev);
+ }
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(delay_ms));
mutex_unlock(&dev_priv->psr.lock);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3817a6f00d9e..6e6b8db996ef 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -81,7 +81,7 @@ bool intel_ring_stopped(struct intel_engine_cs *ring)
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}
-void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
@@ -91,10 +91,11 @@ void __intel_ring_advance(struct intel_engine_cs *ring)
}
static int
-gen2_render_ring_flush(struct intel_engine_cs *ring,
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
u32 cmd;
int ret;
@@ -105,7 +106,7 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
cmd |= MI_READ_FLUSH;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -117,10 +118,11 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
}
static int
-gen4_render_ring_flush(struct intel_engine_cs *ring,
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
u32 cmd;
int ret;
@@ -163,7 +165,7 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
(IS_G4X(dev) || IS_GEN5(dev)))
cmd |= MI_INVALIDATE_ISP;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -212,13 +214,13 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
+intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
-
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -231,7 +233,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -247,15 +249,16 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
}
static int
-gen6_render_ring_flush(struct intel_engine_cs *ring,
- u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
+ u32 invalidate_domains, u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = intel_emit_post_sync_nonzero_flush(ring);
+ ret = intel_emit_post_sync_nonzero_flush(req);
if (ret)
return ret;
@@ -285,7 +288,7 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -299,11 +302,12 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
}
static int
-gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
+gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -318,9 +322,10 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
}
static int
-gen7_render_ring_flush(struct intel_engine_cs *ring,
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
@@ -362,10 +367,10 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
- gen7_render_ring_cs_stall_wa(ring);
+ gen7_render_ring_cs_stall_wa(req);
}
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -379,12 +384,13 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
}
static int
-gen8_emit_pipe_control(struct intel_engine_cs *ring,
+gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -400,11 +406,11 @@ gen8_emit_pipe_control(struct intel_engine_cs *ring,
}
static int
-gen8_render_ring_flush(struct intel_engine_cs *ring,
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
flags |= PIPE_CONTROL_CS_STALL;
@@ -424,7 +430,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
- ret = gen8_emit_pipe_control(ring,
+ ret = gen8_emit_pipe_control(req,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD,
0);
@@ -432,7 +438,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
return ret;
}
- return gen8_emit_pipe_control(ring, flags, scratch_addr);
+ return gen8_emit_pipe_control(req, flags, scratch_addr);
}
static void ring_write_tail(struct intel_engine_cs *ring,
@@ -703,10 +709,10 @@ err:
return ret;
}
-static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
@@ -715,11 +721,11 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
ring->gpu_caches_dirty = true;
- ret = intel_ring_flush_all_caches(ring);
+ ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
- ret = intel_ring_begin(ring, (w->count * 2 + 2));
+ ret = intel_ring_begin(req, (w->count * 2 + 2));
if (ret)
return ret;
@@ -733,7 +739,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
intel_ring_advance(ring);
ring->gpu_caches_dirty = true;
- ret = intel_ring_flush_all_caches(ring);
+ ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
@@ -742,16 +748,15 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
}
-static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
int ret;
- ret = intel_ring_workarounds_emit(ring, ctx);
+ ret = intel_ring_workarounds_emit(req);
if (ret != 0)
return ret;
- ret = i915_gem_render_state_init(ring);
+ ret = i915_gem_render_state_init(req);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
@@ -775,11 +780,11 @@ static int wa_add(struct drm_i915_private *dev_priv,
return 0;
}
-#define WA_REG(addr, mask, val) { \
+#define WA_REG(addr, mask, val) do { \
const int r = wa_add(dev_priv, (addr), (mask), (val)); \
if (r) \
return r; \
- }
+ } while (0)
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -800,6 +805,11 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisableAsyncFlipPerfMode:bdw */
+ WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
/* WaDisablePartialInstShootdown:bdw */
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -861,6 +871,11 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisableAsyncFlipPerfMode:chv */
+ WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
/* WaDisablePartialInstShootdown:chv */
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -931,8 +946,11 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
- WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
- DISABLE_PIXEL_MASK_CAMMING);
+ /*
+ * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
+ * but we do that in per ctx batchbuffer as there is an issue
+ * with this register not getting restored on ctx restore
+ */
}
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
@@ -1023,13 +1041,6 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- if (INTEL_REVID(dev) == SKL_REVID_C0 ||
- INTEL_REVID(dev) == SKL_REVID_D0)
- /* WaBarrierPerformanceFixDisable:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE |
- HDC_BARRIER_PERFORMANCE_DISABLE);
-
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
/*
*Use Force Non-Coherent whenever executing a 3D context. This
@@ -1041,6 +1052,20 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
HDC_FORCE_NON_COHERENT);
}
+ if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+ INTEL_REVID(dev) == SKL_REVID_D0)
+ /* WaBarrierPerformanceFixDisable:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE |
+ HDC_BARRIER_PERFORMANCE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:skl */
+ if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ }
+
return skl_tune_iz_hashing(ring);
}
@@ -1105,9 +1130,9 @@ static int init_render_ring(struct intel_engine_cs *ring)
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
@@ -1132,7 +1157,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (HAS_L3_DPF(dev))
@@ -1155,10 +1180,11 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
intel_fini_pipe_control(ring);
}
-static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
+ struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
@@ -1168,7 +1194,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
- ret = intel_ring_begin(signaller, num_dwords);
+ ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
@@ -1178,8 +1204,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- seqno = i915_gem_request_get_seqno(
- signaller->outstanding_lazy_request);
+ seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
@@ -1196,10 +1221,11 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
return 0;
}
-static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
+ struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
@@ -1209,7 +1235,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
- ret = intel_ring_begin(signaller, num_dwords);
+ ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
@@ -1219,8 +1245,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- seqno = i915_gem_request_get_seqno(
- signaller->outstanding_lazy_request);
+ seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
@@ -1235,9 +1260,10 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
return 0;
}
-static int gen6_signal(struct intel_engine_cs *signaller,
+static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
+ struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
@@ -1248,15 +1274,14 @@ static int gen6_signal(struct intel_engine_cs *signaller,
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS
- ret = intel_ring_begin(signaller, num_dwords);
+ ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
- u32 seqno = i915_gem_request_get_seqno(
- signaller->outstanding_lazy_request);
+ u32 seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, seqno);
@@ -1272,30 +1297,29 @@ static int gen6_signal(struct intel_engine_cs *signaller,
/**
* gen6_add_request - Update the semaphore mailbox registers
- *
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
+ *
+ * @req - request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_engine_cs *ring)
+gen6_add_request(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
if (ring->semaphore.signal)
- ret = ring->semaphore.signal(ring, 4);
+ ret = ring->semaphore.signal(req, 4);
else
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1318,14 +1342,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
*/
static int
-gen8_ring_sync(struct intel_engine_cs *waiter,
+gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
+ struct intel_engine_cs *waiter = waiter_req->ring;
struct drm_i915_private *dev_priv = waiter->dev->dev_private;
int ret;
- ret = intel_ring_begin(waiter, 4);
+ ret = intel_ring_begin(waiter_req, 4);
if (ret)
return ret;
@@ -1343,10 +1368,11 @@ gen8_ring_sync(struct intel_engine_cs *waiter,
}
static int
-gen6_ring_sync(struct intel_engine_cs *waiter,
+gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
+ struct intel_engine_cs *waiter = waiter_req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
@@ -1361,7 +1387,7 @@ gen6_ring_sync(struct intel_engine_cs *waiter,
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- ret = intel_ring_begin(waiter, 4);
+ ret = intel_ring_begin(waiter_req, 4);
if (ret)
return ret;
@@ -1392,8 +1418,9 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_engine_cs *ring)
+pc_render_add_request(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
@@ -1405,7 +1432,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
* incoherence by flushing the 6 PIPE_NOTIFY buffers out to
* memory before requesting an interrupt.
*/
- ret = intel_ring_begin(ring, 32);
+ ret = intel_ring_begin(req, 32);
if (ret)
return ret;
@@ -1413,8 +1440,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1433,8 +1459,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, 0);
__intel_ring_advance(ring);
@@ -1585,13 +1610,14 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
}
static int
-bsd_ring_flush(struct intel_engine_cs *ring,
+bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -1602,18 +1628,18 @@ bsd_ring_flush(struct intel_engine_cs *ring,
}
static int
-i9xx_add_request(struct intel_engine_cs *ring)
+i9xx_add_request(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1745,13 +1771,14 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
}
static int
-i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -1771,14 +1798,15 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
-i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
u32 cs_offset = ring->scratch.gtt_offset;
int ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -1795,7 +1823,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- ret = intel_ring_begin(ring, 6 + 2);
+ ret = intel_ring_begin(req, 6 + 2);
if (ret)
return ret;
@@ -1818,7 +1846,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
offset = cs_offset;
}
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -1833,13 +1861,14 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
}
static int
-i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -2082,7 +2111,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
- i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -2106,6 +2134,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
if (intel_ring_space(ringbuf) >= n)
return 0;
+ /* The whole point of reserving space is to not wait! */
+ WARN_ON(ringbuf->reserved_in_use);
+
list_for_each_entry(request, &ring->request_list, list) {
space = __intel_ring_space(request->postfix, ringbuf->tail,
ringbuf->size);
@@ -2124,18 +2155,11 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
return 0;
}
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
uint32_t __iomem *virt;
- struct intel_ringbuffer *ringbuf = ring->buffer;
int rem = ringbuf->size - ringbuf->tail;
- if (ringbuf->space < rem) {
- int ret = ring_wait_for_space(ring, rem);
- if (ret)
- return ret;
- }
-
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
@@ -2143,21 +2167,11 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
ringbuf->tail = 0;
intel_ring_update_space(ringbuf);
-
- return 0;
}
int intel_ring_idle(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *req;
- int ret;
-
- /* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_request) {
- ret = i915_add_request(ring);
- if (ret)
- return ret;
- }
/* Wait upon the last request to be completed */
if (list_empty(&ring->request_list))
@@ -2180,33 +2194,126 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
return 0;
}
-static int __intel_ring_prepare(struct intel_engine_cs *ring,
- int bytes)
+int intel_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+ /*
+ * The first call merely notes the reserve request and is common for
+ * all back ends. The subsequent localised _begin() call actually
+ * ensures that the reservation is available. Without the begin, if
+ * the request creator immediately submitted the request without
+ * adding any commands to it then there might not actually be
+ * sufficient room for the submission commands.
+ */
+ intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+ return intel_ring_begin(request, 0);
+}
+
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+{
+ WARN_ON(ringbuf->reserved_size);
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_size = size;
+}
+
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_size = 0;
+ ringbuf->reserved_in_use = false;
+}
+
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_in_use = true;
+ ringbuf->reserved_tail = ringbuf->tail;
+}
+
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(!ringbuf->reserved_in_use);
+ if (ringbuf->tail > ringbuf->reserved_tail) {
+ WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+ "request reserved size too small: %d vs %d!\n",
+ ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+ } else {
+ /*
+ * The ring was wrapped while the reserved space was in use.
+ * That means that some unknown amount of the ring tail was
+ * no-op filled and skipped. Thus simply adding the ring size
+ * to the tail and doing the above space check will not work.
+ * Rather than attempt to track how much tail was skipped,
+ * it is much simpler to accept that the sanity check will also be
+ * skipped every once in a while, which is not a big issue.
+ */
+ }
+
+ ringbuf->reserved_size = 0;
+ ringbuf->reserved_in_use = false;
+}
+
+static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
- int ret;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int remain_actual = ringbuf->size - ringbuf->tail;
+ int ret, total_bytes, wait_bytes = 0;
+ bool need_wrap = false;
- if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = intel_wrap_ring_buffer(ring);
- if (unlikely(ret))
- return ret;
+ if (ringbuf->reserved_in_use)
+ total_bytes = bytes;
+ else
+ total_bytes = bytes + ringbuf->reserved_size;
+
+ if (unlikely(bytes > remain_usable)) {
+ /*
+ * Not enough space for the basic request. So need to flush
+ * out the remainder and then wait for base + reserved.
+ */
+ wait_bytes = remain_actual + total_bytes;
+ need_wrap = true;
+ } else {
+ if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So only need to wait for the
+ * reserved size after flushing out the remainder.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
+ need_wrap = true;
+ } else if (total_bytes > ringbuf->space) {
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
+ }
}
- if (unlikely(ringbuf->space < bytes)) {
- ret = ring_wait_for_space(ring, bytes);
+ if (wait_bytes) {
+ ret = ring_wait_for_space(ring, wait_bytes);
if (unlikely(ret))
return ret;
+
+ if (need_wrap)
+ __wrap_ring_buffer(ringbuf);
}
return 0;
}
-int intel_ring_begin(struct intel_engine_cs *ring,
+int intel_ring_begin(struct drm_i915_gem_request *req,
int num_dwords)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *ring;
+ struct drm_i915_private *dev_priv;
int ret;
+ WARN_ON(req == NULL);
+ ring = req->ring;
+ dev_priv = ring->dev->dev_private;
+
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
@@ -2216,18 +2323,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
if (ret)
return ret;
- /* Preallocate the olr before touching the ring */
- ret = i915_gem_request_alloc(ring, ring->default_context);
- if (ret)
- return ret;
-
ring->buffer->space -= num_dwords * sizeof(uint32_t);
return 0;
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine_cs *ring)
+int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
@@ -2235,7 +2338,7 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
return 0;
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
- ret = intel_ring_begin(ring, num_dwords);
+ ret = intel_ring_begin(req, num_dwords);
if (ret)
return ret;
@@ -2252,8 +2355,6 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- BUG_ON(ring->outstanding_lazy_request);
-
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
@@ -2298,13 +2399,14 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
-static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
+ struct intel_engine_cs *ring = req->ring;
uint32_t cmd;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -2342,20 +2444,23 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
}
static int
-gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
bool ppgtt = USES_PPGTT(ring->dev) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_emit(ring, MI_NOOP);
@@ -2365,20 +2470,23 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
}
static int
-hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
+ 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -2387,13 +2495,14 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
}
static int
-gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -2410,14 +2519,15 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct intel_engine_cs *ring,
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
uint32_t cmd;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -2818,26 +2928,28 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
}
int
-intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
if (!ring->gpu_caches_dirty)
return 0;
- ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
ring->gpu_caches_dirty = false;
return 0;
}
int
-intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
+intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
uint32_t flush_domains;
int ret;
@@ -2845,11 +2957,11 @@ intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
- trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
ring->gpu_caches_dirty = false;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e539314ae87e..2e85fda94963 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -12,6 +12,7 @@
* workarounds!
*/
#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
@@ -105,6 +106,9 @@ struct intel_ringbuffer {
int space;
int size;
int effective_size;
+ int reserved_size;
+ int reserved_tail;
+ bool reserved_in_use;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -120,6 +124,25 @@ struct intel_ringbuffer {
struct intel_context;
struct drm_i915_reg_descriptor;
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in terms of DWORDs.
+ *
+ * struct i915_wa_ctx_bb:
+ * offset: specifies the batch starting position; also helpful in case
+ * we want to have multiple batches at different offsets based on
+ * some criteria. It is not a requirement at the moment but provides
+ * an option for future use.
+ * size: size of the batch in DWORDS
+ */
+struct i915_ctx_workarounds {
+ struct i915_wa_ctx_bb {
+ u32 offset;
+ u32 size;
+ } indirect_ctx, per_ctx;
+ struct drm_i915_gem_object *obj;
+};
+
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
@@ -143,6 +166,7 @@ struct intel_engine_cs {
struct i915_gem_batch_pool batch_pool;
struct intel_hw_status_page status_page;
+ struct i915_ctx_workarounds wa_ctx;
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -152,15 +176,14 @@ struct intel_engine_cs {
int (*init_hw)(struct intel_engine_cs *ring);
- int (*init_context)(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+ int (*init_context)(struct drm_i915_gem_request *req);
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
- int __must_check (*flush)(struct intel_engine_cs *ring,
+ int __must_check (*flush)(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_engine_cs *ring);
+ int (*add_request)(struct drm_i915_gem_request *req);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -171,11 +194,12 @@ struct intel_engine_cs {
bool lazy_coherency);
void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
- int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
+ int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS 0x4
void (*cleanup)(struct intel_engine_cs *ring);
/* GEN8 signal/wait table - never trust comments!
@@ -229,10 +253,10 @@ struct intel_engine_cs {
};
/* AKA wait() */
- int (*sync_to)(struct intel_engine_cs *ring,
- struct intel_engine_cs *to,
+ int (*sync_to)(struct drm_i915_gem_request *to_req,
+ struct intel_engine_cs *from,
u32 seqno);
- int (*signal)(struct intel_engine_cs *signaller,
+ int (*signal)(struct drm_i915_gem_request *signaller_req,
/* num_dwords needed by caller */
unsigned int num_dwords);
} semaphore;
@@ -243,14 +267,11 @@ struct intel_engine_cs {
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
- int (*emit_request)(struct intel_ringbuffer *ringbuf,
- struct drm_i915_gem_request *request);
- int (*emit_flush)(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+ int (*emit_request)(struct drm_i915_gem_request *request);
+ int (*emit_flush)(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 flush_domains);
- int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+ int (*emit_bb_start)(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags);
/**
@@ -272,9 +293,12 @@ struct intel_engine_cs {
struct list_head request_list;
/**
- * Do we have some not yet emitted requests outstanding?
+ * Seqno of request most recently submitted to request_list.
+ * Used exclusively by hang checker to avoid grabbing lock while
+ * inspecting request list.
*/
- struct drm_i915_gem_request *outstanding_lazy_request;
+ u32 last_submitted_seqno;
+
bool gpu_caches_dirty;
wait_queue_head_t irq_queue;
@@ -401,8 +425,8 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
u32 data)
{
@@ -419,12 +443,11 @@ int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
@@ -444,11 +467,29 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
return ringbuf->tail;
}
-static inline struct drm_i915_gem_request *
-intel_ring_get_request(struct intel_engine_cs *ring)
-{
- BUG_ON(ring->outstanding_lazy_request == NULL);
- return ring->outstanding_lazy_request;
-}
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST 160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to do its stuff. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
#endif /* _INTEL_RINGBUFFER_H_ */
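The reservation helpers declared above are easier to follow with the intended call order spelled out. The sketch below is a hedged reconstruction of that flow, not code from the patch: the 'req->ringbuf' field and the surrounding request lifecycle are assumptions, and error handling is simplified.

/* Assumed lifecycle of the reserved ring space (simplified): */
static int example_request_lifecycle(struct drm_i915_gem_request *req)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;	/* assumed field */
	int ret;

	/* At request creation: guarantee room for the final add-request step */
	intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);

	ret = 0;	/* ... emit the request's own commands here ... */
	if (ret) {
		/* Request abandoned: give the reservation back */
		intel_ring_reserved_space_cancel(ringbuf);
		return ret;
	}

	/* In i915_add_request(): dip into the reserved space ... */
	intel_ring_reserved_space_use(ringbuf);
	/* ... emit the closing commands, then release the reservation */
	intel_ring_reserved_space_end(ringbuf);

	return 0;
}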
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1a45385f4d66..821644d1b544 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,6 +68,22 @@
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+ power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
+ power_well->ops->disable(dev_priv, power_well);
+}
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -835,12 +851,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
return enabled;
}
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
- vlv_set_power_well(dev_priv, power_well, true);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
@@ -858,18 +870,33 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
i915_redisable_vga_power_on(dev_priv->dev);
}
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_power_well_deinit(dev_priv);
vlv_set_power_well(dev_priv, power_well, false);
-
- vlv_power_sequencer_reset(dev_priv);
}
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
@@ -882,8 +909,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* display and the reference clock for VGA
* hotplug / manual detection.
*/
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(dev_priv, power_well, true);
@@ -933,14 +960,14 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
*/
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV);
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+ DPLL_REF_CLK_ENABLE_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
} else {
phy = DPIO_PHY1;
- I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
}
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(dev_priv, power_well, true);
@@ -1042,53 +1069,29 @@ out:
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
+ WARN_ON_ONCE(power_well->data != PIPE_A);
+
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
+ WARN_ON_ONCE(power_well->data != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, true);
- if (power_well->data == PIPE_A) {
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
- * During driver initialization/resume we can avoid restoring the
- * part of the HW/SW state that will be inited anyway explicitly.
- */
- if (dev_priv->power_domains.initializing)
- return;
-
- intel_hpd_init(dev_priv);
-
- i915_redisable_vga_power_on(dev_priv->dev);
- }
+ vlv_display_power_well_init(dev_priv);
}
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
+ WARN_ON_ONCE(power_well->data != PIPE_A);
- if (power_well->data == PIPE_A) {
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
- }
+ vlv_display_power_well_deinit(dev_priv);
chv_set_pipe_power_well(dev_priv, power_well, false);
-
- if (power_well->data == PIPE_A)
- vlv_power_sequencer_reset(dev_priv);
}
/**
@@ -1117,11 +1120,8 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++) {
- DRM_DEBUG_KMS("enabling %s\n", power_well->name);
- power_well->ops->enable(dev_priv, power_well);
- power_well->hw_enabled = true;
- }
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
}
power_domains->domain_use_count[domain]++;
@@ -1155,11 +1155,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
WARN_ON(!power_well->count);
- if (!--power_well->count && i915.disable_power_well) {
- DRM_DEBUG_KMS("disabling %s\n", power_well->name);
- power_well->hw_enabled = false;
- power_well->ops->disable(dev_priv, power_well);
- }
+ if (!--power_well->count && i915.disable_power_well)
+ intel_power_well_disable(dev_priv, power_well);
}
mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aa2fd751609c..c98098e884cc 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1508,51 +1508,6 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
-{
- struct drm_crtc *crtc;
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-
- /* dvo supports only 2 dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- crtc = intel_sdvo->base.base.crtc;
- if (!crtc) {
- intel_sdvo->base.connectors_active = false;
- return;
- }
-
- /* We set active outputs manually below in case pipe dpms doesn't change
- * due to cloning. */
- if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_sdvo, 0);
- if (0)
- intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-
- intel_sdvo->base.connectors_active = false;
-
- intel_crtc_update_dpms(crtc);
- } else {
- intel_sdvo->base.connectors_active = true;
-
- intel_crtc_update_dpms(crtc);
-
- if (0)
- intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
- }
-
- intel_modeset_check_state(connector->dev);
-}
-
static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -2190,7 +2145,7 @@ done:
}
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .dpms = intel_sdvo_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8193a35388d7..9d8af2f8a875 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -75,10 +75,8 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays. The value written to @start_vbl_count should be
* supplied to intel_pipe_update_end() for error checking.
- *
- * Return: true if the call was successful
*/
-bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
@@ -96,13 +94,14 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
min = vblank_start - usecs_to_scanlines(mode, 100);
max = vblank_start - 1;
+ local_irq_disable();
+ *start_vbl_count = 0;
+
if (min <= 0 || max <= 0)
- return false;
+ return;
if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
- return false;
-
- local_irq_disable();
+ return;
trace_i915_pipe_update_start(crtc, min, max);
@@ -138,8 +137,6 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
-
- return true;
}
/**
@@ -161,7 +158,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
local_irq_enable();
- if (start_vbl_count != end_vbl_count)
+ if (start_vbl_count && start_vbl_count != end_vbl_count)
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
pipe_name(pipe), start_vbl_count, end_vbl_count);
}
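The hunk above changes the contract of the vblank-evade helpers: intel_pipe_update_start() now always disables interrupts and zeroes *start_vbl_count instead of returning a bool, and intel_pipe_update_end() only reports a mismatch when the counter was actually sampled. A hedged sketch of the resulting caller pattern (the plane-programming step is a placeholder, not from the patch):

static void example_atomic_update(struct intel_crtc *crtc)
{
	u32 start_vbl_count;

	/* Always enters the critical section; no return value to branch on */
	intel_pipe_update_start(crtc, &start_vbl_count);

	/* ... program plane registers inside the vblank-evade window ... */

	/* Only flags an error if start_vbl_count was actually sampled */
	intel_pipe_update_end(crtc, start_vbl_count);
}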
@@ -182,7 +179,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ const struct drm_intel_sprite_colorkey *key =
+ &to_intel_plane_state(drm_plane->state)->ckey;
unsigned long surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
@@ -272,7 +270,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
}
static void
-skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -344,7 +342,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ const struct drm_intel_sprite_colorkey *key =
+ &to_intel_plane_state(dplane->state)->ckey;
sprctl = SP_ENABLE;
@@ -400,10 +399,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
- intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
- pixel_size, true,
- src_w != crtc_w || src_h != crtc_h);
-
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -411,7 +406,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
crtc_h--;
linear_offset = y * fb->pitches[0] + x * pixel_size;
- sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+ sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
+ &x, &y,
obj->tiling_mode,
pixel_size,
fb->pitches[0]);
@@ -455,7 +451,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
}
static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -467,8 +463,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
I915_WRITE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
-
- intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
static void
@@ -487,7 +481,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ const struct drm_intel_sprite_colorkey *key =
+ &to_intel_plane_state(plane->state)->ckey;
sprctl = SPRITE_ENABLE;
@@ -546,7 +541,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
- intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ intel_gen4_compute_page_offset(dev_priv,
+ &x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
@@ -595,7 +591,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -627,7 +623,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ const struct drm_intel_sprite_colorkey *key =
+ &to_intel_plane_state(plane->state)->ckey;
dvscntr = DVS_ENABLE;
@@ -682,7 +679,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
- intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ intel_gen4_compute_page_offset(dev_priv,
+ &x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
@@ -722,7 +720,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -739,11 +737,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
static int
intel_check_sprite_plane(struct drm_plane *plane,
+ struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
- struct intel_crtc_state *crtc_state;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
int crtc_x, crtc_y;
@@ -756,15 +755,10 @@ intel_check_sprite_plane(struct drm_plane *plane,
int max_scale, min_scale;
bool can_scale;
int pixel_size;
- int ret;
-
- intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
- crtc_state = state->base.state ?
- intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
if (!fb) {
state->visible = false;
- goto finish;
+ return 0;
}
/* Don't modify another pipe's plane */
@@ -782,7 +776,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
/* setup can_scale, min_scale, max_scale */
if (INTEL_INFO(dev)->gen >= 9) {
/* use scaler when colorkey is not required */
- if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
+ if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
can_scale = 1;
min_scale = 1;
max_scale = skl_max_scale(intel_crtc, crtc_state);
@@ -802,7 +796,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
* coordinates and sizes. We probably need some way to decide whether
* more strict checking should be done instead.
*/
-
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
state->base.rotation);
@@ -812,7 +805,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(vscale < 0);
- state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+ state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
crtc_x = dst->x1;
crtc_y = dst->y1;
@@ -917,36 +910,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
-finish:
- /*
- * If the sprite is completely covering the primary plane,
- * we can disable the primary and save power.
- */
- if (intel_crtc->active) {
- intel_crtc->atomic.fb_bits |=
- INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
-
- if (intel_wm_need_update(plane, &state->base))
- intel_crtc->atomic.update_wm = true;
-
- if (!state->visible) {
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |=
- (1 << drm_plane_index(plane));
- }
- }
-
- if (INTEL_INFO(dev)->gen >= 9) {
- ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
- state, 0);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -955,34 +918,27 @@ intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->base.crtc;
- struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
crtc = crtc ? crtc : plane->crtc;
- intel_crtc = to_intel_crtc(crtc);
plane->fb = fb;
- if (intel_crtc->active) {
- if (state->visible) {
- crtc_x = state->dst.x1;
- crtc_y = state->dst.y1;
- crtc_w = drm_rect_width(&state->dst);
- crtc_h = drm_rect_height(&state->dst);
- src_x = state->src.x1 >> 16;
- src_y = state->src.y1 >> 16;
- src_w = drm_rect_width(&state->src) >> 16;
- src_h = drm_rect_height(&state->src) >> 16;
- intel_plane->update_plane(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
- } else {
- intel_plane->disable_plane(plane, crtc, false);
- }
+ if (!crtc->state->active)
+ return;
+
+ if (state->visible) {
+ intel_plane->update_plane(plane, crtc, fb,
+ state->dst.x1, state->dst.y1,
+ drm_rect_width(&state->dst),
+ drm_rect_height(&state->dst),
+ state->src.x1 >> 16,
+ state->src.y1 >> 16,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16);
+ } else {
+ intel_plane->disable_plane(plane, crtc);
}
}
@@ -991,7 +947,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
{
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
- struct intel_plane *intel_plane;
+ struct drm_plane_state *plane_state;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
int ret = 0;
/* Make sure we don't try to enable both src & dest simultaneously */
@@ -1002,50 +960,41 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
- drm_modeset_lock_all(dev);
-
plane = drm_plane_find(dev, set->plane_id);
- if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -ENOENT;
- intel_plane = to_intel_plane(plane);
+ drm_modeset_acquire_init(&ctx, 0);
- if (INTEL_INFO(dev)->gen >= 9) {
- /* plane scaling and colorkey are mutually exclusive */
- if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
- DRM_ERROR("colorkey not allowed with scaler\n");
- ret = -EINVAL;
- goto out_unlock;
- }
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
}
+ state->acquire_ctx = &ctx;
+
+ while (1) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret) {
+ to_intel_plane_state(plane_state)->ckey = *set;
+ ret = drm_atomic_commit(state);
+ }
- intel_plane->ckey = *set;
-
- /*
- * The only way this could fail would be due to
- * the current plane state being unsupportable already,
- * and we don't consider that an error for the
- * colorkey ioctl. So just ignore any error.
- */
- intel_plane_restore(plane);
+ if (ret != -EDEADLK)
+ break;
-out_unlock:
- drm_modeset_unlock_all(dev);
- return ret;
-}
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ }
-int intel_plane_restore(struct drm_plane *plane)
-{
- if (!plane->crtc || !plane->state->fb)
- return 0;
+ if (ret)
+ drm_atomic_state_free(state);
- return drm_plane_helper_update(plane, plane->crtc, plane->state->fb,
- plane->state->crtc_x, plane->state->crtc_y,
- plane->state->crtc_w, plane->state->crtc_h,
- plane->state->src_x, plane->state->src_y,
- plane->state->src_w, plane->state->src_h);
+out:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return ret;
}
static const uint32_t ilk_plane_formats[] = {
@@ -1172,9 +1121,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->pipe = pipe;
intel_plane->plane = plane;
+ intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe);
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
- intel_plane->ckey.flags = I915_SET_COLORKEY_NONE;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
@@ -1189,6 +1138,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
- out:
+out:
return ret;
}
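The reworked colorkey ioctl above follows the standard DRM atomic acquire/backoff loop: take locks through a drm_modeset_acquire_ctx, and on -EDEADLK clear the state, back off, and retry. The sketch below shows the generic shape of that loop; 'do_update' is a stand-in for fetching the plane state and committing it, and is an assumption for illustration only.

static int example_atomic_retry(struct drm_device *dev,
				int (*do_update)(struct drm_atomic_state *))
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}
	state->acquire_ctx = &ctx;

	while (1) {
		ret = do_update(state);
		if (ret != -EDEADLK)
			break;

		/* Another thread holds a lock we need: drop everything and retry */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
	}

	if (ret)
		drm_atomic_state_free(state);
out:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}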
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 8b9d325bda3c..0568ae6ec9dd 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1509,7 +1509,7 @@ out:
}
static const struct drm_connector_funcs intel_tv_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a6d8a3ee7750..9d3c2e420d2b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
+ unsigned size;
+ u64 offset;
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (entry->offset == reg->offset &&
+ if (entry->offset == (reg->offset & -entry->size) &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
+ /* We use the low bits to encode extra flags as the register should
+ * be naturally aligned (and those that are not so aligned merely
+ * limit the available flags for that register).
+ */
+ offset = entry->offset;
+ size = entry->size;
+ size |= reg->offset ^ offset;
+
intel_runtime_pm_get(dev_priv);
- switch (entry->size) {
+ switch (size) {
+ case 8 | 1:
+ reg->val = I915_READ64_2x32(offset, offset+4);
+ break;
case 8:
- reg->val = I915_READ64(reg->offset);
+ reg->val = I915_READ64(offset);
break;
case 4:
- reg->val = I915_READ(reg->offset);
+ reg->val = I915_READ(offset);
break;
case 2:
- reg->val = I915_READ16(reg->offset);
+ reg->val = I915_READ16(offset);
break;
case 1:
- reg->val = I915_READ8(reg->offset);
+ reg->val = I915_READ8(offset);
break;
default:
- MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}
@@ -1455,20 +1467,80 @@ static int gen6_do_reset(struct drm_device *dev)
return ret;
}
-int intel_gpu_reset(struct drm_device *dev)
+static int wait_for_register(struct drm_i915_private *dev_priv,
+ const u32 reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms)
+{
+ return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+}
+
+static int gen8_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *engine;
+ int i;
+
+ for_each_ring(engine, dev_priv, i) {
+ I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+ if (wait_for_register(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700)) {
+ DRM_ERROR("%s: reset request timeout\n", engine->name);
+ goto not_ready;
+ }
+ }
+
+ return gen6_do_reset(dev);
+
+not_ready:
+ for_each_ring(engine, dev_priv, i)
+ I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+
+ return -EIO;
+}
+
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
- if (INTEL_INFO(dev)->gen >= 6)
- return gen6_do_reset(dev);
+ if (!i915.reset)
+ return NULL;
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ return gen8_do_reset;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ return gen6_do_reset;
else if (IS_GEN5(dev))
- return ironlake_do_reset(dev);
+ return ironlake_do_reset;
else if (IS_G4X(dev))
- return g4x_do_reset(dev);
+ return g4x_do_reset;
else if (IS_G33(dev))
- return g33_do_reset(dev);
+ return g33_do_reset;
else if (INTEL_INFO(dev)->gen >= 3)
- return i915_do_reset(dev);
+ return i915_do_reset;
else
+ return NULL;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+ int (*reset)(struct drm_device *);
+
+ reset = intel_get_gpu_reset(dev);
+ if (reset == NULL)
return -ENODEV;
+
+ return reset(dev);
+}
+
+bool intel_has_gpu_reset(struct drm_device *dev)
+{
+ return intel_get_gpu_reset(dev) != NULL;
}
void intel_uncore_check_errors(struct drm_device *dev)
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 214eceefc981..e671ad369416 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24,
+ imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
tve->hsync_pin, tve->vsync_pin);
break;
case TVE_MODE_TVOUT:
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 74a9ce40ddc4..b4deb9cf9d71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -21,6 +21,7 @@
#include <drm/drm_panel.h>
#include <linux/videodev2.h>
#include <video/of_display_timing.h>
+#include <linux/of_graph.h>
#include "imx-drm.h"
@@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
struct device_node *np = dev->of_node;
- struct device_node *panel_node;
+ struct device_node *port;
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
@@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
}
- panel_node = of_parse_phandle(np, "fsl,panel", 0);
- if (panel_node) {
- imxpd->panel = of_drm_find_panel(panel_node);
- if (!imxpd->panel)
- return -EPROBE_DEFER;
+ /* port@1 is the output port */
+ port = of_graph_get_port_by_id(np, 1);
+ if (port) {
+ struct device_node *endpoint, *remote;
+
+ endpoint = of_get_child_by_name(port, "endpoint");
+ if (endpoint) {
+ remote = of_graph_get_remote_port_parent(endpoint);
+ if (remote)
+ imxpd->panel = of_drm_find_panel(remote);
+ if (!imxpd->panel)
+ return -EPROBE_DEFER;
+ }
}
imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 9f9780b7ddf0..4f2068fe5d88 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,18 +70,22 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
BUG_ON(pixels_current == pixels_prev);
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
ret = mgag200_bo_reserve(pixels_1, true);
if (ret) {
WREG8(MGA_CURPOSXL, 0);
WREG8(MGA_CURPOSXH, 0);
- return ret;
+ goto out_unref;
}
ret = mgag200_bo_reserve(pixels_2, true);
if (ret) {
WREG8(MGA_CURPOSXL, 0);
WREG8(MGA_CURPOSXH, 0);
mgag200_bo_unreserve(pixels_1);
- return ret;
+ goto out_unreserve1;
}
if (!handle) {
@@ -106,16 +110,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
}
}
- mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj) {
- mutex_unlock(&dev->struct_mutex);
- ret = -ENOENT;
- goto out1;
- }
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
bo = gem_to_mga_bo(obj);
ret = mgag200_bo_reserve(bo, true);
if (ret) {
@@ -252,7 +246,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
if (ret)
mga_hide_cursor(mdev);
mgag200_bo_unreserve(pixels_1);
+out_unreserve1:
mgag200_bo_unreserve(pixels_2);
+out_unref:
+ drm_gem_object_unreference_unlocked(obj);
+
return ret;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index c36b8304042b..87de15ea1f93 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -101,7 +101,7 @@ static void mga_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct mga_fbdev *mfbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
@@ -110,7 +110,7 @@ static void mga_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct mga_fbdev *mfbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
area->height);
}
@@ -119,7 +119,7 @@ static void mga_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct mga_fbdev *mfbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
image->height);
}
@@ -166,8 +166,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_gem_object *gobj = NULL;
- struct device *device = &dev->pdev->dev;
- struct mgag200_bo *bo;
int ret;
void *sysram;
int size;
@@ -185,15 +183,14 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
- bo = gem_to_mga_bo(gobj);
sysram = vmalloc(size);
if (!sysram)
return -ENOMEM;
- info = framebuffer_alloc(0, device);
- if (info == NULL)
- return -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
info->par = mfbdev;
@@ -208,14 +205,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
/* setup helper */
mfbdev->helper.fb = fb;
- mfbdev->helper.fbdev = info;
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
- ret = -ENOMEM;
- goto out;
- }
strcpy(info->fix.id, "mgadrmfb");
@@ -223,11 +212,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
info->fbops = &mgag200fb_ops;
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out;
- }
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = mdev->mc.vram_size;
@@ -242,24 +226,15 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("allocated %dx%d\n",
fb->width, fb->height);
return 0;
-out:
- return ret;
}
static int mga_fbdev_destroy(struct drm_device *dev,
struct mga_fbdev *mfbdev)
{
- struct fb_info *info;
struct mga_framebuffer *mfb = &mfbdev->mfb;
- if (mfbdev->helper.fbdev) {
- info = mfbdev->helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&mfbdev->helper);
+ drm_fb_helper_release_fbi(&mfbdev->helper);
if (mfb->obj) {
drm_gem_object_unreference_unlocked(mfb->obj);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index f6b283b8375e..c99c2cb28939 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -345,23 +345,15 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
uint64_t *offset)
{
struct drm_gem_object *obj;
- int ret;
struct mgag200_bo *bo;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
bo = gem_to_mga_bo(obj);
*offset = mgag200_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
- ret = 0;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ad4b9010dfb0..cd75cff096e1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -158,7 +158,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
- unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int delta, tmpdelta;
unsigned int testp, testm, testn;
unsigned int p, m, n;
unsigned int computed;
@@ -172,7 +172,6 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
pllreffreq = 48000;
delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
for (testp = 1; testp < 9; testp++) {
if (clock * testp > vcomax)
@@ -298,7 +297,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
- unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int delta, tmpdelta;
unsigned int testp, testm, testn;
unsigned int p, m, n;
unsigned int computed;
@@ -310,7 +309,6 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
pllreffreq = 50000;
delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
for (testp = 16; testp > 0; testp--) {
if (clock * testp > vcomax)
@@ -392,7 +390,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
- unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int delta, tmpdelta;
unsigned int testp, testm, testn;
unsigned int p, m, n;
unsigned int computed;
@@ -406,7 +404,6 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
pllreffreq = 33333;
delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
for (testp = 16; testp > 0; testp >>= 1) {
if (clock * testp > vcomax)
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d16964ea0ed4..05108b505fbf 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -378,7 +378,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
int mgag200_bo_unpin(struct mgag200_bo *bo)
{
- int i, ret;
+ int i;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
return 0;
@@ -389,11 +389,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
- if (ret)
- return ret;
-
- return 0;
+ return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
}
int mgag200_bo_push_sysram(struct mgag200_bo *bo)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 08ba8d0d93f5..8e6c7c638e24 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -9,6 +9,7 @@ config DRM_MSM
select DRM_PANEL
select SHMEM
select TMPFS
+ select QCOM_SCM
default y
help
DRM/KMS driver for MSM/snapdragon.
@@ -53,3 +54,17 @@ config DRM_MSM_DSI_PLL
help
Choose this option to enable DSI PLL driver which provides DSI
source clocks under common clock framework.
+
+config DRM_MSM_DSI_28NM_PHY
+ bool "Enable DSI 28nm PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the 28nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_20NM_PHY
+ bool "Enable DSI 20nm PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the 20nm DSI PHY is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 16a81b94d6f0..0a543eb5e5d7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,5 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
-ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
+ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
adreno/adreno_device.o \
@@ -10,6 +10,7 @@ msm-y := \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
hdmi/hdmi_connector.o \
+ hdmi/hdmi_hdcp.o \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8x60.o \
@@ -53,12 +54,18 @@ msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
- dsi/dsi_phy.o \
+ dsi/phy/dsi_phy.o \
mdp/mdp5/mdp5_cmd_encoder.o
-msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
- dsi/pll/dsi_pll_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+
+ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
+msm-y += dsi/pll/dsi_pll.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
+endif
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 23176e402796..0261f0d31612 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 1c599e5cf318..48d133711487 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -326,6 +326,13 @@ enum a3xx_tex_type {
A3XX_TEX_3D = 3,
};
+enum a3xx_tex_msaa {
+ A3XX_TPL1_MSAA1X = 0,
+ A3XX_TPL1_MSAA2X = 1,
+ A3XX_TPL1_MSAA4X = 2,
+ A3XX_TPL1_MSAA8X = 3,
+};
+
#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
@@ -2652,6 +2659,7 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
#define REG_A3XX_TEX_SAMP_0 0x00000000
+#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001
#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
@@ -2695,6 +2703,7 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
{
return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
}
+#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
@@ -2750,6 +2759,12 @@ static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000
+#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20
+static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
+{
+ return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
+}
#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
#define A3XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
@@ -2785,7 +2800,7 @@ static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
}
#define REG_A3XX_TEX_CONST_2 0x00000002
-#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
+#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff
#define A3XX_TEX_CONST_2_INDX__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
{
@@ -2805,7 +2820,7 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
}
#define REG_A3XX_TEX_CONST_3 0x00000003
-#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x00007fff
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff
#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
{
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3f06ecf62583..ac55066db3b0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -227,6 +227,7 @@ enum a4xx_depth_format {
DEPTH4_NONE = 0,
DEPTH4_16 = 1,
DEPTH4_24_8 = 2,
+ DEPTH4_32 = 3,
};
enum a4xx_tess_spacing {
@@ -429,7 +430,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000
-#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000
#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
{
@@ -439,7 +440,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
-#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8
#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
{
@@ -570,6 +571,15 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
}
+#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2
+static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
+{
+ return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
+}
+
#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb
#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
@@ -811,6 +821,23 @@ static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
+#define REG_A4XX_RB_STENCIL_INFO 0x00002108
+#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
+static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_PITCH 0x00002109
+#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A4XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
+}
+
#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
@@ -1433,6 +1460,7 @@ static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
{
return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
}
+#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000
#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300
@@ -1470,6 +1498,76 @@ static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312
+#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a
+#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1492,6 +1590,82 @@ static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339
+#define REG_A4XX_SP_GS_PARAM_REG 0x00002341
+#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8
+static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1693,6 +1867,18 @@ static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
{
return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
}
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
#define REG_A4XX_VFD_CONTROL_4 0x00002204
@@ -2489,6 +2675,8 @@ static inline uint32_t A4XX_UNKNOWN_20F7(float val)
#define REG_A4XX_UNKNOWN_22D7 0x000022d7
+#define REG_A4XX_UNKNOWN_2352 0x00002352
+
#define REG_A4XX_TEX_SAMP_0 0x00000000
#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 9562a1fa552b..399a9e528139 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index bd5b23bf9041..41904fed1350 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -67,7 +67,7 @@ enum vgt_event_type {
enum pc_di_primtype {
DI_PT_NONE = 0,
- DI_PT_POINTLIST_A2XX = 1,
+ DI_PT_POINTLIST_PSIZE = 1,
DI_PT_LINELIST = 2,
DI_PT_LINESTRIP = 3,
DI_PT_TRILIST = 4,
@@ -75,7 +75,7 @@ enum pc_di_primtype {
DI_PT_TRISTRIP = 6,
DI_PT_LINELOOP = 7,
DI_PT_RECTLIST = 8,
- DI_PT_POINTLIST_A3XX = 9,
+ DI_PT_POINTLIST = 9,
DI_PT_LINE_ADJ = 10,
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1f2561e2ff71..6edcd6f57e70 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -15,10 +15,10 @@
struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
{
- if (!msm_dsi || !msm_dsi->panel)
+ if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
return NULL;
- return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+ return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
}
@@ -74,19 +74,15 @@ static void dsi_destroy(struct msm_dsi *msm_dsi)
static struct msm_dsi *dsi_init(struct platform_device *pdev)
{
- struct msm_dsi *msm_dsi = NULL;
+ struct msm_dsi *msm_dsi;
int ret;
- if (!pdev) {
- ret = -ENXIO;
- goto fail;
- }
+ if (!pdev)
+ return ERR_PTR(-ENXIO);
msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
- if (!msm_dsi) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!msm_dsi)
+ return ERR_PTR(-ENOMEM);
DBG("dsi probed=%p", msm_dsi);
msm_dsi->pdev = pdev;
@@ -95,24 +91,22 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
/* Init dsi host */
ret = msm_dsi_host_init(msm_dsi);
if (ret)
- goto fail;
+ goto destroy_dsi;
/* GET dsi PHY */
ret = dsi_get_phy(msm_dsi);
if (ret)
- goto fail;
+ goto destroy_dsi;
/* Register to dsi manager */
ret = msm_dsi_manager_register(msm_dsi);
if (ret)
- goto fail;
+ goto destroy_dsi;
return msm_dsi;
-fail:
- if (msm_dsi)
- dsi_destroy(msm_dsi);
-
+destroy_dsi:
+ dsi_destroy(msm_dsi);
return ERR_PTR(ret);
}
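
The reworked dsi_init() above encodes errors directly in the returned pointer instead of bailing to a shared fail label. A minimal, hypothetical sketch of that ERR_PTR()/IS_ERR() convention (names invented for illustration, not code from this patch):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int id; };

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */

	return f;
}

static int foo_open(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);		/* decode it back into an int */

	kfree(f);
	return 0;
}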
@@ -196,6 +190,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
{
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_bridge *ext_bridge;
int ret, i;
if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
@@ -223,10 +218,25 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
msm_dsi->encoders[i] = encoders[i];
}
- msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+ /*
+ * check if the dsi encoder output is connected to a panel or an
+ * external bridge. We create a connector only if we're connected to a
+ * drm_panel device. When we're connected to an external bridge, we
+ * assume that the drm_bridge driver will create the connector itself.
+ */
+ ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
+
+ if (ext_bridge)
+ msm_dsi->connector =
+ msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+ else
+ msm_dsi->connector =
+ msm_dsi_manager_connector_init(msm_dsi->id);
+
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);
- dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+ dev_err(dev->dev,
+ "failed to create dsi connector: %d\n", ret);
msm_dsi->connector = NULL;
goto fail;
}
@@ -242,10 +252,12 @@ fail:
msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
msm_dsi->bridge = NULL;
}
- if (msm_dsi->connector) {
+
+ /* don't destroy connector if we didn't make it */
+ if (msm_dsi->connector && !msm_dsi->external_bridge)
msm_dsi->connector->funcs->destroy(msm_dsi->connector);
- msm_dsi->connector = NULL;
- }
+
+ msm_dsi->connector = NULL;
}
return ret;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 92d697de4858..5f5a3732cdf6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -27,21 +27,10 @@
#define DSI_1 1
#define DSI_MAX 2
-#define DSI_CLOCK_MASTER DSI_0
-#define DSI_CLOCK_SLAVE DSI_1
-
-#define DSI_LEFT DSI_0
-#define DSI_RIGHT DSI_1
-
-/* According to the current drm framework sequence, take the encoder of
- * DSI_1 as master encoder
- */
-#define DSI_ENCODER_MASTER DSI_1
-#define DSI_ENCODER_SLAVE DSI_0
-
enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_HPM,
MSM_DSI_PHY_28NM_LP,
+ MSM_DSI_PHY_20NM,
MSM_DSI_PHY_MAX
};
@@ -65,13 +54,21 @@ struct msm_dsi {
struct drm_device *dev;
struct platform_device *pdev;
+ /* connector managed by us when we're connected to a drm_panel */
struct drm_connector *connector;
+ /* internal dsi bridge attached to MDP interface */
struct drm_bridge *bridge;
struct mipi_dsi_host *host;
struct msm_dsi_phy *phy;
+
+ /*
+ * panel/external_bridge connected to dsi bridge output, only one of the
+ * two can be valid at a time
+ */
struct drm_panel *panel;
- unsigned long panel_flags;
+ struct drm_bridge *external_bridge;
+ unsigned long device_flags;
struct device *phy_dev;
bool phy_enabled;
@@ -86,6 +83,7 @@ struct msm_dsi {
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_phy_enable(int id,
const unsigned long bit_rate, const unsigned long esc_rate,
u32 *clk_pre, u32 *clk_post);
@@ -96,6 +94,11 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
/* msm dsi */
+static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
+{
+ return msm_dsi->panel || msm_dsi->external_bridge;
+}
+
struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
/* dsi pll */
@@ -106,6 +109,8 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
#else
static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id) {
@@ -119,6 +124,13 @@ static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
{
return -ENODEV;
}
+static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+}
+static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+ return 0;
+}
#endif
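
The #else stubs above keep callers building when the PLL code is compiled out. A hedged sketch of the same pattern with a hypothetical config symbol and type:

struct foo_pll;

#ifdef CONFIG_FOO_PLL
void foo_pll_save_state(struct foo_pll *pll);
int foo_pll_restore_state(struct foo_pll *pll);
#else
static inline void foo_pll_save_state(struct foo_pll *pll)
{
}
static inline int foo_pll_restore_state(struct foo_pll *pll)
{
	return 0;	/* nothing to restore when PLL support is disabled */
}
#endif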
/* dsi host */
@@ -140,6 +152,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode);
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
unsigned long *panel_flags);
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
@@ -153,9 +166,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
struct msm_dsi_phy;
void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void);
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate);
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
u32 *clk_pre, u32 *clk_post);
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 9791ea04bcbc..1d2e32f0817b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -382,6 +382,11 @@ static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
#define REG_DSI_TRIG_DMA 0x0000008c
#define REG_DSI_DLN0_PHY_ERR 0x000000b0
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000
#define REG_DSI_TIMEOUT_STATUS 0x000000bc
@@ -435,6 +440,9 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
#define REG_DSI_PHY_RESET 0x00000128
#define DSI_PHY_RESET_RESET 0x00000001
+#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
+#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
+
#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
@@ -830,6 +838,7 @@ static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
+#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
@@ -994,5 +1003,185 @@ static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4
+static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_20nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_20nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_20nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_20nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4
+#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
+
+#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
#endif /* DSI_XML */
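
As a hypothetical usage sketch (not part of this patch), the generated single-field helpers above are meant to be OR-ed together when one register carries several fields, e.g. TIMING_CTRL_9:

#include <stdint.h>

/* Illustrative only: pack TA_GO and TA_SURE into one TIMING_CTRL_9 value. */
static uint32_t pack_timing_ctrl_9(uint32_t ta_go, uint32_t ta_sure)
{
	return DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(ta_go) |
	       DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(ta_sure);
}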
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
new file mode 100644
index 000000000000..5872d5e5934f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_cfg.h"
+
+/* DSI v2 is not yet supported */
+static const struct msm_dsi_config dsi_v2_cfg = {
+ .io_offset = 0,
+};
+
+static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+};
+
+static const struct msm_dsi_config msm8916_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 2850000, 2850000, 100000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+};
+
+static const struct msm_dsi_config msm8994_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 7,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdda", 1250000, 1250000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ {"vcca", 1000000, 1000000, 10000, 100},
+ {"vdd", 1800000, 1800000, 100000, 100},
+ {"lab_reg", -1, -1, -1, -1},
+ {"ibb_reg", -1, -1, -1, -1},
+ },
+ }
+};
+
+static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
+ {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
+ &msm8974_apq8084_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
+ &msm8974_apq8084_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
+ &msm8974_apq8084_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
+ &msm8974_apq8084_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+ int i;
+
+ for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
+ if ((dsi_cfg_handlers[i].major == major) &&
+ (dsi_cfg_handlers[i].minor == minor)) {
+ cfg_hnd = &dsi_cfg_handlers[i];
+ break;
+ }
+ }
+
+ return cfg_hnd;
+}
+
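A hedged usage sketch of the lookup above (mirrors how dsi_get_config() consumes it later in this patch; illustrative wrapper only):

#include "dsi_cfg.h"

/* Illustrative: map a controller version pair to its per-SoC config. */
static const struct msm_dsi_config *lookup_dsi_cfg(u32 major, u32 minor)
{
	const struct msm_dsi_cfg_handler *hnd = msm_dsi_cfg_get(major, minor);

	if (!hnd)			/* unknown major/minor combination */
		return NULL;

	return hnd->cfg;		/* io_offset + regulator set */
}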
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
new file mode 100644
index 000000000000..4cf887240177
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_DSI_CFG_H__
+#define __MSM_DSI_CFG_H__
+
+#include "dsi.h"
+
+#define MSM_DSI_VER_MAJOR_V2 0x02
+#define MSM_DSI_VER_MAJOR_6G 0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
+
+#define DSI_6G_REG_SHIFT 4
+
+struct msm_dsi_config {
+ u32 io_offset;
+ struct dsi_reg_config reg_cfg;
+};
+
+struct msm_dsi_cfg_handler {
+ u32 major;
+ u32 minor;
+ const struct msm_dsi_config *cfg;
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
+
+#endif /* __MSM_DSI_CFG_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index de0400923303..8d82973fe9db 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -20,103 +20,15 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <video/mipi_display.h>
#include "dsi.h"
#include "dsi.xml.h"
-
-#define MSM_DSI_VER_MAJOR_V2 0x02
-#define MSM_DSI_VER_MAJOR_6G 0x03
-#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
-#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
-#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
-#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
-#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
-
-#define DSI_6G_REG_SHIFT 4
-
-struct dsi_config {
- u32 major;
- u32 minor;
- u32 io_offset;
- struct dsi_reg_config reg_cfg;
-};
-
-static const struct dsi_config dsi_cfgs[] = {
- {MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
- { /* 8974 v1 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_0,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
- { /* 8974 v2 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_1,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
- { /* 8974 v3 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
- { /* 8084 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_2,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
- { /* 8916 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 2850000, 2850000, 100000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
-};
+#include "dsi_cfg.h"
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
@@ -194,7 +106,7 @@ struct msm_dsi_host {
struct gpio_desc *disp_en_gpio;
struct gpio_desc *te_gpio;
- const struct dsi_config *cfg;
+ const struct msm_dsi_cfg_handler *cfg_hnd;
struct completion dma_comp;
struct completion video_comp;
@@ -212,8 +124,8 @@ struct msm_dsi_host {
struct drm_display_mode *mode;
- /* Panel info */
- struct device_node *panel_node;
+ /* connected device info */
+ struct device_node *device_node;
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@@ -239,61 +151,58 @@ static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
- return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+ return msm_readl(msm_host->ctrl_base + reg);
}
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
- msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+ msm_writel(data, msm_host->ctrl_base + reg);
}
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
-static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *dsi_get_config(
+ struct msm_dsi_host *msm_host)
{
- const struct dsi_config *cfg;
+ const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct regulator *gdsc_reg;
- int i, ret;
+ int ret;
u32 major = 0, minor = 0;
gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
if (IS_ERR(gdsc_reg)) {
pr_err("%s: cannot get gdsc\n", __func__);
- goto fail;
+ goto exit;
}
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
- regulator_put(gdsc_reg);
- goto fail;
+ goto put_gdsc;
}
ret = clk_prepare_enable(msm_host->ahb_clk);
if (ret) {
pr_err("%s: unable to enable ahb_clk\n", __func__);
- regulator_disable(gdsc_reg);
- regulator_put(gdsc_reg);
- goto fail;
+ goto disable_gdsc;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
-
- clk_disable_unprepare(msm_host->ahb_clk);
- regulator_disable(gdsc_reg);
- regulator_put(gdsc_reg);
if (ret) {
pr_err("%s: Invalid version\n", __func__);
- goto fail;
+ goto disable_clks;
}
- for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
- cfg = dsi_cfgs + i;
- if ((cfg->major == major) && (cfg->minor == minor))
- return cfg;
- }
- pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+ cfg_hnd = msm_dsi_cfg_get(major, minor);
-fail:
- return NULL;
+ DBG("%s: Version %x:%x\n", __func__, major, minor);
+
+disable_clks:
+ clk_disable_unprepare(msm_host->ahb_clk);
+disable_gdsc:
+ regulator_disable(gdsc_reg);
+put_gdsc:
+ regulator_put(gdsc_reg);
+exit:
+ return cfg_hnd;
}
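
dsi_get_config() above now releases resources by falling through stacked labels in the reverse order they were acquired. A hedged, standalone sketch of that unwind shape (illustrative only, not code from this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Acquire gdsc, then the AHB clock; release them in reverse order. */
static int probe_version(struct device *dev, struct clk *ahb_clk)
{
	struct regulator *gdsc;
	int ret;

	gdsc = regulator_get(dev, "gdsc");
	if (IS_ERR(gdsc))
		return PTR_ERR(gdsc);

	ret = regulator_enable(gdsc);
	if (ret)
		goto put_gdsc;

	ret = clk_prepare_enable(ahb_clk);
	if (ret)
		goto disable_gdsc;

	/* ... read the version registers here ... */

	clk_disable_unprepare(ahb_clk);
disable_gdsc:
	regulator_disable(gdsc);
put_gdsc:
	regulator_put(gdsc);
	return ret;
}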
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
@@ -304,8 +213,8 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
- int num = msm_host->cfg->reg_cfg.num;
+ const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+ int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int i;
DBG("");
@@ -320,8 +229,8 @@ static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
- int num = msm_host->cfg->reg_cfg.num;
+ const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+ int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int ret, i;
DBG("");
@@ -354,8 +263,8 @@ fail:
static int dsi_regulator_init(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
- int num = msm_host->cfg->reg_cfg.num;
+ const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+ int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int i, ret;
for (i = 0; i < num; i++)
@@ -697,6 +606,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
{
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u32 data = 0;
if (!enable) {
@@ -750,8 +660,8 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
- if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
- (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+ if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+ (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
@@ -1257,7 +1167,11 @@ static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
- if (status) {
+ if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
}
@@ -1359,7 +1273,8 @@ static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
return PTR_ERR(msm_host->disp_en_gpio);
}
- msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN);
+ msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
+ GPIOD_IN);
if (IS_ERR(msm_host->te_gpio)) {
DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
return PTR_ERR(msm_host->te_gpio);
@@ -1379,7 +1294,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
- msm_host->panel_node = dsi->dev.of_node;
+ WARN_ON(dsi->dev.of_node != msm_host->device_node);
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1398,7 +1313,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- msm_host->panel_node = NULL;
+ msm_host->device_node = NULL;
DBG("id=%d", msm_host->id);
if (msm_host->dev)
@@ -1429,6 +1344,48 @@ static struct mipi_dsi_host_ops dsi_host_ops = {
.transfer = dsi_host_transfer,
};
+static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *endpoint, *device_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
+ if (ret) {
+ dev_err(dev, "%s: host index not specified, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Get the first endpoint node. In our case, dsi has one output port
+ * to which the panel is connected. Don't return an error if a port
+ * isn't defined. It's possible that there is nothing connected to
+ * the dsi output.
+ */
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ dev_dbg(dev, "%s: no endpoint\n", __func__);
+ return 0;
+ }
+
+ /* Get panel node from the output port's endpoint data */
+ device_node = of_graph_get_remote_port_parent(endpoint);
+ if (!device_node) {
+ dev_err(dev, "%s: no valid device\n", __func__);
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(device_node);
+
+ msm_host->device_node = device_node;
+
+ return 0;
+}
+
int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
@@ -1443,15 +1400,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
- ret = of_property_read_u32(pdev->dev.of_node,
- "qcom,dsi-host-index", &msm_host->id);
+ msm_host->pdev = pdev;
+
+ ret = dsi_host_parse_dt(msm_host);
if (ret) {
- dev_err(&pdev->dev,
- "%s: host index not specified, ret=%d\n",
- __func__, ret);
+ pr_err("%s: failed to parse dt\n", __func__);
goto fail;
}
- msm_host->pdev = pdev;
ret = dsi_clk_init(msm_host);
if (ret) {
@@ -1466,13 +1421,16 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
- msm_host->cfg = dsi_get_config(msm_host);
- if (!msm_host->cfg) {
+ msm_host->cfg_hnd = dsi_get_config(msm_host);
+ if (!msm_host->cfg_hnd) {
ret = -EINVAL;
pr_err("%s: get config failed\n", __func__);
goto fail;
}
+ /* fixup base address by io offset */
+ msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+
ret = dsi_regulator_init(msm_host);
if (ret) {
pr_err("%s: regulator init failed\n", __func__);
@@ -1559,7 +1517,6 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct device_node *node;
int ret;
/* Register mipi dsi host */
@@ -1577,14 +1534,13 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
* It makes sure panel is connected when fbcon detects
* connector status and gets the proper display mode to
* create framebuffer.
+ * Don't try to defer if there is nothing connected to the dsi
+ * output
*/
- if (check_defer) {
- node = of_get_child_by_name(msm_host->pdev->dev.of_node,
- "panel");
- if (node) {
- if (!of_drm_find_panel(node))
+ if (check_defer && msm_host->device_node) {
+ if (!of_drm_find_panel(msm_host->device_node))
+ if (!of_drm_find_bridge(msm_host->device_node))
return -EPROBE_DEFER;
- }
}
}
@@ -1663,6 +1619,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int data_byte, rx_byte, dlen, end;
int short_response, diff, pkt_size, ret = 0;
char cmd;
@@ -1704,8 +1661,8 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return -EINVAL;
}
- if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
- (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+ if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+ (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
/* Clear the RDBK_DATA registers */
dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
DSI_RDBK_DATA_CTRL_CLR);
@@ -1919,6 +1876,13 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
goto fail_disable_reg;
}
+ ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
+ if (ret) {
+ pr_err("%s: failed to set pinctrl default state, %d\n",
+ __func__, ret);
+ goto fail_disable_clk;
+ }
+
dsi_timing_setup(msm_host);
dsi_sw_reset(msm_host);
dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
@@ -1931,6 +1895,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
return 0;
+fail_disable_clk:
+ dsi_clk_ctrl(msm_host, 0);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
unlock_ret:
@@ -1953,6 +1919,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 0);
+ pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
+
msm_dsi_manager_phy_disable(msm_host->id);
dsi_clk_ctrl(msm_host, 0);
@@ -1993,10 +1961,16 @@ struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
struct drm_panel *panel;
- panel = of_drm_find_panel(msm_host->panel_node);
+ panel = of_drm_find_panel(msm_host->device_node);
if (panel_flags)
*panel_flags = msm_host->mode_flags;
return panel;
}
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return of_drm_find_bridge(msm_host->device_node);
+}
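
A hedged sketch tying the two lookups above to the msm_dsi_device_connected() helper added in dsi.h (illustrative caller, not code from this patch):

#include "dsi.h"

/* Illustrative: the link counts as connected if either lookup succeeds. */
static bool dsi_output_present(struct msm_dsi *msm_dsi)
{
	msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
						&msm_dsi->device_flags);
	msm_dsi->external_bridge = msm_dsi_host_get_bridge(msm_dsi->host);

	return msm_dsi_device_connected(msm_dsi);	/* panel || external_bridge */
}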
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 87ac6612b6f8..0455ff75074a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -14,19 +14,31 @@
#include "msm_kms.h"
#include "dsi.h"
+#define DSI_CLOCK_MASTER DSI_0
+#define DSI_CLOCK_SLAVE DSI_1
+
+#define DSI_LEFT DSI_0
+#define DSI_RIGHT DSI_1
+
+/* According to the current drm framework sequence, take the encoder of
+ * DSI_1 as master encoder
+ */
+#define DSI_ENCODER_MASTER DSI_1
+#define DSI_ENCODER_SLAVE DSI_0
+
struct msm_dsi_manager {
struct msm_dsi *dsi[DSI_MAX];
- bool is_dual_panel;
+ bool is_dual_dsi;
bool is_sync_needed;
- int master_panel_id;
+ int master_dsi_link_id;
};
static struct msm_dsi_manager msm_dsim_glb;
-#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel)
+#define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi)
#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
-#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id)
+#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id)
static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
{
@@ -38,23 +50,23 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
}
-static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
- /* We assume 2 dsi nodes have the same information of dual-panel and
+ /* We assume 2 dsi nodes have the same information of dual-dsi and
* sync-mode, and only one node specifies master in case of dual mode.
*/
- if (!msm_dsim->is_dual_panel)
- msm_dsim->is_dual_panel = of_property_read_bool(
- np, "qcom,dual-panel-mode");
+ if (!msm_dsim->is_dual_dsi)
+ msm_dsim->is_dual_dsi = of_property_read_bool(
+ np, "qcom,dual-dsi-mode");
- if (msm_dsim->is_dual_panel) {
- if (of_property_read_bool(np, "qcom,master-panel"))
- msm_dsim->master_panel_id = id;
+ if (msm_dsim->is_dual_dsi) {
+ if (of_property_read_bool(np, "qcom,master-dsi"))
+ msm_dsim->master_dsi_link_id = id;
if (!msm_dsim->is_sync_needed)
msm_dsim->is_sync_needed = of_property_read_bool(
- np, "qcom,sync-dual-panel");
+ np, "qcom,sync-dual-dsi");
}
return 0;
@@ -68,7 +80,7 @@ static int dsi_mgr_host_register(int id)
struct msm_dsi_pll *src_pll;
int ret;
- if (!IS_DUAL_PANEL()) {
+ if (!IS_DUAL_DSI()) {
ret = msm_dsi_host_register(msm_dsi->host, true);
if (ret)
return ret;
@@ -78,9 +90,9 @@ static int dsi_mgr_host_register(int id)
} else if (!other_dsi) {
ret = 0;
} else {
- struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+ struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
- struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+ struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
other_dsi : msm_dsi;
/* Register slave host first, so that slave DSI device
* has a chance to probe, and do not block the master
@@ -144,28 +156,28 @@ static enum drm_connector_status dsi_mgr_connector_detect(
DBG("id=%d", id);
if (!msm_dsi->panel) {
msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
- &msm_dsi->panel_flags);
+ &msm_dsi->device_flags);
/* There is only 1 panel in the global panel list
- * for dual panel mode. Therefore slave dsi should get
+ * for dual DSI mode. Therefore slave dsi should get
* the drm_panel instance from master dsi, and
* keep using the panel flags got from the current DSI link.
*/
- if (!msm_dsi->panel && IS_DUAL_PANEL() &&
- !IS_MASTER_PANEL(id) && other_dsi)
+ if (!msm_dsi->panel && IS_DUAL_DSI() &&
+ !IS_MASTER_DSI_LINK(id) && other_dsi)
msm_dsi->panel = msm_dsi_host_get_panel(
other_dsi->host, NULL);
- if (msm_dsi->panel && IS_DUAL_PANEL())
+ if (msm_dsi->panel && IS_DUAL_DSI())
drm_object_attach_property(&connector->base,
connector->dev->mode_config.tile_property, 0);
- /* Set split display info to kms once dual panel is connected
- * to both hosts
+ /* Set split display info to kms once dual DSI panel is
+ * connected to both hosts.
*/
- if (msm_dsi->panel && IS_DUAL_PANEL() &&
+ if (msm_dsi->panel && IS_DUAL_DSI() &&
other_dsi && other_dsi->panel) {
- bool cmd_mode = !(msm_dsi->panel_flags &
+ bool cmd_mode = !(msm_dsi->device_flags &
MIPI_DSI_MODE_VIDEO);
struct drm_encoder *encoder = msm_dsi_get_encoder(
dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
@@ -176,7 +188,7 @@ static enum drm_connector_status dsi_mgr_connector_detect(
kms->funcs->set_split_display(kms, encoder,
slave_enc, cmd_mode);
else
- pr_err("mdp does not support dual panel\n");
+ pr_err("mdp does not support dual DSI\n");
}
}
@@ -273,7 +285,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
if (!num)
return 0;
- if (IS_DUAL_PANEL()) {
+ if (IS_DUAL_DSI()) {
/* report half resolution to user */
dsi_dual_connector_fix_modes(connector);
ret = dsi_dual_connector_tile_init(connector, id);
@@ -328,11 +340,12 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_panel = IS_DUAL_PANEL();
+ bool is_dual_dsi = IS_DUAL_DSI();
int ret;
DBG("id=%d", id);
- if (!panel || (is_dual_panel && (DSI_1 == id)))
+ if (!msm_dsi_device_connected(msm_dsi) ||
+ (is_dual_dsi && (DSI_1 == id)))
return;
ret = msm_dsi_host_power_on(host);
@@ -341,7 +354,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
goto host_on_fail;
}
- if (is_dual_panel && msm_dsi1) {
+ if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
@@ -353,10 +366,13 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
/* Always call panel functions once, because even for dual panels,
* there is only one drm_panel instance.
*/
- ret = drm_panel_prepare(panel);
- if (ret) {
- pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
- goto panel_prep_fail;
+ if (panel) {
+ ret = drm_panel_prepare(panel);
+ if (ret) {
+ pr_err("%s: prepare panel %d failed, %d\n", __func__,
+ id, ret);
+ goto panel_prep_fail;
+ }
}
ret = msm_dsi_host_enable(host);
@@ -365,7 +381,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
goto host_en_fail;
}
- if (is_dual_panel && msm_dsi1) {
+ if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_enable(msm_dsi1->host);
if (ret) {
pr_err("%s: enable host1 failed, %d\n", __func__, ret);
@@ -373,23 +389,27 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
}
}
- ret = drm_panel_enable(panel);
- if (ret) {
- pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
- goto panel_en_fail;
+ if (panel) {
+ ret = drm_panel_enable(panel);
+ if (ret) {
+ pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+ ret);
+ goto panel_en_fail;
+ }
}
return;
panel_en_fail:
- if (is_dual_panel && msm_dsi1)
+ if (is_dual_dsi && msm_dsi1)
msm_dsi_host_disable(msm_dsi1->host);
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
- drm_panel_unprepare(panel);
+ if (panel)
+ drm_panel_unprepare(panel);
panel_prep_fail:
- if (is_dual_panel && msm_dsi1)
+ if (is_dual_dsi && msm_dsi1)
msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
msm_dsi_host_power_off(host);
@@ -414,37 +434,44 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_panel = IS_DUAL_PANEL();
+ bool is_dual_dsi = IS_DUAL_DSI();
int ret;
DBG("id=%d", id);
- if (!panel || (is_dual_panel && (DSI_1 == id)))
+ if (!msm_dsi_device_connected(msm_dsi) ||
+ (is_dual_dsi && (DSI_1 == id)))
return;
- ret = drm_panel_disable(panel);
- if (ret)
- pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+ if (panel) {
+ ret = drm_panel_disable(panel);
+ if (ret)
+ pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+ ret);
+ }
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
- if (is_dual_panel && msm_dsi1) {
+ if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_disable(msm_dsi1->host);
if (ret)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
}
- ret = drm_panel_unprepare(panel);
- if (ret)
- pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
+ if (panel) {
+ ret = drm_panel_unprepare(panel);
+ if (ret)
+ pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
+ id, ret);
+ }
ret = msm_dsi_host_power_off(host);
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
- if (is_dual_panel && msm_dsi1) {
+ if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_off(msm_dsi1->host);
if (ret)
pr_err("%s: host1 power off failed, %d\n",
@@ -460,7 +487,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct mipi_dsi_host *host = msm_dsi->host;
- bool is_dual_panel = IS_DUAL_PANEL();
+ bool is_dual_dsi = IS_DUAL_DSI();
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mode->base.id, mode->name,
@@ -471,11 +498,11 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- if (is_dual_panel && (DSI_1 == id))
+ if (is_dual_dsi && (DSI_1 == id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
- if (is_dual_panel && other_dsi)
+ if (is_dual_dsi && other_dsi)
msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
}
@@ -503,7 +530,7 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
.mode_set = dsi_mgr_bridge_mode_set,
};
-/* initialize connector */
+/* initialize connector when we're connected to a drm_panel */
struct drm_connector *msm_dsi_manager_connector_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -588,6 +615,53 @@ fail:
return ERR_PTR(ret);
}
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_device *dev = msm_dsi->dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *int_bridge, *ext_bridge;
+ struct drm_connector *connector;
+ struct list_head *connector_list;
+
+ int_bridge = msm_dsi->bridge;
+ ext_bridge = msm_dsi->external_bridge =
+ msm_dsi_host_get_bridge(msm_dsi->host);
+
+ /*
+ * HACK: we may not know the external DSI bridge device's mode
+ * flags here. We'll get to know them only when the device
+ * attaches to the dsi host. For now, assume the bridge supports
+ * DSI video mode
+ */
+ encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
+
+ /* link the internal dsi bridge to the external bridge */
+ int_bridge->next = ext_bridge;
+ /* set the external bridge's encoder as dsi's encoder */
+ ext_bridge->encoder = encoder;
+
+ drm_bridge_attach(dev, ext_bridge);
+
+ /*
+	 * We need the drm_connector created by the external bridge
+	 * driver (or someone else) so we can add it to our driver's
+	 * priv->connector[] list, mainly for msm_fbdev_init()
+ */
+ connector_list = &dev->mode_config.connector_list;
+
+ list_for_each_entry(connector, connector_list, head) {
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == encoder->base.id)
+ return connector;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
{
}
@@ -598,12 +672,29 @@ int msm_dsi_manager_phy_enable(int id,
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi_phy *phy = msm_dsi->phy;
+ int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
+ struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
int ret;
- ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+ ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
if (ret)
return ret;
+ /*
+	 * Resetting the DSI PHY silently changes its PLL registers to their
+	 * reset values, which will confuse the clock driver and result in a
+	 * wrong output rate for the link clocks. Restore the PLL state if
+	 * this PLL is being used as a clock source.
+ */
+ if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
+ ret = msm_dsi_pll_restore_state(pll);
+ if (ret) {
+ pr_err("%s: failed to restore pll state\n", __func__);
+ msm_dsi_phy_disable(phy);
+ return ret;
+ }
+ }
+
msm_dsi->phy_enabled = true;
msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
@@ -616,13 +707,18 @@ void msm_dsi_manager_phy_disable(int id)
struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
struct msm_dsi_phy *phy = msm_dsi->phy;
+ struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+
+ /* Save PLL status if it is a clock source */
+ if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
+ msm_dsi_pll_save_state(pll);
/* disable DSI phy
* In dual-dsi configuration, the phy should be disabled for the
* first controller only when the second controller is disabled.
*/
msm_dsi->phy_enabled = false;
- if (IS_DUAL_PANEL() && mdsi && sdsi) {
+ if (IS_DUAL_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_phy_disable(sdsi->phy);
msm_dsi_phy_disable(mdsi->phy);
@@ -713,9 +809,9 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
msm_dsim->dsi[id] = msm_dsi;
- ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+ ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
if (ret) {
- pr_err("%s: failed to parse dual panel info\n", __func__);
+ pr_err("%s: failed to parse dual DSI info\n", __func__);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 728152f3ef48..5de505e627be 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 2d3b33ce1cc5..401ff58d6893 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -12,142 +12,8 @@
*/
#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include "dsi.h"
-#include "dsi.xml.h"
-
-#define dsi_phy_read(offset) msm_readl((offset))
-#define dsi_phy_write(offset, data) msm_writel((data), (offset))
-
-struct dsi_phy_ops {
- int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
- const unsigned long bit_rate, const unsigned long esc_rate);
- int (*disable)(struct msm_dsi_phy *phy);
-};
-
-struct dsi_phy_cfg {
- enum msm_dsi_phy_type type;
- struct dsi_reg_config reg_cfg;
- struct dsi_phy_ops ops;
-};
-
-struct dsi_dphy_timing {
- u32 clk_pre;
- u32 clk_post;
- u32 clk_zero;
- u32 clk_trail;
- u32 clk_prepare;
- u32 hs_exit;
- u32 hs_zero;
- u32 hs_prepare;
- u32 hs_trail;
- u32 hs_rqst;
- u32 ta_go;
- u32 ta_sure;
- u32 ta_get;
-};
-
-struct msm_dsi_phy {
- struct platform_device *pdev;
- void __iomem *base;
- void __iomem *reg_base;
- int id;
-
- struct clk *ahb_clk;
- struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
-
- struct dsi_dphy_timing timing;
- const struct dsi_phy_cfg *cfg;
-
- struct msm_dsi_pll *pll;
-};
-
-static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s);
- if (ret < 0) {
- dev_err(dev, "%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
- for (i = 0; i < num; i++) {
- if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
- ret = regulator_set_voltage(s[i].consumer,
- regs[i].min_voltage, regs[i].max_voltage);
- if (ret < 0) {
- dev_err(dev,
- "regulator %d set voltage failed, %d\n",
- i, ret);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- int num = phy->cfg->reg_cfg.num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer,
- regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- dev_err(dev,
- "regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- dev_err(dev, "regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
+#include "dsi_phy.h"
#define S_DIV_ROUND_UP(n, d) \
(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
@@ -156,6 +22,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
s32 min_result, bool even)
{
s32 v;
+
v = (tmax - tmin) * percent;
v = S_DIV_ROUND_UP(v, 100) + tmin;
if (even && (v & 0x1))
@@ -164,7 +31,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
return max_t(s32, min_result, v);
}
-static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
+static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
s32 ui, s32 coeff, s32 pcnt)
{
s32 tmax, tmin, clk_z;
@@ -186,7 +53,7 @@ static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
timing->clk_zero = clk_z + 8 - temp;
}
-static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
const unsigned long bit_rate, const unsigned long esc_rate)
{
s32 ui, lpx;
@@ -256,9 +123,8 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
temp += 8 * ui + lpx;
tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
if (tmin > tmax) {
- temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
+ temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
timing->clk_pre = temp >> 1;
- temp = (2 * tmax - tmin) * pcnt2;
} else {
timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
}
@@ -276,130 +142,119 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
return 0;
}
-static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+ u32 bit_mask)
{
- void __iomem *base = phy->reg_base;
+ int phy_id = phy->id;
+ u32 val;
- if (!enable) {
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
return;
- }
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+ val = dsi_phy_read(phy->base + reg);
+
+ if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
+ dsi_phy_write(phy->base + reg, val | bit_mask);
+ else
+ dsi_phy_write(phy->base + reg, val & (~bit_mask));
}
-static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
- const unsigned long bit_rate, const unsigned long esc_rate)
+static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
- struct dsi_dphy_timing *timing = &phy->timing;
- int i;
- void __iomem *base = phy->base;
+ struct regulator_bulk_data *s = phy->supplies;
+ const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+ struct device *dev = &phy->pdev->dev;
+ int num = phy->cfg->reg_cfg.num;
+ int i, ret;
- DBG("");
+ for (i = 0; i < num; i++)
+ s[i].supply = regs[i].name;
- if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
- pr_err("%s: D-PHY timing calculation failed\n", __func__);
- return -EINVAL;
+ ret = devm_regulator_bulk_get(dev, num, s);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+ __func__, ret);
+ return ret;
}
- dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
-
- dsi_28nm_phy_regulator_ctrl(phy, true);
-
- dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
-
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
- DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
- DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
- DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
- if (timing->clk_zero & BIT(8))
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
- DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
- DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
- DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
- DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
- DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
- DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
- DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
- DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
- DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
- DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
-
- dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
- dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
-
- dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
-
- for (i = 0; i < 4; i++) {
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ for (i = 0; i < num; i++) {
+ if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+ ret = regulator_set_voltage(s[i].consumer,
+ regs[i].min_voltage, regs[i].max_voltage);
+ if (ret < 0) {
+ dev_err(dev,
+ "regulator %d set voltage failed, %d\n",
+ i, ret);
+ return ret;
+ }
+ }
}
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+ return 0;
+}
- dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
+{
+ struct regulator_bulk_data *s = phy->supplies;
+ const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+ int num = phy->cfg->reg_cfg.num;
+ int i;
- if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
- dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
- else
- dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
+ DBG("");
+ for (i = num - 1; i >= 0; i--)
+ if (regs[i].disable_load >= 0)
+ regulator_set_load(s[i].consumer, regs[i].disable_load);
- return 0;
+ regulator_bulk_disable(num, s);
}
-static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
- dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
- dsi_28nm_phy_regulator_ctrl(phy, false);
+ struct regulator_bulk_data *s = phy->supplies;
+ const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+ struct device *dev = &phy->pdev->dev;
+ int num = phy->cfg->reg_cfg.num;
+ int ret, i;
- /*
- * Wait for the registers writes to complete in order to
- * ensure that the phy is completely disabled
- */
- wmb();
+ DBG("");
+ for (i = 0; i < num; i++) {
+ if (regs[i].enable_load >= 0) {
+ ret = regulator_set_load(s[i].consumer,
+ regs[i].enable_load);
+ if (ret < 0) {
+ dev_err(dev,
+ "regulator %d set op mode failed, %d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+ }
+
+ ret = regulator_bulk_enable(num, s);
+ if (ret < 0) {
+ dev_err(dev, "regulator enable failed, %d\n", ret);
+ goto fail;
+ }
return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ regulator_set_load(s[i].consumer, regs[i].disable_load);
+ return ret;
}
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
+ struct device *dev = &phy->pdev->dev;
int ret;
- pm_runtime_get_sync(&phy->pdev->dev);
+ pm_runtime_get_sync(dev);
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
- pr_err("%s: can't enable ahb clk, %d\n", __func__, ret);
- pm_runtime_put_sync(&phy->pdev->dev);
+ dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+ pm_runtime_put_sync(dev);
}
return ret;
@@ -411,92 +266,74 @@ static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
pm_runtime_put_sync(&phy->pdev->dev);
}
-static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = {
- [MSM_DSI_PHY_28NM_HPM] = {
- .type = MSM_DSI_PHY_28NM_HPM,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- .ops = {
- .enable = dsi_28nm_phy_enable,
- .disable = dsi_28nm_phy_disable,
- }
- },
- [MSM_DSI_PHY_28NM_LP] = {
- .type = MSM_DSI_PHY_28NM_LP,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- .ops = {
- .enable = dsi_28nm_phy_enable,
- .disable = dsi_28nm_phy_disable,
- }
- },
-};
-
static const struct of_device_id dsi_phy_dt_match[] = {
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
- .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],},
+ .data = &dsi_phy_28nm_hpm_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-lp",
- .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],},
+ .data = &dsi_phy_28nm_lp_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
+ { .compatible = "qcom,dsi-phy-20nm",
+ .data = &dsi_phy_20nm_cfgs },
+#endif
{}
};
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
+ struct device *dev = &pdev->dev;
const struct of_device_id *match;
int ret;
- phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node);
+ match = of_match_node(dsi_phy_dt_match, dev->of_node);
if (!match)
return -ENODEV;
phy->cfg = match->data;
phy->pdev = pdev;
- ret = of_property_read_u32(pdev->dev.of_node,
+ ret = of_property_read_u32(dev->of_node,
"qcom,dsi-phy-index", &phy->id);
if (ret) {
- dev_err(&pdev->dev,
- "%s: PHY index not specified, ret=%d\n",
+ dev_err(dev, "%s: PHY index not specified, %d\n",
__func__, ret);
goto fail;
}
+ phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
+ "qcom,dsi-phy-regulator-ldo-mode");
+
phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR(phy->base)) {
- dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__);
+ dev_err(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
- phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+
+ phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
+ "DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
- dev_err(&pdev->dev,
- "%s: failed to map phy regulator base\n", __func__);
+ dev_err(dev, "%s: failed to map phy regulator base\n",
+ __func__);
ret = -ENOMEM;
goto fail;
}
ret = dsi_phy_regulator_init(phy);
if (ret) {
- dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__);
+ dev_err(dev, "%s: failed to init regulator\n", __func__);
goto fail;
}
- phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+ phy->ahb_clk = devm_clk_get(dev, "iface_clk");
if (IS_ERR(phy->ahb_clk)) {
- pr_err("%s: Unable to get ahb clk\n", __func__);
+ dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}
@@ -510,7 +347,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (!phy->pll)
- dev_info(&pdev->dev,
+ dev_info(dev,
"%s: pll init failed, need separate pll clk driver\n",
__func__);
@@ -557,9 +394,10 @@ void __exit msm_dsi_phy_driver_unregister(void)
platform_driver_unregister(&dsi_phy_platform_driver);
}
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate)
{
+ struct device *dev = &phy->pdev->dev;
int ret;
if (!phy || !phy->cfg->ops.enable)
@@ -567,30 +405,37 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
ret = dsi_phy_regulator_enable(phy);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n",
+ dev_err(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
return ret;
}
- return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate);
+ ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
+ if (ret) {
+ dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+ dsi_phy_regulator_disable(phy);
+ return ret;
+ }
+
+ return 0;
}
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
if (!phy || !phy->cfg->ops.disable)
- return -EINVAL;
+ return;
phy->cfg->ops.disable(phy);
- dsi_phy_regulator_disable(phy);
- return 0;
+ dsi_phy_regulator_disable(phy);
}
void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
- u32 *clk_pre, u32 *clk_post)
+ u32 *clk_pre, u32 *clk_post)
{
if (!phy)
return;
+
if (clk_pre)
*clk_pre = phy->timing.clk_pre;
if (clk_post)
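
For reference, the timing hunks above lean on the linear_inter()/S_DIV_ROUND_UP() helpers kept in dsi_phy.c. Below is a minimal, self-contained sketch of that arithmetic with a worked input; only the two helpers mirror the driver, while the main() harness and sample numbers are illustrative assumptions, not from the patch.

#include <stdio.h>

#define S_DIV_ROUND_UP(n, d) \
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))

/* interpolate 'percent' of the way from tmin to tmax, optionally forcing even */
static int linear_inter(int tmax, int tmin, int percent, int min_result, int even)
{
	int v = (tmax - tmin) * percent;

	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 1))
		v++;
	return v > min_result ? v : min_result;
}

int main(void)
{
	/* 10% of the way from tmin=20 to tmax=60: (40 * 10 + 99) / 100 = 4, +20 = 24 */
	printf("%d\n", linear_inter(60, 20, 10, 0, 0));	/* 24 */
	/* forcing an even result bumps an odd value: (440 + 99) / 100 = 5, +20 = 25 -> 26 */
	printf("%d\n", linear_inter(60, 20, 11, 0, 1));	/* 26 */
	return 0;
}
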
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 000000000000..0456b253239f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_PHY_H__
+#define __DSI_PHY_H__
+
+#include <linux/regulator/consumer.h>
+
+#include "dsi.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct msm_dsi_phy_ops {
+ int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+ void (*disable)(struct msm_dsi_phy *phy);
+};
+
+struct msm_dsi_phy_cfg {
+ enum msm_dsi_phy_type type;
+ struct dsi_reg_config reg_cfg;
+ struct msm_dsi_phy_ops ops;
+
+ /*
+ * Each cell {phy_id, pll_id} of the truth table indicates whether
+ * the source PLL selection bit should be set for each PHY.
+ * Fill in the default H/W values for illegal cells, e.g. cell {0, 1}.
+ */
+ bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+};
+
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+
+struct msm_dsi_dphy_timing {
+ u32 clk_pre;
+ u32 clk_post;
+ u32 clk_zero;
+ u32 clk_trail;
+ u32 clk_prepare;
+ u32 hs_exit;
+ u32 hs_zero;
+ u32 hs_prepare;
+ u32 hs_trail;
+ u32 hs_rqst;
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+};
+
+struct msm_dsi_phy {
+ struct platform_device *pdev;
+ void __iomem *base;
+ void __iomem *reg_base;
+ int id;
+
+ struct clk *ahb_clk;
+ struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+
+ struct msm_dsi_dphy_timing timing;
+ const struct msm_dsi_phy_cfg *cfg;
+
+ bool regulator_ldo_mode;
+
+ struct msm_dsi_pll *pll;
+};
+
+/*
+ * PHY internal functions
+ */
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+ u32 bit_mask);
+
+#endif /* __DSI_PHY_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 000000000000..2e9ba118d50a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
+ DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
+ DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
+ DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
+ DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
+ DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
+ DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
+ DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
+ DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
+ DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
+ DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
+ DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
+ DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ if (phy->regulator_ldo_mode) {
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
+ return;
+ }
+
+ /* non LDO mode */
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
+}
+
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+ u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_20nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
+
+ msm_dsi_phy_set_src_pll(phy, src_pll_id,
+ REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
+ DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
+ (i >> 1) * 0x40);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
+ }
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
+
+ dsi_20nm_dphy_set_timing(phy, timing);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
+
+ /* make sure everything is written before enable */
+ wmb();
+ dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
+
+ return 0;
+}
+
+static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
+ dsi_20nm_phy_regulator_ctrl(phy, false);
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+ .type = MSM_DSI_PHY_20NM,
+ .src_pll_truthtable = { {false, true}, {false, true} },
+ .reg_cfg = {
+ .num = 2,
+ .regs = {
+ {"vddio", 1800000, 1800000, 100000, 100},
+ {"vcca", 1000000, 1000000, 10000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_20nm_phy_enable,
+ .disable = dsi_20nm_phy_disable,
+ }
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 000000000000..f1a7c7b46420
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+ dsi_28nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+ dsi_28nm_dphy_set_timing(phy, timing);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ }
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ msm_dsi_phy_set_src_pll(phy, src_pll_id,
+ REG_DSI_28nm_PHY_GLBL_TEST_CTRL,
+ DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+ return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+ dsi_28nm_phy_regulator_ctrl(phy, false);
+
+ /*
+ * Wait for the registers writes to complete in order to
+ * ensure that the phy is completely disabled
+ */
+ wmb();
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
+ .type = MSM_DSI_PHY_28NM_HPM,
+ .src_pll_truthtable = { {true, true}, {false, true} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
+ .type = MSM_DSI_PHY_28NM_LP,
+ .src_pll_truthtable = { {true, true}, {true, true} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ },
+};
+
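
For reference, a small stand-alone sketch of how src_pll_truthtable[phy_id][pll_id] in the configs above drives the GLBL_TEST_CTRL bit via msm_dsi_phy_set_src_pll(). The register is modelled as a plain integer and the bit position is a placeholder; only the table values (copied from dsi_phy_28nm_hpm_cfgs) and the set/clear logic mirror the driver.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DSI_MAX 2

/* values copied from dsi_phy_28nm_hpm_cfgs above */
static const bool src_pll_truthtable[DSI_MAX][DSI_MAX] = {
	{ true,  true },
	{ false, true },
};

/* mirrors msm_dsi_phy_set_src_pll(): set or clear bit_mask per the table */
static uint32_t set_src_pll(uint32_t val, int phy_id, int pll_id, uint32_t bit_mask)
{
	if (phy_id >= DSI_MAX || pll_id >= DSI_MAX)
		return val;

	return src_pll_truthtable[phy_id][pll_id] ? (val | bit_mask)
						  : (val & ~bit_mask);
}

int main(void)
{
	uint32_t bitclk_hs_sel = 1u << 0;	/* placeholder bit position */

	/* dual DSI: both PHYs take PLL0 (the clock master) as the source */
	printf("PHY0 <- PLL0: 0x%x\n", set_src_pll(0x0, 0, 0, bitclk_hs_sel));	/* 0x1, bit set */
	printf("PHY1 <- PLL0: 0x%x\n", set_src_pll(0x1, 1, 0, bitclk_hs_sel));	/* 0x0, bit cleared */
	return 0;
}
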
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 509376fdd112..5104fc9f9a53 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -72,31 +72,14 @@ long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
- int ret;
-
- /*
- * Certain PLLs need to update the same VCO rate and registers
- * after resume in suspend/resume scenario.
- */
- if (pll->restore_state) {
- ret = pll->restore_state(pll);
- if (ret)
- goto error;
- }
- ret = dsi_pll_enable(pll);
-
-error:
- return ret;
+ return dsi_pll_enable(pll);
}
void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
- if (pll->save_state)
- pll->save_state(pll);
-
dsi_pll_disable(pll);
}
@@ -134,6 +117,29 @@ void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
pll->destroy(pll);
}
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+ if (pll->save_state) {
+ pll->save_state(pll);
+ pll->state_saved = true;
+ }
+}
+
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+ int ret;
+
+ if (pll->restore_state && pll->state_saved) {
+ ret = pll->restore_state(pll);
+ if (ret)
+ return ret;
+
+ pll->state_saved = false;
+ }
+
+ return 0;
+}
+
struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id)
{
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 5a3bb241c039..063caa2c5740 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -27,6 +27,7 @@ struct msm_dsi_pll {
struct clk_hw clk_hw;
bool pll_on;
+ bool state_saved;
unsigned long min_rate;
unsigned long max_rate;
@@ -82,8 +83,16 @@ void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
/*
* Initialization for Each PLL Type
*/
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
+ struct platform_device *pdev, enum msm_dsi_phy_type type, int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __DSI_PLL_H__ */
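
For reference, a minimal stand-alone sketch of the state_saved handshake added above: restore only runs after a successful save and clears the flag, so a stale snapshot is never replayed. The pll struct, callbacks, and rates here are illustrative stand-ins, not the driver's types.

#include <stdio.h>
#include <stdbool.h>

struct pll {
	bool state_saved;
	void (*save_state)(struct pll *pll);
	int (*restore_state)(struct pll *pll);
	unsigned long cached_rate;
	unsigned long hw_rate;
};

static void save(struct pll *p)    { p->cached_rate = p->hw_rate; }
static int  restore(struct pll *p) { p->hw_rate = p->cached_rate; return 0; }

static void pll_save_state(struct pll *p)
{
	if (p->save_state) {
		p->save_state(p);
		p->state_saved = true;
	}
}

static int pll_restore_state(struct pll *p)
{
	int ret;

	if (p->restore_state && p->state_saved) {
		ret = p->restore_state(p);
		if (ret)
			return ret;
		p->state_saved = false;
	}
	return 0;
}

int main(void)
{
	struct pll p = { .save_state = save, .restore_state = restore, .hw_rate = 500 };

	pll_save_state(&p);	/* snapshot taken before the PHY is disabled */
	p.hw_rate = 0;		/* PHY reset clobbers the PLL registers */
	pll_restore_state(&p);	/* replays the snapshot exactly once */
	printf("hw_rate=%lu state_saved=%d\n", p.hw_rate, p.state_saved);	/* 500, 0 */
	return 0;
}
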
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index eb8ac3097ff5..1912cfcca48c 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -465,26 +465,21 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
void __iomem *base = pll_28nm->mmio;
int ret;
- if ((cached_state->vco_rate != 0) &&
- (cached_state->vco_rate == __clk_get_rate(pll->clk_hw.clk))) {
- ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
- cached_state->vco_rate, 0);
- if (ret) {
- dev_err(&pll_28nm->pdev->dev,
- "restore vco rate failed. ret=%d\n", ret);
- return ret;
- }
-
- pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
- cached_state->postdiv3);
- pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
- cached_state->postdiv1);
- pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
- cached_state->byte_mux);
-
- cached_state->vco_rate = 0;
+ ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ dev_err(&pll_28nm->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
}
+ pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+ cached_state->postdiv3);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ cached_state->postdiv1);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+ cached_state->byte_mux);
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 26f268e2dd3d..06cbddfc914f 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f9c71dceb5e2..bef1d65fe28c 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7991069dd492..81200e9be382 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -373,7 +373,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
struct device *dev = &ctrl->pdev->dev;
int ret;
- ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd");
+ ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
if (IS_ERR(ctrl->panel_hpd_gpio)) {
ret = PTR_ERR(ctrl->panel_hpd_gpio);
ctrl->panel_hpd_gpio = NULL;
@@ -381,13 +381,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
return ret;
}
- ret = gpiod_direction_input(ctrl->panel_hpd_gpio);
- if (ret) {
- pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret);
- return ret;
- }
-
- ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en");
+ ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
if (IS_ERR(ctrl->panel_en_gpio)) {
ret = PTR_ERR(ctrl->panel_en_gpio);
ctrl->panel_en_gpio = NULL;
@@ -395,13 +389,6 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
return ret;
}
- ret = gpiod_direction_output(ctrl->panel_en_gpio, 0);
- if (ret) {
- pr_err("%s: Set direction for panel_en failed, %d\n",
- __func__, ret);
- return ret;
- }
-
DBG("gpio on");
return 0;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 814536202efe..101b324cdeef 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -22,7 +22,9 @@
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
{
uint32_t ctrl = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
if (power_on) {
ctrl |= HDMI_CTRL_ENABLE;
if (!hdmi->hdmi_mode) {
@@ -37,6 +39,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
}
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
power_on ? "Enable" : "Disable", ctrl);
}
@@ -51,6 +54,10 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id)
/* Process DDC: */
hdmi_i2c_irq(hdmi->i2c);
+ /* Process HDCP: */
+ if (hdmi->hdcp_ctrl)
+ hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+
/* TODO audio.. */
return IRQ_HANDLED;
@@ -60,6 +67,15 @@ static void hdmi_destroy(struct hdmi *hdmi)
{
struct hdmi_phy *phy = hdmi->phy;
+ /*
+	 * At this point, hpd has been disabled; after flushing the workq,
+	 * it is safe to deinit hdcp
+ */
+ if (hdmi->workq) {
+ flush_workqueue(hdmi->workq);
+ destroy_workqueue(hdmi->workq);
+ }
+ hdmi_hdcp_destroy(hdmi);
if (phy)
phy->funcs->destroy(phy);
@@ -77,6 +93,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
{
struct hdmi_platform_config *config = pdev->dev.platform_data;
struct hdmi *hdmi = NULL;
+ struct resource *res;
int i, ret;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -87,18 +104,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->pdev = pdev;
hdmi->config = config;
+ spin_lock_init(&hdmi->reg_lock);
/* not sure about which phy maps to which msm.. probably I miss some */
- if (config->phy_init)
+ if (config->phy_init) {
hdmi->phy = config->phy_init(hdmi);
- else
- hdmi->phy = ERR_PTR(-ENXIO);
- if (IS_ERR(hdmi->phy)) {
- ret = PTR_ERR(hdmi->phy);
- dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
- hdmi->phy = NULL;
- goto fail;
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
+ hdmi->phy = NULL;
+ goto fail;
+ }
}
hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
@@ -107,6 +124,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
+ /* HDCP needs physical address of hdmi register */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ config->mmio_name);
+ hdmi->mmio_phy_addr = res->start;
+
+ hdmi->qfprom_mmio = msm_ioremap(pdev,
+ config->qfprom_mmio_name, "HDMI_QFPROM");
+ if (IS_ERR(hdmi->qfprom_mmio)) {
+ dev_info(&pdev->dev, "can't find qfprom resource\n");
+ hdmi->qfprom_mmio = NULL;
+ }
+
hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
config->hpd_reg_cnt, GFP_KERNEL);
if (!hdmi->hpd_regs) {
@@ -189,6 +218,8 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->pwr_clks[i] = clk;
}
+ hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+
hdmi->i2c = hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
@@ -197,6 +228,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
+ hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+ if (IS_ERR(hdmi->hdcp_ctrl)) {
+ dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
+ hdmi->hdcp_ctrl = NULL;
+ }
+
return hdmi;
fail:
@@ -310,7 +347,7 @@ static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"};
static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
-static struct hdmi_platform_config hdmi_tx_8074_config = {
+static struct hdmi_platform_config hdmi_tx_8974_config = {
.phy_init = hdmi_phy_8x74_init,
HDMI_CFG(pwr_reg, 8x74),
HDMI_CFG(hpd_reg, 8x74),
@@ -330,9 +367,21 @@ static struct hdmi_platform_config hdmi_tx_8084_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
+static const char *hpd_reg_names_8x94[] = {};
+
+static struct hdmi_platform_config hdmi_tx_8994_config = {
+ .phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */
+ HDMI_CFG(pwr_reg, 8x74),
+ HDMI_CFG(hpd_reg, 8x94),
+ HDMI_CFG(pwr_clk, 8x74),
+ HDMI_CFG(hpd_clk, 8x74),
+ .hpd_freq = hpd_clk_freq_8x74,
+};
+
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
- { .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config },
+ { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
{}
@@ -347,8 +396,7 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
snprintf(name2, sizeof(name2), "%s-gpio", name);
gpio = of_get_named_gpio(of_node, name2, 0);
if (gpio < 0) {
- dev_err(dev, "failed to get gpio: %s (%d)\n",
- name, gpio);
+ DBG("failed to get gpio: %s (%d)", name, gpio);
gpio = -1;
}
}
@@ -376,6 +424,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
}
hdmi_cfg->mmio_name = "core_physical";
+ hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
@@ -391,7 +440,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
if (cpu_is_apq8064()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
- config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
@@ -404,7 +452,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
- config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
@@ -419,7 +466,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
"8901_hdmi_mvs", "8901_mpp0"
};
config.phy_init = hdmi_phy_8x60_init;
- config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
@@ -430,6 +476,9 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.mux_en_gpio = -1;
config.mux_sel_gpio = -1;
}
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
+
hdmi_cfg = &config;
#endif
dev->platform_data = hdmi_cfg;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 68fdfb3622a5..d0e663192d01 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -37,6 +37,8 @@ struct hdmi_audio {
int rate;
};
+struct hdmi_hdcp_ctrl;
+
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
@@ -51,6 +53,8 @@ struct hdmi {
unsigned long int pixclock;
void __iomem *mmio;
+ void __iomem *qfprom_mmio;
+ phys_addr_t mmio_phy_addr;
struct regulator **hpd_regs;
struct regulator **pwr_regs;
@@ -68,12 +72,25 @@ struct hdmi {
bool hdmi_mode; /* are we in hdmi mode? */
int irq;
+ struct workqueue_struct *workq;
+
+ struct hdmi_hdcp_ctrl *hdcp_ctrl;
+
+ /*
+ * spinlock to protect registers shared by different execution paths:
+ * REG_HDMI_CTRL
+ * REG_HDMI_DDC_ARBITRATION
+ * REG_HDMI_HDCP_INT_CTRL
+ * REG_HDMI_HPD_CTRL
+ */
+ spinlock_t reg_lock;
};
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
const char *mmio_name;
+ const char *qfprom_mmio_name;
/* regulators that need to be on for hpd: */
const char **hpd_reg_names;
@@ -109,6 +126,11 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
return msm_readl(hdmi->mmio + reg);
}
+static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
+{
+ return msm_readl(hdmi->qfprom_mmio + reg);
+}
+
/*
* The phy appears to be different, for example between 8960 and 8x60,
* so split the phy related functions out and load the correct one at
@@ -117,7 +139,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
struct hdmi_phy_funcs {
void (*destroy)(struct hdmi_phy *phy);
- void (*reset)(struct hdmi_phy *phy);
void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
void (*powerdown)(struct hdmi_phy *phy);
};
@@ -163,4 +184,13 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c);
void hdmi_i2c_destroy(struct i2c_adapter *i2c);
struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
+/*
+ * hdcp
+ */
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
+void hdmi_hdcp_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+
#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e6f034808371..0b1b5586ff35 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -441,6 +441,12 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
#define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288
+#define REG_HDMI_CEC_CTRL 0x0000028c
+
+#define REG_HDMI_CEC_WR_DATA 0x00000290
+
+#define REG_HDMI_CEC_CEC_RETRANSMIT 0x00000294
+
#define REG_HDMI_CEC_STATUS 0x00000298
#define REG_HDMI_CEC_INT 0x0000029c
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 872485f60134..df232e20c13e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -203,7 +203,6 @@ int hdmi_audio_update(struct hdmi *hdmi)
audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
} else {
- hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index a7a1d8267cf0..92b69ae8caf9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -100,8 +100,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
hdmi_audio_update(hdmi);
}
- phy->funcs->powerup(phy, hdmi->pixclock);
+ if (phy)
+ phy->funcs->powerup(phy, hdmi->pixclock);
+
hdmi_set_mode(hdmi, true);
+
+ if (hdmi->hdcp_ctrl)
+ hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -118,9 +123,14 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ if (hdmi->hdcp_ctrl)
+ hdmi_hdcp_off(hdmi->hdcp_ctrl);
+
DBG("power down");
hdmi_set_mode(hdmi, false);
- phy->funcs->powerdown(phy);
+
+ if (phy)
+ phy->funcs->powerdown(phy);
if (hdmi->power_on) {
power_off(bridge);
@@ -142,8 +152,6 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
hdmi->pixclock = mode->clock * 1000;
- hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
-
hstart = mode->htotal - mode->hsync_start;
hend = mode->htotal - mode->hsync_start + mode->hdisplay;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 54aa93ff5473..a3b05ae52dae 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -28,6 +28,55 @@ struct hdmi_connector {
};
#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
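+/*
+ * Toggle the PHY and PLL software resets in REG_HDMI_PHY_CTRL: the *_LOW
+ * bits indicate an active-low reset, so assert with the matching polarity,
+ * wait 100ms, then deassert.
+ */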
+static void hdmi_phy_reset(struct hdmi *hdmi)
+{
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
static int gpio_config(struct hdmi *hdmi, bool on)
{
struct device *dev = &hdmi->pdev->dev;
@@ -35,21 +84,25 @@ static int gpio_config(struct hdmi *hdmi, bool on)
int ret;
if (on) {
- ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
- goto error1;
+ if (config->ddc_clk_gpio != -1) {
+ ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+ if (ret) {
+ dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+ goto error1;
+ }
+ gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
}
- gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
- ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
- goto error2;
+ if (config->ddc_data_gpio != -1) {
+ ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+ if (ret) {
+ dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+ goto error2;
+ }
+ gpio_set_value_cansleep(config->ddc_data_gpio, 1);
}
- gpio_set_value_cansleep(config->ddc_data_gpio, 1);
ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
if (ret) {
@@ -94,8 +147,12 @@ static int gpio_config(struct hdmi *hdmi, bool on)
}
DBG("gpio on");
} else {
- gpio_free(config->ddc_clk_gpio);
- gpio_free(config->ddc_data_gpio);
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
+
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
+
gpio_free(config->hpd_gpio);
if (config->mux_en_gpio != -1) {
@@ -126,9 +183,11 @@ error5:
error4:
gpio_free(config->hpd_gpio);
error3:
- gpio_free(config->ddc_data_gpio);
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
error2:
- gpio_free(config->ddc_clk_gpio);
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
error1:
return ret;
}
@@ -138,9 +197,9 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
- struct hdmi_phy *phy = hdmi->phy;
uint32_t hpd_ctrl;
int i, ret;
+ unsigned long flags;
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
@@ -181,7 +240,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
hdmi_set_mode(hdmi, false);
- phy->funcs->reset(phy);
+ hdmi_phy_reset(hdmi);
hdmi_set_mode(hdmi, true);
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -192,6 +251,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
HDMI_HPD_INT_CTRL_INT_EN);
/* set timeout to 4.1ms (max) for hardware debounce */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
@@ -200,6 +260,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
return 0;
@@ -250,7 +311,6 @@ hotplug_work(struct work_struct *work)
void hdmi_connector_irq(struct drm_connector *connector)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct msm_drm_private *priv = connector->dev->dev_private;
struct hdmi *hdmi = hdmi_connector->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl;
@@ -274,7 +334,7 @@ void hdmi_connector_irq(struct drm_connector *connector)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
- queue_work(priv->wq, &hdmi_connector->hpd_work);
+ queue_work(hdmi->workq, &hdmi_connector->hpd_work);
}
}
@@ -350,6 +410,7 @@ static int hdmi_connector_get_modes(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
drm_mode_connector_update_edid_property(connector, edid);
if (edid) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
new file mode 100644
index 000000000000..1dc9c34eb0df
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -0,0 +1,1437 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hdmi.h"
+#include <linux/qcom_scm.h>
+
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+#define HDCP_PORT_ADDR 0x74
+
+#define HDCP_INT_STATUS_MASK ( \
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT)
+
+#define AUTH_WORK_RETRIES_TIME 100
+#define AUTH_RETRIES_TIME 30
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB 0x000000F8
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB 0x000000FC
+#define HDCP_KSV_LSB 0x000060D8
+#define HDCP_KSV_MSB 0x000060DC
+
+enum DS_TYPE { /* type of downstream device */
+ DS_UNKNOWN,
+ DS_RECEIVER,
+ DS_REPEATER,
+};
+
+enum hdmi_hdcp_state {
+ HDCP_STATE_NO_AKSV,
+ HDCP_STATE_INACTIVE,
+ HDCP_STATE_AUTHENTICATING,
+ HDCP_STATE_AUTHENTICATED,
+ HDCP_STATE_AUTH_FAILED
+};
+
+struct hdmi_hdcp_reg_data {
+ u32 reg_id;
+ u32 off;
+ char *name;
+ u32 reg_val;
+};
+
+struct hdmi_hdcp_ctrl {
+ struct hdmi *hdmi;
+ u32 auth_retries;
+ bool tz_hdcp;
+ enum hdmi_hdcp_state hdcp_state;
+ struct work_struct hdcp_auth_work;
+ struct work_struct hdcp_reauth_work;
+
+#define AUTH_ABORT_EV 1
+#define AUTH_RESULT_RDY_EV 2
+ unsigned long auth_event;
+ wait_queue_head_t auth_event_queue;
+
+ u32 ksv_fifo_w_index;
+ /*
+ * store aksv from qfprom
+ */
+ u32 aksv_lsb;
+ u32 aksv_msb;
+ bool aksv_valid;
+ u32 ds_type;
+ u32 bksv_lsb;
+ u32 bksv_msb;
+ u8 dev_count;
+ u8 depth;
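+ /* KSV list read from the sink's KSV FIFO: up to 127 devices, 5 bytes each */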
+ u8 ksv_list[5 * 127];
+ bool max_cascade_exceeded;
+ bool max_dev_exceeded;
+};
+
+static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len)
+{
+ int rc;
+ int retry = 5;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ }, {
+ .addr = addr >> 1,
+ .flags = I2C_M_RD,
+ .len = data_len,
+ .buf = data,
+ }
+ };
+
+ DBG("Start DDC read");
+retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 2);
+
+ retry--;
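+ /* i2c_transfer() returns the number of messages transferred; 2 means both completed */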
+ if (rc == 2)
+ rc = 0;
+ else if (retry > 0)
+ goto retry;
+ else
+ rc = -EIO;
+
+ DBG("End DDC read %d", rc);
+
+ return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
+
+static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len)
+{
+ int rc;
+ int retry = 10;
+ u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ }
+ };
+
+ DBG("Start DDC write");
+ if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+ pr_err("%s: write size too big\n", __func__);
+ return -ERANGE;
+ }
+
+ buf[0] = offset;
+ memcpy(&buf[1], data, data_len);
+ msgs[0].buf = buf;
+ msgs[0].len = data_len + 1;
+retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 1);
+
+ retry--;
+ if (rc == 1)
+ rc = 0;
+ else if (retry > 0)
+ goto retry;
+ else
+ rc = -EIO;
+
+ DBG("End DDC write %d", rc);
+
+ return rc;
+}
+
+static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+ u32 *pdata, u32 count)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT];
+ u32 resp, phy_addr, idx = 0;
+ int i, ret = 0;
+
+ WARN_ON(!pdata || !preg || (count == 0));
+
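+ /*
+ * When HDCP register writes must go through the secure world (tz_hdcp),
+ * issue SCM calls in batches of up to QCOM_SCM_HDCP_MAX_REQ_CNT;
+ * otherwise write the registers directly.
+ */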
+ if (hdcp_ctrl->tz_hdcp) {
+ phy_addr = (u32)hdmi->mmio_phy_addr;
+
+ while (count) {
+ memset(scm_buf, 0, sizeof(scm_buf));
+ for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT;
+ i++) {
+ scm_buf[i].addr = phy_addr + preg[idx];
+ scm_buf[i].val = pdata[idx];
+ idx++;
+ }
+ ret = qcom_scm_hdcp_req(scm_buf, i, &resp);
+
+ if (ret || resp) {
+ pr_err("%s: error: scm_call ret=%d resp=%u\n",
+ __func__, ret, resp);
+ ret = -EINVAL;
+ break;
+ }
+
+ count -= i;
+ }
+ } else {
+ for (i = 0; i < count; i++)
+ hdmi_write(hdmi, preg[i], pdata[i]);
+ }
+
+ return ret;
+}
+
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val, hdcp_int_status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL);
+ hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK;
+ if (!hdcp_int_status) {
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+ return;
+ }
+ /* Clear Interrupts */
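+ /* the ACK bits sit one position above the corresponding status bits */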
+ reg_val |= hdcp_int_status << 1;
+ /* Clear AUTH_FAIL_INFO as well */
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT)
+ reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK;
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ DBG("hdcp irq %x", hdcp_int_status);
+
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) {
+ pr_info("%s:AUTH_SUCCESS_INT received\n", __func__);
+ if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+ set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ }
+ }
+
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) {
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+ __func__, reg_val);
+ if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state)
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+ else if (HDCP_STATE_AUTHENTICATING ==
+ hdcp_ctrl->hdcp_state) {
+ set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ }
+ }
+}
+
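+/*
+ * Interruptible delay: returns -ECANCELED if @ev is signalled before @ms
+ * milliseconds elapse, or 0 if the full delay completed.
+ */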
+static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+{
+ int rc;
+
+ rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
+ !!test_bit(ev, &hdcp_ctrl->auth_event),
+ msecs_to_jiffies(ms));
+ if (rc) {
+ pr_info("%s: msleep is canceled by event %d\n",
+ __func__, ev);
+ clear_bit(ev, &hdcp_ctrl->auth_event);
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+ /* Fetch aksv from QFPROM, this info should be public. */
+ hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB);
+ hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB);
+
+ /* check there are 20 ones in AKSV */
+ if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb))
+ != 20) {
+ pr_err("%s: AKSV QFPROM doesn't have 20 1's, 20 0's\n",
+ __func__);
+ pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
+ __func__, hdcp_ctrl->aksv_msb,
+ hdcp_ctrl->aksv_lsb);
+ return -EINVAL;
+ }
+ DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb);
+
+ return 0;
+}
+
+static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val, failure, nack0;
+ int rc = 0;
+
+ /* Check for any DDC transfer failures */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED;
+ nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0;
+ DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d",
+ reg_val, failure, nack0);
+
+ if (failure) {
+ /*
+ * Indicates that the last HDCP HW DDC transfer failed.
+ * This occurs when a transfer is attempted with HDCP DDC
+ * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+ * matches HDCP_DDC_RETRY_CNT.
+ * Failure occurred, let's clear it.
+ */
+ DBG("DDC failure detected");
+
+ /* First, Disable DDC */
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0,
+ HDMI_HDCP_DDC_CTRL_0_DISABLE);
+
+ /* ACK the Failure to Clear it */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1);
+ reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK;
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val);
+
+ /* Check if the FAILURE got Cleared */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED)
+ pr_info("%s: Unable to clear HDCP DDC Failure\n",
+ __func__);
+
+ /* Re-Enable HDCP DDC */
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0);
+ }
+
+ if (nack0) {
+ DBG("Before: HDMI_DDC_SW_STATUS=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+ /* Reset HDMI DDC software status */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ /* Reset HDMI DDC Controller */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val |= HDMI_DDC_CTRL_SOFT_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ /* If previous msleep is aborted, skip this msleep */
+ if (!rc)
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+ DBG("After: HDMI_DDC_SW_STATUS=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+ }
+
+ return rc;
+}
+
+static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 hdcp_ddc_status, ddc_hw_status;
+ u32 xfer_done, xfer_req, hw_done;
+ bool hw_not_ready;
+ u32 timeout_count;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+ if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0)
+ return 0;
+
+ /* Wait to be clean on DDC HW engine */
+ timeout_count = 100;
+ do {
+ hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+
+ xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE;
+ xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ;
+ hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE;
+ hw_not_ready = !xfer_done || xfer_req || !hw_done;
+
+ if (!hw_not_ready)
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_warn("%s: hw_ddc_clean failed\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static void hdmi_hdcp_reauth_work(struct work_struct *work)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+ struct hdmi_hdcp_ctrl, hdcp_reauth_work);
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ u32 reg_val;
+
+ DBG("HDCP REAUTH WORK");
+ /*
+ * Disable HPD circuitry.
+ * This is needed to reset the HDCP cipher engine so that when we
+ * attempt a re-authentication, HW would clear the AN0_READY and
+ * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+ /* Disable HDCP interrupts */
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+ HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+ /* Wait to be clean on DDC HW engine */
+ if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+ pr_info("%s: reauth work aborted\n", __func__);
+ return;
+ }
+
+ /* Disable encryption and disable the HDCP block */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+ /* Enable HPD circuitry */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val |= HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Only retry defined times then abort current authenticating process
+ */
+ if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+ hdcp_ctrl->auth_retries = 0;
+ pr_info("%s: abort reauthentication!\n", __func__);
+
+ return;
+ }
+
+ DBG("Queue AUTH WORK");
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ u32 reg_val;
+ unsigned long flags;
+ int rc;
+
+ if (!hdcp_ctrl->aksv_valid) {
+ rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: ASKV validation failed\n", __func__);
+ hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
+ return -ENOTSUPP;
+ }
+ hdcp_ctrl->aksv_valid = true;
+ }
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ /* disable HDMI Encrypt */
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+ /* Enabling Software DDC */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+ reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+ hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Write AKSV read from QFPROM to the HDCP registers.
+ * This step is needed for HDCP authentication and must be
+ * written before enabling HDCP.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb);
+ hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb);
+
+ /*
+ * HDCP setup prior to enabling HDCP_CTRL.
+ * Setup seed values for random number An.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+ hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+ /* Disable the RngCipher state */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL);
+ reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER;
+ hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val);
+ DBG("HDCP_DEBUG_CTRL=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL));
+
+ /*
+ * Ensure that all register writes are completed before
+ * enabling HDCP cipher
+ */
+ wmb();
+
+ /*
+ * Enable HDCP
+ * This needs to be done as early as possible in order for the
+ * hardware to make An available to read
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE);
+
+ /*
+ * If we had stale values for the An ready bit, it should most
+ * likely be cleared now after enabling HDCP cipher
+ */
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ DBG("After enabling HDCP Link0_Status=0x%08x", link0_status);
+ if (!(link0_status &
+ (HDMI_HDCP_LINK0_STATUS_AN_0_READY |
+ HDMI_HDCP_LINK0_STATUS_AN_1_READY)))
+ DBG("An not ready after enabling HDCP");
+
+ /* Clear any DDC failures from previous tries before enabling HDCP */
+ rc = reset_hdcp_ddc_failures(hdcp_ctrl);
+
+ return rc;
+}
+
+static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ DBG("hdcp auth failed, queue reauth work");
+ /* clear HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+}
+
+static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ /*
+ * Disable software DDC before going into part3 to make sure
+ * there is no Arbitration between software and hardware for DDC
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+ reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+ hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /* enable HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val |= HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+ hdcp_ctrl->auth_retries = 0;
+}
+
+/*
+ * hdcp authenticating part 1
+ * Wait Key/An ready
+ * Read BCAPS from sink
+ * Write BCAPS and AKSV into HDCP engine
+ * Write An and AKSV to sink
+ * Read BKSV from sink and write into HDCP engine
+ */
+static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status, keys_state;
+ u32 timeout_count;
+ bool an_ready;
+
+ /* Wait for HDCP keys to be checked and validated */
+ timeout_count = 100;
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ keys_state = (link0_status >> 28) & 0x7;
+ if (keys_state == HDCP_KEYS_STATE_VALID)
+ break;
+
+ DBG("Keys not ready(%d). s=%d, l0=%0x08x",
+ timeout_count, keys_state, link0_status);
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Wait key state timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ timeout_count = 100;
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY)
+ && (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY);
+ if (an_ready)
+ break;
+
+ DBG("An not ready(%d). l0_status=0x%08x",
+ timeout_count, link0_status);
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Wait An timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_aksv_0, link0_aksv_1;
+ u32 link0_an[2];
+ u8 aksv[5];
+
+ /* Read An0 and An1 */
+ link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5);
+ link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6);
+
+ /* Read AKSV */
+ link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3);
+ link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4);
+
+ DBG("Link ASKV=%08x%08x", link0_aksv_0, link0_aksv_1);
+ /* Copy An and AKSV to byte arrays for transmission */
+ aksv[0] = link0_aksv_0 & 0xFF;
+ aksv[1] = (link0_aksv_0 >> 8) & 0xFF;
+ aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+ aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+ aksv[4] = link0_aksv_1 & 0xFF;
+
+ /* Write An to offset 0x18 */
+ rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+ (u16)sizeof(link0_an));
+ if (rc) {
+ pr_err("%s:An write failed\n", __func__);
+ return rc;
+ }
+ DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
+
+ /* Write AKSV to offset 0x10 */
+ rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+ if (rc) {
+ pr_err("%s:AKSV write failed\n", __func__);
+ return rc;
+ }
+ DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0);
+
+ return 0;
+}
+
+static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u8 bksv[5];
+ u32 reg[2], data[2];
+
+ /* Read BKSV at offset 0x00 */
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+ if (rc) {
+ pr_err("%s:BKSV read failed\n", __func__);
+ return rc;
+ }
+
+ hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) |
+ (bksv[2] << 16) | (bksv[3] << 24);
+ hdcp_ctrl->bksv_msb = bksv[4];
+ DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb);
+
+ /* check there are 20 ones in BKSV */
+ if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb))
+ != 20) {
+ pr_err(": BKSV doesn't have 20 1's and 20 0's\n");
+ pr_err(": BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+ bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+ return -EINVAL;
+ }
+
+ /* Write BKSV read from sink to HDCP registers */
+ reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0;
+ data[0] = hdcp_ctrl->bksv_lsb;
+ reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
+ data[1] = hdcp_ctrl->bksv_msb;
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+ return rc;
+}
+
+static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg, data;
+ u8 bcaps;
+
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+ if (rc) {
+ pr_err("%s:BCAPS read failed\n", __func__);
+ return rc;
+ }
+ DBG("BCAPS=%02x", bcaps);
+
+ /* receiver (0), repeater (1) */
+ hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER;
+
+ /* Write BCAPS to the hardware */
+ reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+ data = (u32)bcaps;
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+ return rc;
+}
+
+static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ int rc;
+
+ /* Wait for AKSV key and An ready */
+ rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: wait key and an ready failed\n", __func__);
+ return rc;
+ };
+
+ /* Read BCAPS and send to HDCP engine */
+ rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: read bcaps error, abort\n", __func__);
+ return rc;
+ }
+
+ /*
+ * 1.1_Features turned off by default.
+ * No need to write AInfo since 1.1_Features is disabled.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
+
+ /* Send AKSV and An to sink */
+ rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s:An/Aksv write failed\n", __func__);
+ return rc;
+ }
+
+ /* Read BKSV and send to HDCP engine*/
+ rc = hdmi_hdcp_recv_bksv(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s:BKSV Process failed\n", __func__);
+ return rc;
+ }
+
+ /* Enable HDCP interrupts and ack/clear any stale interrupts */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL,
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK |
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ return 0;
+}
+
+/* read R0' from sink and pass it to HDCP engine */
+static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ int rc = 0;
+ u8 buf[2];
+
+ /*
+ * HDCP Compliance Test case 1A-01:
+ * Wait here at least 100ms before reading R0'
+ */
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+
+ /* Read R0' at offset 0x08 */
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+ if (rc) {
+ pr_err("%s:R0' read failed\n", __func__);
+ return rc;
+ }
+ DBG("R0'=%02x%02x", buf[1], buf[0]);
+
+ /* Write R0' to HDCP registers and check to see if it is a match */
+ hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0,
+ (((u32)buf[1]) << 8) | buf[0]);
+
+ return 0;
+}
+
+/* Wait for authenticating result: R0/R0' are matched or not */
+static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ int rc;
+
+ /* wait for hdcp irq, 10 sec should be long enough */
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
+ if (!rc) {
+ pr_err("%s: Wait Auth IRQ timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) {
+ pr_err("%s: Authentication Part I failed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Enable HDCP Encryption */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL,
+ HDMI_HDCP_CTRL_ENABLE |
+ HDMI_HDCP_CTRL_ENCRYPTION_ENABLE);
+
+ return 0;
+}
+
+static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+ u16 *pbstatus)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ bool max_devs_exceeded = false, max_cascade_exceeded = false;
+ u32 repeater_cascade_depth = 0, down_stream_devices = 0;
+ u16 bstatus;
+ u8 buf[2];
+
+ /* Read BSTATUS at offset 0x41 */
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+ if (rc) {
+ pr_err("%s: BSTATUS read failed\n", __func__);
+ goto error;
+ }
+ *pbstatus = bstatus = (buf[1] << 8) | buf[0];
+
+
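+ /*
+ * BSTATUS: bits 6:0 device count, bit 7 MAX_DEVS_EXCEEDED,
+ * bits 10:8 repeater depth, bit 11 MAX_CASCADE_EXCEEDED
+ */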
+ down_stream_devices = bstatus & 0x7F;
+ repeater_cascade_depth = (bstatus >> 8) & 0x7;
+ max_devs_exceeded = (bstatus & BIT(7)) ? true : false;
+ max_cascade_exceeded = (bstatus & BIT(11)) ? true : false;
+
+ if (down_stream_devices == 0) {
+ /*
+ * If no downstream devices are attached to the repeater
+ * then part II fails.
+ * todo: The other approach would be to continue PART II.
+ */
+ pr_err("%s: No downstream devices\n", __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * HDCP Compliance 1B-05:
+ * Check if no. of devices connected to repeater
+ * exceeds max_devices_connected from bit 7 of Bstatus.
+ */
+ if (max_devs_exceeded) {
+ pr_err("%s: no. of devs connected exceeds max allowed",
+ __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * HDCP Compliance 1B-06:
+ * Check if the cascade depth of the repeater
+ * exceeds max_cascade_connected from bit 11 of Bstatus.
+ */
+ if (max_cascade_exceeded) {
+ pr_err("%s: no. of cascade conn exceeds max allowed",
+ __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+error:
+ hdcp_ctrl->dev_count = down_stream_devices;
+ hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded;
+ hdcp_ctrl->max_dev_exceeded = max_devs_exceeded;
+ hdcp_ctrl->depth = repeater_cascade_depth;
+ return rc;
+}
+
+static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg, data;
+ u32 timeout_count;
+ u16 bstatus;
+ u8 bcaps;
+
+ /*
+ * Wait until the READY bit is set in BCAPS; per the HDCP specification,
+ * the maximum permitted time to check for the READY bit is five seconds.
+ */
+ timeout_count = 100;
+ do {
+ /* Read BCAPS at offset 0x40 */
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+ if (rc) {
+ pr_err("%s: BCAPS read failed\n", __func__);
+ return rc;
+ }
+
+ if (bcaps & BIT(5))
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Wait KSV fifo ready timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+ if (rc) {
+ pr_err("%s: bstatus error\n", __func__);
+ return rc;
+ }
+
+ /* Write BSTATUS and BCAPS to HDCP registers */
+ reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+ data = bcaps | (bstatus << 8);
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+ if (rc) {
+ pr_err("%s: BSTATUS write failed\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * hdcp authenticating part 2, second stage:
+ * read ksv fifo from sink
+ * transfer V' from sink to HDCP engine
+ * reset SHA engine
+ */
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ int rc = 0;
+ struct hdmi_hdcp_reg_data reg_data[] = {
+ {REG_HDMI_HDCP_RCVPORT_DATA7, 0x20, "V' H0"},
+ {REG_HDMI_HDCP_RCVPORT_DATA8, 0x24, "V' H1"},
+ {REG_HDMI_HDCP_RCVPORT_DATA9, 0x28, "V' H2"},
+ {REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+ {REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+ };
+ struct hdmi_hdcp_reg_data *rd;
+ u32 size = ARRAY_SIZE(reg_data);
+ u32 reg[ARRAY_SIZE(reg_data)];
+ u32 data[ARRAY_SIZE(reg_data)];
+ int i;
+
+ for (i = 0; i < size; i++) {
+ rd = &reg_data[i];
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+ rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
+ if (rc) {
+ pr_err("%s: Read %s failed\n", __func__, rd->name);
+ goto error;
+ }
+
+ DBG("%s =%x", rd->name, data[i]);
+ reg[i] = reg_data[i].reg_id;
+ }
+
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+
+error:
+ return rc;
+}
+
+static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 ksv_bytes;
+
+ ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+ hdcp_ctrl->ksv_list, ksv_bytes);
+ if (rc)
+ pr_err("%s: KSV FIFO read failed\n", __func__);
+
+ return rc;
+}
+
+static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ u32 reg[2], data[2];
+ u32 rc = 0;
+
+ reg[0] = REG_HDMI_HDCP_SHA_CTRL;
+ data[0] = HDCP_REG_ENABLE;
+ reg[1] = REG_HDMI_HDCP_SHA_CTRL;
+ data[1] = HDCP_REG_DISABLE;
+
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+ return rc;
+}
+
+static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 timeout_count;
+
+ /*
+ * Read KSV FIFO over DDC
+ * The Key Selection Vector FIFO is used to pull downstream KSVs
+ * from HDCP Repeaters.
+ * All bytes (DEVICE_COUNT * 5) must be read in a single,
+ * auto incrementing access.
+ * All bytes read as 0x00 for HDCP Receivers that are not
+ * HDCP Repeaters (REPEATER == 0).
+ */
+ timeout_count = 100;
+ do {
+ rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+ if (!rc)
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Recv ksv fifo timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: transfer V failed\n", __func__);
+ return rc;
+ }
+
+ /* reset SHA engine before write ksv fifo */
+ rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: fail to reset sha engine\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Write KSV FIFO to HDCP_SHA_DATA.
+ * This is done 1 byte at a time, starting with the LSB.
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further.
+ * After the last byte is written, we need to poll for
+ * HDCP_SHA_COMP_DONE to wait until the HW finishes.
+ */
+static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int i;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 ksv_bytes, last_byte = 0;
+ u8 *ksv_fifo = NULL;
+ u32 reg_val, data, reg;
+ u32 rc = 0;
+
+ ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+ /* Check if need to wait for HW completion */
+ if (hdcp_ctrl->ksv_fifo_w_index) {
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
+ DBG("HDCP_SHA_STATUS=%08x", reg_val);
+ if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
+ /* check COMP_DONE if last write */
+ if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
+ DBG("COMP_DONE");
+ return 0;
+ } else {
+ return -EAGAIN;
+ }
+ } else {
+ /* check BLOCK_DONE if not last write */
+ if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
+ return -EAGAIN;
+
+ DBG("BLOCK_DONE");
+ }
+ }
+
+ ksv_bytes -= hdcp_ctrl->ksv_fifo_w_index;
+ if (ksv_bytes <= 64)
+ last_byte = 1;
+ else
+ ksv_bytes = 64;
+
+ ksv_fifo = hdcp_ctrl->ksv_list;
+ ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
+
+ for (i = 0; i < ksv_bytes; i++) {
+ /* Write KSV byte and set DONE bit[0] for last byte*/
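+ /* the KSV byte is placed in bits [23:16] of HDCP_SHA_DATA */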
+ reg_val = ksv_fifo[i] << 16;
+ if ((i == (ksv_bytes - 1)) && last_byte)
+ reg_val |= HDMI_HDCP_SHA_DATA_DONE;
+
+ reg = REG_HDMI_HDCP_SHA_DATA;
+ data = reg_val;
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+ if (rc)
+ return rc;
+ }
+
+ hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
+
+ /*
+ * return -EAGAIN to notify caller to wait for COMP_DONE or BLOCK_DONE
+ */
+ return -EAGAIN;
+}
+
+/* write ksv fifo into HDCP engine */
+static int hdmi_hdcp_auth_part2_write_ksv_fifo(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 timeout_count;
+
+ hdcp_ctrl->ksv_fifo_w_index = 0;
+ timeout_count = 100;
+ do {
+ rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+ if (!rc)
+ break;
+
+ if (rc != -EAGAIN)
+ return rc;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Write KSV fifo timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ u32 timeout_count = 100;
+
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: HDCP V Match timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static void hdmi_hdcp_auth_work(struct work_struct *work)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+ struct hdmi_hdcp_ctrl, hdcp_auth_work);
+ int rc;
+
+ rc = hdmi_hdcp_auth_prepare(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: auth prepare failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ /* HDCP PartI */
+ rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: key exchange failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: receive r0 failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: verify r0 failed %d\n", __func__, rc);
+ goto end;
+ }
+ pr_info("%s: Authentication Part I successful\n", __func__);
+ if (hdcp_ctrl->ds_type == DS_RECEIVER)
+ goto end;
+
+ /* HDCP PartII */
+ rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
+ if (rc)
+ pr_err("%s: check v match failed %d\n", __func__, rc);
+
+end:
+ if (rc == -ECANCELED) {
+ pr_info("%s: hdcp authentication canceled\n", __func__);
+ } else if (rc == -ENOTSUPP) {
+ pr_info("%s: hdcp is not supported\n", __func__);
+ } else if (rc) {
+ pr_err("%s: hdcp authentication failed\n", __func__);
+ hdmi_hdcp_auth_fail(hdcp_ctrl);
+ } else {
+ hdmi_hdcp_auth_done(hdcp_ctrl);
+ }
+}
+
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) ||
+ (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+ DBG("still active or activating or no askv. returning");
+ return;
+ }
+
+ /* clear HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->auth_event = 0;
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+ hdcp_ctrl->auth_retries = 0;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ u32 reg_val;
+
+ if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) ||
+ (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+ DBG("hdcp inactive or no aksv. returning");
+ return;
+ }
+
+ /*
+ * Disable HPD circuitry.
+ * This is needed to reset the HDCP cipher engine so that when we
+ * attempt a re-authentication, HW would clear the AN0_READY and
+ * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+ /*
+ * Disable HDCP interrupts.
+ * Also, the state needs to be set to inactive here so that any ongoing
+ * reauth work will know that the HDCP session has been turned off.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Cancel any pending auth/reauth attempts.
+ * If one is ongoing, this will wait for it to finish.
+ * No more reauthentication attempts will be scheduled since we
+ * set the current state to inactive.
+ */
+ set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ cancel_work_sync(&hdcp_ctrl->hdcp_auth_work);
+ cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work);
+
+ hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+ HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+ /* Disable encryption and disable the HDCP block */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+ /* Enable HPD circuitry */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val |= HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+
+ DBG("HDCP: Off");
+}
+
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+
+ if (!hdmi->qfprom_mmio) {
+ pr_err("%s: HDCP is not supported without qfprom\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+ if (!hdcp_ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
+ INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work);
+ init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
+ hdcp_ctrl->hdmi = hdmi;
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+ hdcp_ctrl->aksv_valid = false;
+
+ if (qcom_scm_hdcp_available())
+ hdcp_ctrl->tz_hdcp = true;
+ else
+ hdcp_ctrl->tz_hdcp = false;
+
+ return hdcp_ctrl;
+}
+
+void hdmi_hdcp_destroy(struct hdmi *hdmi)
+{
+ if (hdmi && hdmi->hdcp_ctrl) {
+ kfree(hdmi->hdcp_ctrl);
+ hdmi->hdcp_ctrl = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 6997ec636c6d..3a01cb5051e2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -426,57 +426,6 @@ static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
kfree(phy_8960);
}
-static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
- struct hdmi *hdmi = phy_8960->hdmi;
- unsigned int val;
-
- val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- }
-
- if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET_PLL);
- }
-
- msleep(100);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- }
-
- if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET_PLL);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
- }
-}
-
static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
@@ -511,7 +460,6 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
.destroy = hdmi_phy_8960_destroy,
- .reset = hdmi_phy_8960_reset,
.powerup = hdmi_phy_8960_powerup,
.powerdown = hdmi_phy_8960_powerdown,
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index 391433c1af7c..cb01421ae1e4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -29,37 +29,6 @@ static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
kfree(phy_8x60);
}
-static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
- struct hdmi *hdmi = phy_8x60->hdmi;
- unsigned int val;
-
- val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- }
-
- msleep(100);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- }
-}
-
static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
@@ -182,7 +151,6 @@ static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
.destroy = hdmi_phy_8x60_destroy,
- .reset = hdmi_phy_8x60_reset,
.powerup = hdmi_phy_8x60_powerup,
.powerdown = hdmi_phy_8x60_powerdown,
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 59fa6cdacb2a..56ab8917ee9a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -19,7 +19,6 @@
struct hdmi_phy_8x74 {
struct hdmi_phy base;
- struct hdmi *hdmi;
void __iomem *mmio;
};
#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
@@ -41,59 +40,6 @@ static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
kfree(phy_8x74);
}
-static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
- struct hdmi *hdmi = phy_8x74->hdmi;
- unsigned int val;
-
- /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
-
- val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- }
-
- if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET_PLL);
- }
-
- msleep(100);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- }
-
- if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET_PLL);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
- }
-}
-
static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
@@ -117,7 +63,6 @@ static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
.destroy = hdmi_phy_8x74_destroy,
- .reset = hdmi_phy_8x74_reset,
.powerup = hdmi_phy_8x74_powerup,
.powerdown = hdmi_phy_8x74_powerdown,
};
@@ -138,8 +83,6 @@ struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
phy->funcs = &hdmi_phy_8x74_funcs;
- phy_8x74->hdmi = hdmi;
-
/* for 8x74, the phy mmio is mapped separately: */
phy_8x74->mmio = msm_ioremap(hdmi->pdev,
"phy_physical", "HDMI_8x74");
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 978c3f70872a..2aa23b98f8aa 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 153fc487d683..74b86734fef5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index c4bb9d9c7667..6ac9aa165768 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -334,13 +334,15 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: begin", mdp4_crtc->name);
}
-static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -680,7 +682,5 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
plane->crtc = crtc;
- mdp4_plane_install_properties(plane, &crtc->base);
-
return crtc;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 7369ee7f0c55..5ed38cf548a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -19,8 +19,11 @@
#include "msm_drv.h"
#include "mdp4_kms.h"
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask)
{
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
}
@@ -68,9 +71,10 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
struct drm_device *dev = mdp4_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
- uint32_t status;
+ uint32_t status, enable;
- status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+ enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
VERB("status=%08x", status);
@@ -86,13 +90,22 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+ mdp4_enable(mdp4_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp4_crtc_vblank(crtc), true);
+ mdp4_disable(mdp4_kms);
+
return 0;
}
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+ mdp4_enable(mdp4_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp4_crtc_vblank(crtc), false);
+ mdp4_disable(mdp4_kms);
}
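
Note on the mdp4_set_irqmask() change above: irqmask ^ (irqmask & old_irqmask) is exactly the set of bits being newly enabled (it equals irqmask & ~old_irqmask), so writing it to INTR_CLEAR discards any stale latched status for those interrupts before INTR_ENABLE is updated; the vblank hooks are additionally bracketed with mdp4_enable()/mdp4_disable(), presumably so the register write happens with the MDP clocks up. A minimal standalone sketch (hypothetical helper name, not part of the patch) of the bit identity:

#include <assert.h>
#include <stdint.h>

/* Bits set in irqmask but not in old_irqmask, i.e. the interrupts
 * being turned on by this update. */
static uint32_t newly_enabled(uint32_t irqmask, uint32_t old_irqmask)
{
	return irqmask ^ (irqmask & old_irqmask); /* == irqmask & ~old_irqmask */
}

int main(void)
{
	/* enable bits 2..3 while bits 0..1 were already enabled */
	assert(newly_enabled(0x0f, 0x03) == 0x0c);
	assert(newly_enabled(0x0f, 0x03) == (0x0f & ~0x03u));
	return 0;
}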
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 531e4acc2a87..077f7521a971 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -241,22 +241,37 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
}
#ifdef CONFIG_OF
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
{
- struct device_node *n;
+ struct device_node *endpoint, *panel_node;
+ struct device_node *np = dev->dev->of_node;
struct drm_panel *panel = NULL;
- n = of_parse_phandle(dev->dev->of_node, name, 0);
- if (n) {
- panel = of_drm_find_panel(n);
- if (!panel)
- panel = ERR_PTR(-EPROBE_DEFER);
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ dev_err(dev->dev, "no valid endpoint\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ if (!panel_node) {
+ dev_err(dev->dev, "no valid panel node\n");
+ of_node_put(endpoint);
+ return ERR_PTR(-ENODEV);
+ }
+
+ of_node_put(endpoint);
+
+ panel = of_drm_find_panel(panel_node);
+ if (!panel) {
+ of_node_put(panel_node);
+ return ERR_PTR(-EPROBE_DEFER);
}
return panel;
}
#else
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
{
// ??? maybe use a module param to specify which panel is attached?
}
@@ -294,7 +309,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
* Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
*/
- panel = detect_panel(dev, "qcom,lvds-panel");
+ panel = detect_panel(dev);
if (IS_ERR(panel)) {
ret = PTR_ERR(panel);
dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
@@ -527,6 +542,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
return kms;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c1ecb9d6bdef..8a7f6e1e2bca 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -167,7 +167,8 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms);
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
void mdp4_irq_preinstall(struct msm_kms *kms);
int mdp4_irq_postinstall(struct msm_kms *kms);
void mdp4_irq_uninstall(struct msm_kms *kms);
@@ -175,29 +176,24 @@ irqreturn_t mdp4_irq(struct msm_kms *kms);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-static inline bool pipe_supports_yuv(enum mdp4_pipe pipe)
+static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
{
switch (pipe) {
case VG1:
case VG2:
case VG3:
case VG4:
- return true;
+ return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+ case RGB1:
+ case RGB2:
+ case RGB3:
+ return MDP_PIPE_CAP_SCALE;
default:
- return false;
+ return 0;
}
}
-static inline
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
- uint32_t max_formats)
-{
- return mdp_get_formats(pixel_formats, max_formats,
- !pipe_supports_yuv(pipe_id));
-}
-
-void mdp4_plane_install_properties(struct drm_plane *plane,
- struct drm_mode_object *obj);
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index c04843376c54..4cd6e721aa0a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -346,8 +346,10 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
- if (panel)
+ if (panel) {
drm_panel_disable(panel);
+ drm_panel_unprepare(panel);
+ }
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -412,8 +414,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
if (ret)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
- if (panel)
+ if (panel) {
+ drm_panel_prepare(panel);
drm_panel_enable(panel);
+ }
setup_phy(encoder);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..e9dee367b597 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -26,6 +26,7 @@ struct mdp4_plane {
enum mdp4_pipe pipe;
+ uint32_t caps;
uint32_t nformats;
uint32_t formats[32];
@@ -74,7 +75,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
}
/* helper to install properties which are common to planes and crtcs */
-void mdp4_plane_install_properties(struct drm_plane *plane,
+static void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
// XXX
@@ -220,13 +221,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
- enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
+ enum mdp4_frame_format frame_type;
if (!(crtc && fb)) {
DBG("%s: disabled!", mdp4_plane->name);
return 0;
}
+ frame_type = mdp4_get_frame_format(fb);
+
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
@@ -380,9 +383,11 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
mdp4_plane->pipe = pipe_id;
mdp4_plane->name = pipe_names[pipe_id];
+ mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
- mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
- ARRAY_SIZE(mdp4_plane->formats));
+ mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
+ ARRAY_SIZE(mdp4_plane->formats),
+ !pipe_supports_yuv(mdp4_plane->caps));
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
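
With the caps bitmask in place, mdp4_plane_init() above decides YUV support per pipe via pipe_supports_yuv(mdp4_plane->caps); that helper lives in the shared mdp code and is not part of this hunk. A plausible sketch of the caps-based check, assuming YUV sources require both scaling and color-space conversion (the cap defines below are illustrative placeholders):

#include <stdbool.h>
#include <stdint.h>

#define MDP_PIPE_CAP_HFLIP	(1 << 0)
#define MDP_PIPE_CAP_VFLIP	(1 << 1)
#define MDP_PIPE_CAP_SCALE	(1 << 2)
#define MDP_PIPE_CAP_CSC	(1 << 3)

/* Hypothetical reimplementation: a pipe can take YUV only if it can
 * both scale and color-space convert the source. */
static bool pipe_supports_yuv(uint32_t pipe_caps)
{
	return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
	       (pipe_caps & MDP_PIPE_CAP_CSC);
}

int main(void)
{
	uint32_t vg  = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
		       MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
	uint32_t rgb = MDP_PIPE_CAP_SCALE;

	/* VG pipes handle YUV, RGB pipes do not */
	return (pipe_supports_yuv(vg) && !pipe_supports_yuv(rgb)) ? 0 : 1;
}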
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 50e17527e2e5..3469f50d5590 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -381,49 +381,49 @@ static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x0
static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
}
#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
}
#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
}
#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
}
#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
}
@@ -431,13 +431,13 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
}
@@ -499,6 +499,44 @@ static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __o
static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000040;
+ case 1: return 0x00000044;
+ case 2: return 0x00000048;
+ case 3: return 0x0000004c;
+ case 4: return 0x00000050;
+ case 5: return 0x00000054;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
{
switch (idx) {
@@ -803,11 +841,11 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
}
#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_fetch_type val)
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
{
- return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+ return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
@@ -897,41 +935,41 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
@@ -984,9 +1022,22 @@ static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x000000
static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000020;
+ case 1: return 0x00000050;
+ case 2: return 0x00000080;
+ case 3: return 0x000000b0;
+ case 4: return 0x00000230;
+ case 5: return 0x00000260;
+ case 6: return 0x00000290;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -1008,25 +1059,25 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff
@@ -1260,6 +1311,13 @@ static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x000000
static inline uint32_t __offset_WB(uint32_t idx)
{
switch (idx) {
+#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */
+ case 0: return (mdp5_cfg->wb.base[0]);
+ case 1: return (mdp5_cfg->wb.base[1]);
+ case 2: return (mdp5_cfg->wb.base[2]);
+ case 3: return (mdp5_cfg->wb.base[3]);
+ case 4: return (mdp5_cfg->wb.base[4]);
+#endif
default: return INVALID_IDX(idx);
}
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 8b9a7931b162..a1e26f23c7cc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -22,7 +22,76 @@ struct mdp5_cfg_handler {
/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
const struct mdp5_cfg_hw *mdp5_cfg = NULL;
-const struct mdp5_cfg_hw msm8x74_config = {
+const struct mdp5_cfg_hw msm8x74v1_config = {
+ .name = "msm8x74v1",
+ .mdp = {
+ .count = 1,
+ .base = { 0x00100 },
+ },
+ .smp = {
+ .mmb_count = 22,
+ .mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .flush_hw_mask = 0x0003ffff,
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01200, 0x01600, 0x01a00 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01e00, 0x02200, 0x02600 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02a00, 0x02e00 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ 0,
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04600, 0x04a00, 0x04e00 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x21b00, 0x21c00, 0x21d00 },
+ },
+ .intf = {
+ .base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw msm8x74v2_config = {
.name = "msm8x74",
.mdp = {
.count = 1,
@@ -45,19 +114,27 @@ const struct mdp5_cfg_hw msm8x74_config = {
.pipe_vig = {
.count = 3,
.base = { 0x01200, 0x01600, 0x01a00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 3,
.base = { 0x01e00, 0x02200, 0x02600 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
.base = { 0x02a00, 0x02e00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 5,
.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 3,
@@ -65,7 +142,7 @@ const struct mdp5_cfg_hw msm8x74_config = {
},
.ad = {
.count = 2,
- .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+ .base = { 0x13100, 0x13300 },
},
.pp = {
.count = 3,
@@ -113,19 +190,27 @@ const struct mdp5_cfg_hw apq8084_config = {
.pipe_vig = {
.count = 4,
.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
.base = { 0x03200, 0x03600 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 4,
@@ -174,19 +259,27 @@ const struct mdp5_cfg_hw msm8x16_config = {
.pipe_vig = {
.count = 1,
.base = { 0x05000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 2,
.base = { 0x15000, 0x17000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
.base = { 0x25000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x45000, 0x48000 },
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 1,
@@ -203,14 +296,91 @@ const struct mdp5_cfg_hw msm8x16_config = {
.max_clk = 320000000,
};
+const struct mdp5_cfg_hw msm8x94_config = {
+ .name = "msm8x94",
+ .mdp = {
+ .count = 1,
+ .base = { 0x01000 },
+ },
+ .smp = {
+ .mmb_count = 44,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
+ .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */
+ .reserved = {
+ [1] = 1, [4] = 1, [7] = 1, [19] = 1,
+ [16] = 5, [17] = 5, [18] = 5, [22] = 5,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .flush_hw_mask = 0xf0ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x25000, 0x27000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x79000, 0x79800, 0x7a000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+ },
+ .intf = {
+ .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 320000000,
+};
+
static const struct mdp5_cfg_handler cfg_handlers[] = {
- { .revision = 0, .config = { .hw = &msm8x74_config } },
- { .revision = 2, .config = { .hw = &msm8x74_config } },
+ { .revision = 0, .config = { .hw = &msm8x74v1_config } },
+ { .revision = 2, .config = { .hw = &msm8x74v2_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
{ .revision = 6, .config = { .hw = &msm8x16_config } },
+ { .revision = 9, .config = { .hw = &msm8x94_config } },
};
-
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
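
The msm8x74v1 and msm8x94 entries above extend the revision table that the cfg code walks when the MDP5 hardware revision is read at probe time; the lookup loop itself is outside this hunk. A simplified sketch of that kind of table walk (structure and names are illustrative, modeled on cfg_handlers[] above):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hw_cfg { const char *name; };
struct cfg_handler { uint32_t revision; const struct hw_cfg *hw; };

static const struct hw_cfg v1 = { "msm8x74v1" };
static const struct hw_cfg v2 = { "msm8x74" };

static const struct cfg_handler handlers[] = {
	{ .revision = 0, .hw = &v1 },
	{ .revision = 2, .hw = &v2 },
};

/* Return the config whose .revision matches the major HW revision,
 * or NULL for unsupported hardware. */
static const struct hw_cfg *cfg_lookup(uint32_t major)
{
	size_t i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].revision == major)
			return handlers[i].hw;
	return NULL;
}

int main(void)
{
	const struct hw_cfg *cfg = cfg_lookup(2);

	printf("%s\n", cfg ? cfg->name : "unsupported");
	return 0;
}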
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 69349abe59f2..efb918d9f68b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -42,6 +42,13 @@ struct mdp5_sub_block {
struct mdp5_lm_block {
MDP5_SUB_BLOCK_DEFINITION;
uint32_t nb_stages; /* number of stages per blender */
+ uint32_t max_width; /* Maximum output resolution */
+ uint32_t max_height;
+};
+
+struct mdp5_pipe_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t caps; /* pipe capabilities */
};
struct mdp5_ctl_block {
@@ -70,9 +77,9 @@ struct mdp5_cfg_hw {
struct mdp5_sub_block mdp;
struct mdp5_smp_block smp;
struct mdp5_ctl_block ctl;
- struct mdp5_sub_block pipe_vig;
- struct mdp5_sub_block pipe_rgb;
- struct mdp5_sub_block pipe_dma;
+ struct mdp5_pipe_block pipe_vig;
+ struct mdp5_pipe_block pipe_rgb;
+ struct mdp5_pipe_block pipe_dma;
struct mdp5_lm_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index ee31b16fe7ea..8e6c9b598a57 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -21,6 +21,8 @@ struct mdp5_cmd_encoder {
struct mdp5_interface intf;
bool enabled;
uint32_t bsc;
+
+ struct mdp5_ctl *ctl;
};
#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
@@ -210,13 +212,14 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
pingpong_tearcheck_setup(encoder, mode);
- mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+ mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
+ mdp5_cmd_enc->ctl);
}
static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
- struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
if (WARN_ON(!mdp5_cmd_enc->enabled))
@@ -235,7 +238,7 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
- struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
if (WARN_ON(mdp5_cmd_enc->enabled))
@@ -300,7 +303,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
/* initialize command mode encoder */
struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf)
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_cmd_encoder *mdp5_cmd_enc;
@@ -320,6 +323,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
encoder = &mdp5_cmd_enc->base;
+ mdp5_cmd_enc->ctl = ctl;
drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
DRM_MODE_ENCODER_DSI);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index dea3d2e559b1..7f9f4ac88029 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -160,8 +160,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
if (mdp5_crtc->ctl && !crtc->state->enable) {
/* set STAGE_UNUSED for all layers */
- mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
- mdp5_ctl_release(mdp5_crtc->ctl);
+ mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
mdp5_crtc->ctl = NULL;
}
}
@@ -196,13 +195,9 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
/*
* blend_setup() - blend all the planes of a CRTC
*
- * When border is enabled, the border color will ALWAYS be the base layer.
- * Therefore, the first plane (private RGB pipe) will start at STAGE0.
- * If disabled, the first plane starts at STAGE_BASE.
- *
- * Note:
- * Border is not enabled here because the private plane is exactly
- * the CRTC resolution.
+ * If no base layer is available, border will be enabled as the base layer.
+ * Otherwise all layers will be blended based on their stage calculated
+ * in mdp5_crtc_atomic_check.
*/
static void blend_setup(struct drm_crtc *crtc)
{
@@ -210,9 +205,14 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
const struct mdp5_cfg_hw *hw_cfg;
- uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+ struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+ const struct mdp_format *format;
+ uint32_t lm = mdp5_crtc->lm;
+ uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
-#define blender(stage) ((stage) - STAGE_BASE)
+ uint8_t stage[STAGE_MAX + 1];
+ int i, plane_cnt = 0;
+#define blender(stage) ((stage) - STAGE0)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@@ -222,33 +222,73 @@ static void blend_setup(struct drm_crtc *crtc)
if (!mdp5_crtc->ctl)
goto out;
+ /* Collect all plane information */
drm_atomic_crtc_for_each_plane(plane, crtc) {
- enum mdp_mixer_stage_id stage =
- to_mdp5_plane_state(plane->state)->stage;
+ pstate = to_mdp5_plane_state(plane->state);
+ pstates[pstate->stage] = pstate;
+ stage[pstate->stage] = mdp5_plane_pipe(plane);
+ plane_cnt++;
+ }
- /*
- * Note: This cannot happen with current implementation but
- * we need to check this condition once z property is added
- */
- BUG_ON(stage > hw_cfg->lm.nb_stages);
+ /*
+ * If there is no base layer, enable border color.
+ * Although it's not possible in current blend logic,
+ * put it here as a reminder.
+ */
+ if (!pstates[STAGE_BASE] && plane_cnt) {
+ ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+ DBG("Border Color is enabled");
+ }
- /* LM */
- mdp5_write(mdp5_kms,
- REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
- MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
- MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+ /* Program blending for each occupied stage */
+ for (i = STAGE0; i <= STAGE_MAX; i++) {
+ if (!pstates[i])
+ continue;
+
+ format = to_mdp_format(
+ msm_framebuffer_format(pstates[i]->base.fb));
+ plane = pstates[i]->base.plane;
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+ fg_alpha = pstates[i]->alpha;
+ bg_alpha = 0xFF - pstates[i]->alpha;
+ DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
+ if (format->alpha_enable && pstates[i]->premultiplied) {
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ }
+ } else if (format->alpha_enable) {
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ }
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
+ blender(i)), blend_op);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
- blender(stage)), 0xff);
+ blender(i)), fg_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
- blender(stage)), 0x00);
- /* CTL */
- blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
- DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
- pipe2name(mdp5_plane_pipe(plane)), stage);
+ blender(i)), bg_alpha);
}
- DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
- mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+ mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
out:
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
@@ -339,25 +379,19 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
- struct plane_state pstates[STAGE3 + 1];
+ struct plane_state pstates[STAGE_MAX + 1];
+ const struct mdp5_cfg_hw *hw_cfg;
int cnt = 0, i;
DBG("%s: check", mdp5_crtc->name);
- /* request a free CTL, if none is already allocated for this CRTC */
- if (state->enable && !mdp5_crtc->ctl) {
- mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
- if (WARN_ON(!mdp5_crtc->ctl))
- return -EINVAL;
- }
-
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
*/
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
drm_atomic_crtc_state_for_each_plane(plane, state) {
struct drm_plane_state *pstate;
-
- if (cnt >= ARRAY_SIZE(pstates)) {
+ if (cnt >= (hw_cfg->lm.nb_stages)) {
dev_err(dev->dev, "too many planes!\n");
return -EINVAL;
}
@@ -369,13 +403,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
*/
if (!pstate)
pstate = plane->state;
-
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
cnt++;
}
+ /* assign a stage based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
for (i = 0; i < cnt; i++) {
@@ -388,13 +422,15 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
DBG("%s: begin", mdp5_crtc->name);
}
-static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -691,8 +727,8 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
complete_flip(crtc, file);
}
-/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -715,7 +751,8 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
mdp_irq_update(&mdp5_kms->base);
- mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+ mdp5_crtc->ctl = ctl;
+ mdp5_ctl_set_pipeline(ctl, intf, lm);
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
@@ -724,12 +761,6 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc)
return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
-{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
-}
-
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -774,7 +805,5 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
plane->crtc = crtc;
- mdp5_plane_install_properties(plane, &crtc->base);
-
return crtc;
}
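
The reworked blend_setup() above selects the mixer's foreground/background alpha sources per stage from the framebuffer format and plane state. A standalone sketch of that decision (enum names are illustrative, not the MDP5 register encodings; the constant-alpha modulation for partially transparent planes is omitted for brevity):

#include <stdbool.h>
#include <stdio.h>

enum alpha_src { ALPHA_CONST, ALPHA_FG_PIXEL };

static void pick_blend(bool fmt_has_alpha, bool premultiplied,
		       enum alpha_src *fg, enum alpha_src *bg)
{
	if (fmt_has_alpha && premultiplied) {
		*fg = ALPHA_CONST;	/* FG pixels already carry their alpha */
		*bg = ALPHA_FG_PIXEL;	/* BG driven by the FG pixel alpha (inverted via the INV flag) */
	} else if (fmt_has_alpha) {
		*fg = ALPHA_FG_PIXEL;	/* straight alpha: both sides follow FG pixels */
		*bg = ALPHA_FG_PIXEL;
	} else {
		*fg = ALPHA_CONST;	/* opaque format: constant plane alpha */
		*bg = ALPHA_CONST;
	}
}

int main(void)
{
	enum alpha_src fg, bg;

	pick_blend(true, false, &fg, &bg);	/* e.g. ARGB8888, straight alpha */
	printf("fg=%d bg=%d\n", fg, bg);
	return 0;
}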
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index f2530f224a76..4e81ca4f964a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -17,7 +17,7 @@
/*
* CTL - MDP Control Pool Manager
*
- * Controls are shared between all CRTCs.
+ * Controls are shared between all display interfaces.
*
* They are intended to be used for data path configuration.
* The top level register programming describes the complete data path for
@@ -27,12 +27,11 @@
*
* In certain use cases (high-resolution dual pipe), one single CTL can be
* shared across multiple CRTCs.
- *
- * Because the number of CTLs can be less than the number of CRTCs,
- * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
- * requested by the client (in mdp5_crtc_mode_set()).
*/
+#define CTL_STAT_BUSY 0x1
+#define CTL_STAT_BOOKED 0x2
+
struct op_mode {
struct mdp5_interface intf;
@@ -46,8 +45,8 @@ struct mdp5_ctl {
u32 id;
int lm;
- /* whether this CTL has been allocated or not: */
- bool busy;
+ /* CTL status bitmask */
+ u32 status;
/* Operation Mode Configuration for the Pipeline */
struct op_mode pipeline;
@@ -61,7 +60,10 @@ struct mdp5_ctl {
bool cursor_on;
- struct drm_crtc *crtc;
+ /* True if the current CTL has FLUSH bits pending for single FLUSH. */
+ bool flush_pending;
+
+ struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};
struct mdp5_ctl_manager {
@@ -74,6 +76,10 @@ struct mdp5_ctl_manager {
/* to filter out non-present bits in the current hardware config */
u32 flush_hw_mask;
+ /* status for single FLUSH */
+ bool single_flush_supported;
+ u32 single_flush_pending_mask;
+
/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
spinlock_t pool_lock;
struct mdp5_ctl ctls[MAX_CTL];
@@ -168,11 +174,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
+ struct mdp5_interface *intf, int lm)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+ if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
+ dev_err(mdp5_kms->dev->dev,
+ "CTL %d is allocated by INTF %d, but used by INTF %d\n",
+ ctl->id, ctl->pipeline.intf.num, intf->num);
+ return -EINVAL;
+ }
+
+ ctl->lm = lm;
+
memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -287,29 +303,85 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ ctl->cursor_on = enable;
spin_unlock_irqrestore(&ctl->hw_lock, flags);
ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
- ctl->cursor_on = enable;
return 0;
}
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+ case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+ case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+ case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+ case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+ case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+ case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+ case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+ case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+ case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+ default: return 0;
+ }
+}
+
+static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ if (stage < STAGE6)
+ return 0;
+
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
+ case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
+ case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
+ case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
+ case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
+ case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
+ case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
+ case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
+ case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
+ case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+ default: return 0;
+ }
+}
+
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+ u32 ctl_blend_op_flags)
{
unsigned long flags;
+ u32 blend_cfg = 0, blend_ext_cfg = 0;
+ int i, start_stage;
+
+ if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
+ start_stage = STAGE0;
+ blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+ } else {
+ start_stage = STAGE_BASE;
+ }
+ for (i = start_stage; i < start_stage + stage_cnt; i++) {
+ blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
+ blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
- else
- blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
- spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
- ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+ blend_cfg, blend_ext_cfg);
return 0;
}
@@ -379,6 +451,31 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
return sw_mask;
}
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+ u32 *flush_id)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+ if (ctl->pair) {
+ DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+ ctl->flush_pending = true;
+ ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+ *flush_mask = 0;
+
+ if (ctl->pair->flush_pending) {
+ *flush_id = min_t(u32, ctl->id, ctl->pair->id);
+ *flush_mask = ctl_mgr->single_flush_pending_mask;
+
+ ctl->flush_pending = false;
+ ctl->pair->flush_pending = false;
+ ctl_mgr->single_flush_pending_mask = 0;
+
+ DBG("Single FLUSH mask %x,ID %d", *flush_mask,
+ *flush_id);
+ }
+ }
+}
+
/**
* mdp5_ctl_commit() - Register Flush
*
@@ -400,6 +497,8 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
struct op_mode *pipeline = &ctl->pipeline;
unsigned long flags;
+ u32 flush_id = ctl->id;
+ u32 curr_ctl_flush_mask;
pipeline->start_mask &= ~flush_mask;
@@ -415,9 +514,13 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
flush_mask &= ctl_mgr->flush_hw_mask;
+ curr_ctl_flush_mask = flush_mask;
+
+ fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
if (flush_mask) {
spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
@@ -426,7 +529,7 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
refill_start_mask(ctl);
}
- return flush_mask;
+ return curr_ctl_flush_mask;
}
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
@@ -434,59 +537,85 @@ u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}
-void mdp5_ctl_release(struct mdp5_ctl *ctl)
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
- struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
- unsigned long flags;
+ return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
- if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
- dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
- ctl->id, ctl->busy);
- return;
+/*
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+ struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	/* do nothing silently if hw doesn't support single FLUSH */
+ if (!ctl_mgr->single_flush_supported)
+ return 0;
+
+ if (!enable) {
+ ctlx->pair = NULL;
+ ctly->pair = NULL;
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+ return 0;
+ } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+ dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+ return -EINVAL;
+ } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+ dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+ return -EINVAL;
}
- spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
- ctl->busy = false;
- spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ ctlx->pair = ctly;
+ ctly->pair = ctlx;
- DBG("CTL %d released", ctl->id);
-}
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+ MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
-int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
-{
- return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+ return 0;
}
/*
- * mdp5_ctl_request() - CTL dynamic allocation
+ * mdp5_ctl_request() - CTL allocation
*
- * Note: Current implementation considers that we can only have one CRTC per CTL
+ * Try to return a booked CTL when @intf_num is 1 or 2, an unbooked one otherwise.
+ * If no CTL is available in the preferred category, allocate from the other one.
*
- * @return first free CTL
+ * @return NULL if no CTL is available.
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
- struct drm_crtc *crtc)
+ int intf_num)
{
struct mdp5_ctl *ctl = NULL;
+ const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+ u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
unsigned long flags;
int c;
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+	/* search the preferred category first */
for (c = 0; c < ctl_mgr->nctl; c++)
- if (!ctl_mgr->ctls[c].busy)
- break;
+ if ((ctl_mgr->ctls[c].status & checkm) == match)
+ goto found;
- if (unlikely(c >= ctl_mgr->nctl)) {
- dev_err(ctl_mgr->dev->dev, "No more CTL available!");
- goto unlock;
- }
+ dev_warn(ctl_mgr->dev->dev,
+ "fall back to the other CTL category for INTF %d!\n", intf_num);
- ctl = &ctl_mgr->ctls[c];
+ match ^= CTL_STAT_BOOKED;
+ for (c = 0; c < ctl_mgr->nctl; c++)
+ if ((ctl_mgr->ctls[c].status & checkm) == match)
+ goto found;
- ctl->lm = mdp5_crtc_get_lm(crtc);
- ctl->crtc = crtc;
- ctl->busy = true;
+ dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+ goto unlock;
+
+found:
+ ctl = &ctl_mgr->ctls[c];
+ ctl->pipeline.intf.num = intf_num;
+ ctl->lm = -1;
+ ctl->status |= CTL_STAT_BUSY;
ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
@@ -515,9 +644,11 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
- void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+ void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
struct mdp5_ctl_manager *ctl_mgr;
+ const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+ int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
@@ -551,14 +682,28 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
if (WARN_ON(!ctl_cfg->base[c])) {
dev_err(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
goto fail;
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
ctl->reg_offset = ctl_cfg->base[c];
- ctl->busy = false;
+ ctl->status = 0;
spin_lock_init(&ctl->hw_lock);
}
+
+ /*
+	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two DSI
+	 * interfaces to support the single FLUSH feature (both CTL0 and CTL1 are
+	 * flushed when only CTL0's FLUSH register is written), keeping the two
+	 * DSI pipes in sync. Single FLUSH is supported from hw rev v3.0.
+ */
+ if (rev >= 3) {
+ ctl_mgr->single_flush_supported = true;
+ /* Reserve CTL0/1 for INTF1/2 */
+ ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+ ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+ }
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 4678228c4f14..96148c6f863c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -23,7 +23,7 @@
*/
struct mdp5_ctl_manager;
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
- void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+ void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
@@ -32,49 +32,32 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
* mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
-struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
struct mdp5_interface;
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
+ int lm);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
-
-/*
- * blend_cfg (LM blender config):
- *
- * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
- * are being blended according to their stage (z-order), through @blend_cfg arg.
- */
-static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
- enum mdp_mixer_stage_id stage)
-{
- switch (pipe) {
- case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
- case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
- case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
- case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
- case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
- case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
- case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
- case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
- case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
- case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
- default: return 0;
- }
-}
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
/*
* mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
*
- * @blend_cfg: see LM blender config definition below
+ * @stage: array containing the pipe number for each stage
+ * @stage_cnt: number of valid stages in the stage array
+ * @ctl_blend_op_flags: blender operation mode flags
*
* Note:
* CTL registers need to be flushed after calling this function
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+ u32 ctl_blend_op_flags);
/**
* mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -91,8 +74,6 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
-
#endif /* __MDP5_CTL_H__ */
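For orientation, here is a hypothetical caller of the new mdp5_ctl_blend() signature
(not part of this series; the enum values are illustrative stand-ins for the ones in
mdp_common.xml.h). The point is that the stage array is indexed by absolute mixer
stage, and that the caller is expected to sort its planes by zpos before filling it:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified mixer stage and pipe ids (values illustrative only). */
enum { STAGE_BASE = 1, STAGE0 = 2, STAGE_MAX = 8 };
enum { PIPE_NONE = 0, PIPE_RGB0, PIPE_VIG0 };

int main(void)
{
	/* Indexed by absolute mixer stage, as mdp5_ctl_blend() expects. */
	uint8_t stage[STAGE_MAX + 1] = { 0 };
	uint32_t stage_cnt = 0;

	/* A caller would sort its planes by the zpos property and assign
	 * them to consecutive stages above the base layer. */
	stage[STAGE0 + stage_cnt++] = PIPE_RGB0;	/* zpos 1 */
	stage[STAGE0 + stage_cnt++] = PIPE_VIG0;	/* zpos 2 */

	/* With MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT set, blending starts at
	 * STAGE0 and the base layer is the border color; otherwise it
	 * starts at STAGE_BASE with stage[STAGE_BASE] as the base layer. */
	for (uint32_t i = STAGE0; i < STAGE0 + stage_cnt; i++)
		printf("stage %u -> pipe %u\n", i, stage[i]);

	return 0;
}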
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index de97c08f3f1f..c9e32b08a7a0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -27,6 +27,8 @@ struct mdp5_encoder {
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
+
+ struct mdp5_ctl *ctl;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
@@ -222,14 +224,15 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
+ mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
+ mdp5_encoder->ctl);
}
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
int lm = mdp5_crtc_get_lm(encoder->crtc);
struct mdp5_interface *intf = &mdp5_encoder->intf;
int intfn = mdp5_encoder->intf.num;
@@ -264,7 +267,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
struct mdp5_interface *intf = &mdp5_encoder->intf;
int intfn = mdp5_encoder->intf.num;
unsigned long flags;
@@ -294,6 +297,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
struct mdp5_kms *mdp5_kms;
int intf_num;
u32 data = 0;
@@ -316,12 +320,13 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
/* Make sure clocks are on when connectors calling this function. */
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
- MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
/* Dumb Panel, Sync mode */
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+
+ mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
mdp5_disable(mdp5_kms);
return 0;
@@ -329,7 +334,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf)
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
@@ -345,6 +350,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
encoder = &mdp5_encoder->base;
+ mdp5_encoder->ctl = ctl;
spin_lock_init(&mdp5_encoder->intf_lock);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 33bd4c6160dd..b1f73bee1368 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -21,8 +21,11 @@
#include "msm_drv.h"
#include "mdp5_kms.h"
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask)
{
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
+ irqmask ^ (irqmask & old_irqmask));
mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
}
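A quick way to read the new INTR_CLEAR write above: irqmask ^ (irqmask & old_irqmask)
is simply the set of bits being newly enabled, i.e. irqmask & ~old_irqmask, so any
stale latched status for those interrupts is cleared before they are unmasked. A tiny
standalone check with hypothetical mask values (not taken from the driver):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t old_irqmask = 0x0030;		/* previously enabled bits */
	uint32_t irqmask     = 0x0130;		/* bit 8 newly enabled */

	/* the expression used for REG_MDP5_MDP_INTR_CLEAR above */
	uint32_t newly_enabled = irqmask ^ (irqmask & old_irqmask);

	assert(newly_enabled == (irqmask & ~old_irqmask));	/* == 0x0100 */
	return 0;
}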
@@ -71,9 +74,10 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
- uint32_t status;
+ uint32_t status, enable;
- status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
+ enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
+ status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
VERB("status=%08x", status);
@@ -112,15 +116,24 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp5_enable(mdp5_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
+ mdp5_disable(mdp5_kms);
+
return 0;
}
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp5_enable(mdp5_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
+ mdp5_disable(mdp5_kms);
}
/*
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..047cb0433ccb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
+ int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ mdp5_plane_complete_commit(plane, plane_state);
+ }
+
mdp5_disable(mdp5_kms);
}
@@ -164,7 +177,8 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
- clk_disable_unprepare(mdp5_kms->lut_clk);
+ if (mdp5_kms->lut_clk)
+ clk_disable_unprepare(mdp5_kms->lut_clk);
return 0;
}
@@ -176,14 +190,15 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
- clk_prepare_enable(mdp5_kms->lut_clk);
+ if (mdp5_kms->lut_clk)
+ clk_prepare_enable(mdp5_kms->lut_clk);
return 0;
}
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
enum mdp5_intf_type intf_type, int intf_num,
- enum mdp5_intf_mode intf_mode)
+ enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -196,9 +211,9 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
if ((intf_type == INTF_DSI) &&
(intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
- encoder = mdp5_cmd_encoder_init(dev, &intf);
+ encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
else
- encoder = mdp5_encoder_init(dev, &intf);
+ encoder = mdp5_encoder_init(dev, &intf, ctl);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
@@ -236,6 +251,8 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
const struct mdp5_cfg_hw *hw_cfg =
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
+ struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
+ struct mdp5_ctl *ctl;
struct drm_encoder *encoder;
int ret = 0;
@@ -246,8 +263,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->edp)
break;
+ ctl = mdp5_ctlm_request(ctlm, intf_num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
- MDP5_INTF_MODE_NONE);
+ MDP5_INTF_MODE_NONE, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -259,8 +282,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->hdmi)
break;
+ ctl = mdp5_ctlm_request(ctlm, intf_num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
- MDP5_INTF_MODE_NONE);
+ MDP5_INTF_MODE_NONE, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -285,14 +314,20 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->dsi[dsi_id])
break;
+ ctl = mdp5_ctlm_request(ctlm, intf_num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
MDP5_INTF_DSI_MODE_COMMAND :
MDP5_INTF_DSI_MODE_VIDEO;
dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
- intf_num, mode);
- if (IS_ERR(dsi_encs)) {
- ret = PTR_ERR(dsi_encs);
+ intf_num, mode, ctl);
+ if (IS_ERR(dsi_encs[i])) {
+ ret = PTR_ERR(dsi_encs[i]);
break;
}
}
@@ -314,9 +349,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
static const enum mdp5_pipe crtcs[] = {
SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
- static const enum mdp5_pipe pub_planes[] = {
+ static const enum mdp5_pipe vig_planes[] = {
SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
};
+ static const enum mdp5_pipe dma_planes[] = {
+ SSPP_DMA0, SSPP_DMA1,
+ };
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg;
@@ -337,7 +375,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
struct drm_crtc *crtc;
plane = mdp5_plane_init(dev, crtcs[i], true,
- hw_cfg->pipe_rgb.base[i]);
+ hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -355,16 +393,30 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
}
- /* Construct public planes: */
+ /* Construct video planes: */
for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
struct drm_plane *plane;
- plane = mdp5_plane_init(dev, pub_planes[i], false,
- hw_cfg->pipe_vig.base[i]);
+ plane = mdp5_plane_init(dev, vig_planes[i], false,
+ hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct %s plane: %d\n",
- pipe2name(pub_planes[i]), ret);
+ pipe2name(vig_planes[i]), ret);
+ goto fail;
+ }
+ }
+
+ /* DMA planes */
+ for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
+ struct drm_plane *plane;
+
+ plane = mdp5_plane_init(dev, dma_planes[i], false,
+ hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ dev_err(dev->dev, "failed to construct %s plane: %d\n",
+ pipe2name(dma_planes[i]), ret);
goto fail;
}
}
@@ -476,7 +528,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
if (ret)
- goto fail;
+ DBG("failed to get (optional) lut_clk clock");
ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
if (ret)
goto fail;
@@ -508,7 +560,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
- mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+ mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
if (IS_ERR(mdp5_kms->ctlm)) {
ret = PTR_ERR(mdp5_kms->ctlm);
mdp5_kms->ctlm = NULL;
@@ -564,6 +616,11 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = config->hw->lm.max_width;
+ dev->mode_config.max_height = config->hw->lm.max_height;
+
return kms;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..0bb62423586e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -70,18 +70,12 @@ struct mdp5_kms {
struct mdp5_plane_state {
struct drm_plane_state base;
- /* "virtual" zpos.. we calculate actual mixer-stage at runtime
- * by sorting the attached planes by zpos and then assigning
- * mixer stage lowest to highest. Private planes get default
- * zpos of zero, and public planes a unique value that is
- * greater than zero. This way, things work out if a naive
- * userspace assigns planes to a crtc without setting zpos.
- */
- int zpos;
+	/* blend parameters, aligned with the exposed plane properties */
+ uint8_t premultiplied;
+ uint8_t zpos;
+ uint8_t alpha;
- /* the actual mixer stage, calculated in crtc->atomic_check()
- * NOTE: this should move to mdp5_crtc_state, when that exists
- */
+ /* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
/* some additional transactional status to help us know in the
@@ -192,7 +186,8 @@ static inline uint32_t lm2ppdone(int lm)
int mdp5_disable(struct mdp5_kms *mdp5_kms);
int mdp5_enable(struct mdp5_kms *mdp5_kms);
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
void mdp5_irq_preinstall(struct msm_kms *kms);
int mdp5_irq_postinstall(struct msm_kms *kms);
void mdp5_irq_uninstall(struct msm_kms *kms);
@@ -202,58 +197,38 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
-static inline bool pipe_supports_yuv(enum mdp5_pipe pipe)
-{
- switch (pipe) {
- case SSPP_VIG0:
- case SSPP_VIG1:
- case SSPP_VIG2:
- case SSPP_VIG3:
- return true;
- default:
- return false;
- }
-}
-
-static inline
-uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
- uint32_t max_formats)
-{
- return mdp_get_formats(pixel_formats, max_formats,
- !pipe_supports_yuv(pipe));
-}
-
-void mdp5_plane_install_properties(struct drm_plane *plane,
- struct drm_mode_object *obj);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane);
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+ struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
+ enum mdp5_pipe pipe, bool private_plane,
+ uint32_t reg_offset, uint32_t caps);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf);
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl);
int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf);
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl);
int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder);
#else
-static inline struct drm_encoder *mdp5_cmd_encoder_init(
- struct drm_device *dev, struct mdp5_interface *intf)
+static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..07fb62fea6dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -26,13 +26,12 @@ struct mdp5_plane {
spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
uint32_t reg_offset;
+ uint32_t caps;
uint32_t flush_mask; /* used to commit pipe registers */
uint32_t nformats;
uint32_t formats[32];
-
- bool enabled;
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
@@ -42,6 +41,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
+
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb);
@@ -56,44 +56,132 @@ static bool plane_enabled(struct drm_plane_state *state)
return state->fb && state->crtc;
}
-static int mdp5_plane_disable(struct drm_plane *plane)
+static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- DBG("%s: disable", mdp5_plane->name);
-
- if (mdp5_kms) {
- /* Release the memory we requested earlier from the SMP: */
- mdp5_smp_release(mdp5_kms->smp, pipe);
- }
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
- return 0;
+ kfree(mdp5_plane);
}
-static void mdp5_plane_destroy(struct drm_plane *plane)
+static void mdp5_plane_install_rotation_property(struct drm_device *dev,
+ struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane);
- drm_plane_cleanup(plane);
+ if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
+ !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
+ return;
- kfree(mdp5_plane);
+ if (!dev->mode_config.rotation_property)
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev,
+ BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+
+ if (dev->mode_config.rotation_property)
+ drm_object_attach_property(&plane->base,
+ dev->mode_config.rotation_property,
+ 0);
}
/* helper to install properties which are common to planes and crtcs */
-void mdp5_plane_install_properties(struct drm_plane *plane,
+static void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
- // XXX
+ struct drm_device *dev = plane->dev;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
+ prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
+ if (!prop) { \
+ prop = drm_property_##fnc(dev, 0, #name, \
+ ##__VA_ARGS__); \
+ if (!prop) { \
+ dev_warn(dev->dev, \
+ "Create property %s failed\n", \
+ #name); \
+ return; \
+ } \
+ dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
+ } \
+ drm_object_attach_property(&plane->base, prop, init_val); \
+ } while (0)
+
+#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
+ INSTALL_PROPERTY(name, NAME, init_val, \
+ create_range, min, max)
+
+#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
+ INSTALL_PROPERTY(name, NAME, init_val, \
+ create_enum, name##_prop_enum_list, \
+ ARRAY_SIZE(name##_prop_enum_list))
+
+ INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
+
+ mdp5_plane_install_rotation_property(dev, plane);
+
+#undef INSTALL_RANGE_PROPERTY
+#undef INSTALL_ENUM_PROPERTY
+#undef INSTALL_PROPERTY
+}
+
+static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct mdp5_plane_state *pstate;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ pstate = to_mdp5_plane_state(state);
+
+#define SET_PROPERTY(name, NAME, type) do { \
+ if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+ pstate->name = (type)val; \
+ DBG("Set property %s %d", #name, (type)val); \
+ goto done; \
+ } \
+ } while (0)
+
+ SET_PROPERTY(zpos, ZPOS, uint8_t);
+
+ dev_err(dev->dev, "Invalid property\n");
+ ret = -EINVAL;
+done:
+ return ret;
+#undef SET_PROPERTY
}
-int mdp5_plane_set_property(struct drm_plane *plane,
- struct drm_property *property, uint64_t val)
+static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property, uint64_t *val)
{
- // XXX
- return -EINVAL;
+ struct drm_device *dev = plane->dev;
+ struct mdp5_plane_state *pstate;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ pstate = to_mdp5_plane_state(state);
+
+#define GET_PROPERTY(name, NAME, type) do { \
+ if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+ *val = pstate->name; \
+ DBG("Get property %s %lld", #name, *val); \
+ goto done; \
+ } \
+ } while (0)
+
+ GET_PROPERTY(zpos, ZPOS, uint8_t);
+
+ dev_err(dev->dev, "Invalid property\n");
+ ret = -EINVAL;
+done:
+ return ret;
+#undef GET_PROPERTY
}
static void mdp5_plane_reset(struct drm_plane *plane)
@@ -106,11 +194,15 @@ static void mdp5_plane_reset(struct drm_plane *plane)
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- mdp5_state->zpos = 0;
- } else {
- mdp5_state->zpos = 1 + drm_plane_index(plane);
- }
+ /* assign default blend parameters */
+ mdp5_state->alpha = 255;
+ mdp5_state->premultiplied = 0;
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ mdp5_state->zpos = STAGE_BASE;
+ else
+ mdp5_state->zpos = STAGE0 + drm_plane_index(plane);
+
mdp5_state->base.plane = plane;
plane->state = &mdp5_state->base;
@@ -149,7 +241,9 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
- .set_property = mdp5_plane_set_property,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_set_property = mdp5_plane_atomic_set_property,
+ .atomic_get_property = mdp5_plane_atomic_get_property,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
@@ -182,10 +276,44 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *old_state = plane->state;
+ const struct mdp_format *format;
+ bool vflip, hflip;
DBG("%s: check (%d -> %d)", mdp5_plane->name,
plane_enabled(old_state), plane_enabled(state));
+ if (plane_enabled(state)) {
+ format = to_mdp_format(msm_framebuffer_format(state->fb));
+ if (MDP_FORMAT_IS_YUV(format) &&
+ !pipe_supports_yuv(mdp5_plane->caps)) {
+ dev_err(plane->dev->dev,
+ "Pipe doesn't support YUV\n");
+
+ return -EINVAL;
+ }
+
+ if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
+ (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))) {
+ dev_err(plane->dev->dev,
+ "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_w, state->crtc_h);
+
+ return -EINVAL;
+ }
+
+ hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
+ vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
+ if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
+ (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
+ dev_err(plane->dev->dev,
+ "Pipe doesn't support flip\n");
+
+ return -EINVAL;
+ }
+ }
+
if (plane_enabled(state) && plane_enabled(old_state)) {
/* we cannot change SMP block configuration during scanout: */
bool full_modeset = false;
@@ -224,7 +352,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
if (!plane_enabled(state)) {
to_mdp5_plane_state(state)->pending = true;
- mdp5_plane_disable(plane);
} else if (to_mdp5_plane_state(state)->mode_changed) {
int ret;
to_mdp5_plane_state(state)->pending = true;
@@ -365,16 +492,21 @@ static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
return 0;
}
-static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scalex_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasex_steps[2])
{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
uint32_t phasex_step;
unsigned int hsub;
int ret;
ret = calc_phase_step(src, dest, &phasex_step);
- if (ret)
+ if (ret) {
+ dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
+ }
hsub = drm_format_horz_chroma_subsampling(pixel_format);
@@ -384,16 +516,21 @@ static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
return 0;
}
-static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scaley_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasey_steps[2])
{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
uint32_t phasey_step;
unsigned int vsub;
int ret;
ret = calc_phase_step(src, dest, &phasey_step);
- if (ret)
+ if (ret) {
+ dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
+ }
vsub = drm_format_vert_chroma_subsampling(pixel_format);
@@ -403,28 +540,38 @@ static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
return 0;
}
-static uint32_t get_scalex_config(uint32_t src, uint32_t dest)
-{
- uint32_t filter;
-
- filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
-
- return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
- MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) |
- MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter) |
- MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter);
-}
-
-static uint32_t get_scaley_config(uint32_t src, uint32_t dest)
+static uint32_t get_scale_config(enum mdp_chroma_samp_type chroma_sample,
+ uint32_t src, uint32_t dest, bool hor)
{
- uint32_t filter;
-
- filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ uint32_t y_filter = (src <= dest) ? SCALE_FILTER_CA : SCALE_FILTER_PCMN;
+ uint32_t y_a_filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ uint32_t uv_filter = ((src / 2) <= dest) ? /* 2x upsample */
+ SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ uint32_t value = 0;
+
+ if (chroma_sample == CHROMA_420 || chroma_sample == CHROMA_H2V1) {
+ if (hor)
+ value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter);
+ else
+ value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter);
+ } else if (src != dest) {
+ if (hor)
+ value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_a_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter);
+ else
+ value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_a_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter);
+ }
- return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter) |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter);
+ return value;
}
static int mdp5_plane_mode_set(struct drm_plane *plane,
@@ -435,8 +582,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t src_w, uint32_t src_h)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct drm_plane_state *pstate = plane->state;
struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct device *dev = mdp5_kms->dev->dev;
enum mdp5_pipe pipe = mdp5_plane->pipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
@@ -444,6 +591,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,};
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
+ bool vflip, hflip;
unsigned long flags;
int ret;
@@ -468,7 +616,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
/* Request some memory from the SMP: */
ret = mdp5_smp_request(mdp5_kms->smp,
- mdp5_plane->pipe, fb->pixel_format, src_w);
+ mdp5_plane->pipe, format, src_w, false);
if (ret)
return ret;
@@ -480,29 +628,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
*/
mdp5_smp_configure(mdp5_kms->smp, pipe);
- /* SCALE is used to both scale and up-sample chroma components */
+ ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
+ if (ret)
+ return ret;
- if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) {
- /* TODO calc hdecm */
- ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step);
- if (ret) {
- dev_err(dev, "X scaling (%d -> %d) failed: %d\n",
- src_w, crtc_w, ret);
- return ret;
- }
- config |= get_scalex_config(src_w, crtc_w);
- }
+ ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
+ if (ret)
+ return ret;
- if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) {
- /* TODO calc vdecm */
- ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step);
- if (ret) {
- dev_err(dev, "Y scaling (%d -> %d) failed: %d\n",
- src_h, crtc_h, ret);
- return ret;
- }
- config |= get_scaley_config(src_h, crtc_h);
- }
+ /* TODO calc hdecm, vdecm */
+
+ /* SCALE is used to both scale and up-sample chroma components */
+ config |= get_scale_config(format->chroma_sample, src_w, crtc_w, true);
+ config |= get_scale_config(format->chroma_sample, src_h, crtc_h, false);
+ DBG("scale config = %x", config);
+
+ hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
+ vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
@@ -535,7 +677,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
- MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) |
+ MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
@@ -545,29 +687,35 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+ (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
/* not using secure mode: */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
- phasex_step[0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
- phasey_step[0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
- phasex_step[1]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
- phasey_step[1]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
- MDP5_PIPE_DECIMATION_VERT(vdecm) |
- MDP5_PIPE_DECIMATION_HORZ(hdecm));
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
-
- if (MDP_FORMAT_IS_YUV(format))
- csc_enable(mdp5_kms, pipe,
- mdp_get_default_csc_cfg(CSC_YUV2RGB));
- else
- csc_disable(mdp5_kms, pipe);
+ if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+ phasex_step[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+ phasey_step[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+ phasex_step[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+ phasey_step[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
+ }
+
+ if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
+ if (MDP_FORMAT_IS_YUV(format))
+ csc_enable(mdp5_kms, pipe,
+ mdp_get_default_csc_cfg(CSC_YUV2RGB));
+ else
+ csc_disable(mdp5_kms, pipe);
+ }
set_scanout_locked(plane, fb);
@@ -602,9 +750,24 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return mdp5_plane->flush_mask;
}
+/* called after vsync in thread context */
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+ if (!plane_enabled(plane->state)) {
+ DBG("%s: free SMP", mdp5_plane->name);
+ mdp5_smp_release(mdp5_kms->smp, pipe);
+ }
+}
+
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
+ enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
+ uint32_t caps)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
@@ -621,9 +784,11 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->pipe = pipe;
mdp5_plane->name = pipe2name(pipe);
+ mdp5_plane->caps = caps;
- mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
- ARRAY_SIZE(mdp5_plane->formats));
+ mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats),
+ !pipe_supports_yuv(mdp5_plane->caps));
mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
mdp5_plane->reg_offset = reg_offset;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..563cca972dcb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
* and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
*
* For each block that can be dynamically allocated, it can be either
- * free, or pending/in-use by a client. The updates happen in three steps:
+ * free:
+ * The block is free.
+ *
+ * pending:
+ * The block is allocated to some client and not free.
+ *
+ * configured:
+ * The block is allocated to some client, and assigned to that
+ * client in MDP5_MDP_SMP_ALLOC registers.
+ *
+ * inuse:
+ * The block is being actively used by a client.
+ *
+ * The updates happen in the following steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
- * blocks needed per client, and request. Blocks not inuse or
- * pending by any other client are added to client's pending
- * set.
+ * blocks needed per client, and request. Blocks neither inuse nor
+ * configured nor pending by any other client are added to client's
+ * pending set.
+ * For shrinking, blocks in pending but not in configured can be freed
+ * directly, but those already in configured will be freed later by
+ * mdp5_smp_commit.
*
* 2) mdp5_smp_configure():
* As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse)
+ * Current pending is copied to configured.
+ * It is assumed that mdp5_smp_request and mdp5_smp_configure do not run
+ * concurrently for the same pipe.
*
* 3) mdp5_smp_commit():
- * After next vblank, copy pending -> inuse. Optionally update
+ * After next vblank, copy configured -> inuse. Optionally update
* MDP5_SMP_ALLOC registers if there are newly unused blocks
*
+ * 4) mdp5_smp_release():
+ * Must be called after the pipe is disabled and no longer uses any SMP blocks.
+ *
* On the next vblank after changes have been committed to hw, the
* client's pending blocks become it's in-use blocks (and no-longer
* in-use blocks become available to other clients).
@@ -68,6 +90,8 @@
struct mdp5_smp {
struct drm_device *dev;
+ const struct mdp5_smp_block *cfg;
+
int blk_cnt;
int blk_size;
@@ -77,6 +101,9 @@ struct mdp5_smp {
struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
+static void update_smp_state(struct mdp5_smp *smp,
+ u32 cid, mdp5_smp_state_t *assigned);
+
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
@@ -112,14 +139,12 @@ static int smp_request_block(struct mdp5_smp *smp,
u32 cid, int nblks)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
- const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
int reserved;
unsigned long flags;
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- reserved = hw_cfg->smp.reserved[cid];
+ reserved = smp->cfg->reserved[cid];
spin_lock_irqsave(&smp->state_lock, flags);
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
for (i = cur_nblks; i > nblks; i--) {
int blk = find_first_bit(ps->pending, cnt);
clear_bit(blk, ps->pending);
- /* don't clear in global smp_state until _commit() */
+
+		/* clear in global smp_state only if the block is not in configured;
+		 * otherwise it will be cleared later by _commit()
+ */
+ if (!test_bit(blk, ps->configured))
+ clear_bit(blk, smp->state);
}
}
@@ -179,12 +209,14 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+ const struct mdp_format *format, u32 width, bool hdecim)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct drm_device *dev = mdp5_kms->dev;
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines, nblks, ret;
+ u32 fmt = format->base.pixel_format;
nplanes = drm_format_num_planes(fmt);
hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -192,6 +224,21 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
/* different if BWC (compressed framebuffer?) enabled: */
nlines = 2;
+ /* Newer MDPs have split/packing logic, which fetches sub-sampled
+ * U and V components (splits them from Y if necessary) and packs
+	 * them together, writing to SMP using a single client.
+ */
+ if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
+ fmt = DRM_FORMAT_NV24;
+ nplanes = 2;
+
+ /* if decimation is enabled, HW decimates less on the
+		 * sub-sampled chroma components
+ */
+ if (hdecim && (hsub > 1))
+ hsub = 1;
+ }
+
for (i = 0, nblks = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
@@ -223,10 +270,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- int i, nblks;
+ int i;
+ unsigned long flags;
+ int cnt = smp->blk_cnt;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ mdp5_smp_state_t assigned;
+ u32 cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ spin_lock_irqsave(&smp->state_lock, flags);
+
+ /* clear hw assignment */
+ bitmap_or(assigned, ps->inuse, ps->configured, cnt);
+ update_smp_state(smp, CID_UNUSED, &assigned);
+
+ /* free to global pool */
+ bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
+ bitmap_andnot(smp->state, smp->state, assigned, cnt);
+
+ /* clear client's infor */
+ bitmap_zero(ps->pending, cnt);
+ bitmap_zero(ps->configured, cnt);
+ bitmap_zero(ps->inuse, cnt);
+
+ spin_unlock_irqrestore(&smp->state_lock, flags);
+ }
- for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
- smp_request_block(smp, pipe2client(pipe, i), 0);
set_fifo_thresholds(smp, pipe, 0);
}
@@ -274,12 +344,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
- bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ /*
+		 * if vblank has not happened since the last smp_configure,
+ * skip the configure for now
+ */
+ if (!bitmap_equal(ps->inuse, ps->configured, cnt))
+ continue;
+
+ bitmap_copy(ps->configured, ps->pending, cnt);
+ bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, cid, &assigned);
}
}
-/* step #3: after vblank, copy pending -> inuse: */
+/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int cnt = smp->blk_cnt;
@@ -295,7 +373,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
* using, which can be released and made available to other
* clients:
*/
- if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
unsigned long flags;
spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +384,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
update_smp_state(smp, CID_UNUSED, &released);
}
- bitmap_copy(ps->inuse, ps->pending, cnt);
+ bitmap_copy(ps->inuse, ps->configured, cnt);
}
}
@@ -327,6 +405,7 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo
}
smp->dev = dev;
+ smp->cfg = cfg;
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
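The four-state flow documented at the top of mdp5_smp.c (pending, configured, inuse,
released) can be condensed into the following standalone sketch: plain 32-bit masks
instead of the kernel bitmap API, a single client, no locking and no register writes.
It is only meant to mimic request -> configure -> commit -> release for one pipe, under
those simplifying assumptions:

#include <stdint.h>
#include <stdio.h>

/* One client's SMP bookkeeping, mirroring struct mdp5_client_smp_state
 * with plain masks instead of kernel bitmaps. */
struct sketch_client {
	uint32_t pending;	/* step 1: blocks requested for the next frame */
	uint32_t configured;	/* step 2: blocks written to the ALLOC registers */
	uint32_t inuse;		/* step 3: blocks the hw is actively scanning from */
};

static void sketch_request(struct sketch_client *c, uint32_t blocks)
{
	c->pending = blocks;				/* step 1 */
}

static void sketch_configure(struct sketch_client *c)
{
	if (c->inuse != c->configured)			/* wait for vblank first */
		return;
	c->configured = c->pending;			/* step 2 */
	/* hw ALLOC registers would now be programmed for inuse | configured */
}

static void sketch_commit(struct sketch_client *c)
{
	uint32_t released = c->inuse & ~c->configured;	/* step 3, after vblank */
	if (released)
		printf("released blocks: %#x\n", released);
	c->inuse = c->configured;
}

static void sketch_release(struct sketch_client *c)
{
	c->pending = c->configured = c->inuse = 0;	/* step 4: pipe disabled */
}

int main(void)
{
	struct sketch_client c = { 0 };

	sketch_request(&c, 0x0f);	/* frame N needs 4 blocks */
	sketch_configure(&c);
	sketch_commit(&c);		/* after vblank: inuse = 0x0f */

	sketch_request(&c, 0x03);	/* frame N+1 shrinks to 2 blocks */
	sketch_configure(&c);
	sketch_commit(&c);		/* prints "released blocks: 0xc" */

	sketch_release(&c);
	printf("final inuse: %#x\n", c.inuse);
	return 0;
}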
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..20b87e800ea3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
+ mdp5_smp_state_t configured;
mdp5_smp_state_t pending;
};
@@ -38,7 +39,8 @@ struct mdp5_smp;
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
void mdp5_smp_destroy(struct mdp5_smp *smp);
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+ const struct mdp_format *format, u32 width, bool hdecim);
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 641d036c5bcb..4f792c4e40f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,7 +46,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum mdp_chroma_samp_type {
- CHROMA_RGB = 0,
+ CHROMA_FULL = 0,
CHROMA_H2V1 = 1,
CHROMA_H1V2 = 2,
CHROMA_420 = 3,
@@ -65,6 +65,10 @@ enum mdp_mixer_stage_id {
STAGE1 = 3,
STAGE2 = 4,
STAGE3 = 5,
+ STAGE4 = 6,
+ STAGE5 = 7,
+ STAGE6 = 8,
+ STAGE_MAX = 8,
};
enum mdp_alpha_type {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 7b0524dc1872..1c2caffc97e4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -71,7 +71,7 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
},
};
-#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
.base = { .pixel_format = DRM_FORMAT_ ## name }, \
.bpc_a = BPC ## a ## A, \
.bpc_r = BPC ## r, \
@@ -83,7 +83,8 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
.cpp = c, \
.unpack_count = cnt, \
.fetch_type = fp, \
- .chroma_sample = cs \
+ .chroma_sample = cs, \
+ .is_yuv = yuv, \
}
#define BPC0A 0
@@ -95,30 +96,49 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
static const struct mdp_format formats[] = {
/* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
/* --- RGB formats above / YUV formats below this line --- */
+ /* 2 plane YUV */
FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
- MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
- MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+ FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ /* 1 plane YUV */
+ FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ /* 3 plane YUV */
+ FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1,
+ MDP_PLANE_PLANAR, CHROMA_420, true),
+ FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1,
+ MDP_PLANE_PLANAR, CHROMA_420, true),
};
/*
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 1988c243f437..64287304054d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -39,7 +39,8 @@ static void update_irq(struct mdp_kms *mdp_kms)
list_for_each_entry(irq, &mdp_kms->irq_list, node)
irqmask |= irq->irqmask;
- mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+ mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
+ mdp_kms->cur_irq_mask = irqmask;
}
/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 2d3428cb74d0..46a94e7d50e2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -30,7 +30,8 @@ struct mdp_kms;
struct mdp_kms_funcs {
struct msm_kms_funcs base;
- void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+ void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
};
struct mdp_kms {
@@ -42,6 +43,7 @@ struct mdp_kms {
bool in_irq;
struct list_head irq_list; /* list of mdp4_irq */
uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ uint32_t cur_irq_mask; /* current irq mask */
};
#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
@@ -90,13 +92,27 @@ struct mdp_format {
uint8_t cpp, unpack_count;
enum mdp_fetch_type fetch_type;
enum mdp_chroma_samp_type chroma_sample;
+ bool is_yuv;
};
#define to_mdp_format(x) container_of(x, struct mdp_format, base)
-#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB)
+#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+/* MDP pipe capabilities */
+#define MDP_PIPE_CAP_HFLIP BIT(0)
+#define MDP_PIPE_CAP_VFLIP BIT(1)
+#define MDP_PIPE_CAP_SCALE BIT(2)
+#define MDP_PIPE_CAP_CSC BIT(3)
+#define MDP_PIPE_CAP_DECIMATION BIT(4)
+
+static inline bool pipe_supports_yuv(uint32_t pipe_caps)
+{
+ return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
+ (pipe_caps & MDP_PIPE_CAP_CSC);
+}
+
enum csc_type {
CSC_RGB2RGB = 0,
CSC_YUV2RGB,
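As a quick illustration of how the per-pipe capability flags introduced above are meant to be consumed, here is a hedged sketch of a caller that only advertises YUV formats on pipes that can both scale and color-convert. The example_get_plane_formats() name and the pipe_caps parameter are illustrative assumptions; only pipe_supports_yuv() and mdp_get_formats() come from the code above.

/* Illustrative only: gate YUV format exposure on the per-pipe caps. */
static uint32_t example_get_plane_formats(uint32_t pipe_caps,
					  uint32_t *pixel_formats,
					  uint32_t max_formats)
{
	/* Pipes without SCALE + CSC can only handle RGB formats. */
	bool rgb_only = !pipe_supports_yuv(pipe_caps);

	return mdp_get_formats(pixel_formats, max_formats, rgb_only);
}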
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
timeout = ktime_add_ms(ktime_get(), 1000);
- ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
- if (ret) {
- WARN_ON(ret); // TODO unswap state back? or??
- commit_destroy(c);
- return ret;
- }
+ /* uninterruptible wait */
+ msm_wait_fence(dev, c->fence, &timeout, false);
complete_commit(c);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..0339c5d82d37 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -116,6 +116,65 @@ u32 msm_readl(const void __iomem *addr)
return val;
}
+struct vblank_event {
+ struct list_head node;
+ int crtc_id;
+ bool enable;
+};
+
+static void vblank_ctrl_worker(struct work_struct *work)
+{
+ struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
+ struct msm_vblank_ctrl, work);
+ struct msm_drm_private *priv = container_of(vbl_ctrl,
+ struct msm_drm_private, vblank_ctrl);
+ struct msm_kms *kms = priv->kms;
+ struct vblank_event *vbl_ev, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vbl_ctrl->lock, flags);
+ list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+ list_del(&vbl_ev->node);
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+ if (vbl_ev->enable)
+ kms->funcs->enable_vblank(kms,
+ priv->crtcs[vbl_ev->crtc_id]);
+ else
+ kms->funcs->disable_vblank(kms,
+ priv->crtcs[vbl_ev->crtc_id]);
+
+ kfree(vbl_ev);
+
+ spin_lock_irqsave(&vbl_ctrl->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+}
+
+static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
+ int crtc_id, bool enable)
+{
+ struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+ struct vblank_event *vbl_ev;
+ unsigned long flags;
+
+ vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
+ if (!vbl_ev)
+ return -ENOMEM;
+
+ vbl_ev->crtc_id = crtc_id;
+ vbl_ev->enable = enable;
+
+ spin_lock_irqsave(&vbl_ctrl->lock, flags);
+ list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+ queue_work(priv->wq, &vbl_ctrl->work);
+
+ return 0;
+}
+
/*
* DRM operations:
*/
@@ -125,6 +184,18 @@ static int msm_unload(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
+ struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+ struct vblank_event *vbl_ev, *tmp;
+
+ /* We must cancel and clean up any pending vblank enable/disable
+ * work before drm_irq_uninstall() to avoid work re-enabling an
+ * irq after uninstall has disabled it.
+ */
+ cancel_work_sync(&vbl_ctrl->work);
+ list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+ list_del(&vbl_ev->node);
+ kfree(vbl_ev);
+ }
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
@@ -282,6 +353,9 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);
+ INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+ INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(dev);
@@ -331,10 +405,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
}
}
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &mode_config_funcs;
ret = drm_vblank_init(dev, priv->num_crtcs);
@@ -468,7 +538,7 @@ static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
if (!kms)
return -ENXIO;
DBG("dev=%p, crtc=%d", dev, crtc_id);
- return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+ return vblank_ctrl_queue_work(priv, crtc_id, true);
}
static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
@@ -478,7 +548,7 @@ static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
if (!kms)
return;
DBG("dev=%p, crtc=%d", dev, crtc_id);
- kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+ vblank_ctrl_queue_work(priv, crtc_id, false);
}
/*
@@ -637,8 +707,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
* Fences:
*/
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout)
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+ ktime_t *timeout, bool interruptible)
{
struct msm_drm_private *priv = dev->dev_private;
int ret;
@@ -667,7 +737,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
remaining_jiffies = timespec_to_jiffies(&ts);
}
- ret = wait_event_interruptible_timeout(priv->fence_event,
+ if (interruptible)
+ ret = wait_event_interruptible_timeout(priv->fence_event,
+ fence_completed(dev, fence),
+ remaining_jiffies);
+ else
+ ret = wait_event_timeout(priv->fence_event,
fence_completed(dev, fence),
remaining_jiffies);
@@ -853,7 +928,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
return -EINVAL;
}
- return msm_wait_fence_interruptable(dev, args->fence, &timeout);
+ return msm_wait_fence(dev, args->fence, &timeout, true);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..3be7a56b14f1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -30,6 +30,7 @@
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/types.h>
+#include <linux/of_graph.h>
#include <asm/sizes.h>
#ifndef CONFIG_OF
@@ -64,6 +65,19 @@ struct msm_file_private {
int dummy;
};
+enum msm_mdp_plane_property {
+ PLANE_PROP_ZPOS,
+ PLANE_PROP_ALPHA,
+ PLANE_PROP_PREMULTIPLIED,
+ PLANE_PROP_MAX_NUM
+};
+
+struct msm_vblank_ctrl {
+ struct work_struct work;
+ struct list_head event_list;
+ spinlock_t lock;
+};
+
struct msm_drm_private {
struct msm_kms *kms;
@@ -128,6 +142,9 @@ struct msm_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
+ /* Properties */
+ struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+
/* VRAM carveout, used when no IOMMU: */
struct {
unsigned long size;
@@ -137,6 +154,8 @@ struct msm_drm_private {
*/
struct drm_mm mm;
} vram;
+
+ struct msm_vblank_ctrl vblank_ctrl;
};
struct msm_format {
@@ -164,8 +183,8 @@ int msm_atomic_commit(struct drm_device *dev,
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout);
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+ ktime_t *timeout, bool interruptible);
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 95f6532df02d..f97a1964ef39 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -43,11 +43,11 @@ static struct fb_ops msm_fb_ops = {
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
*/
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_mmap = msm_fbdev_mmap,
.fb_check_var = drm_fb_helper_check_var,
@@ -144,10 +144,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
goto fail_unlock;
}
- fbi = framebuffer_alloc(0, dev->dev);
- if (!fbi) {
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi);
goto fail_unlock;
}
@@ -155,7 +155,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- helper->fbdev = fbi;
fbi->par = helper;
fbi->flags = FBINFO_DEFAULT;
@@ -163,12 +162,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
strcpy(fbi->fix.id, "msm");
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto fail_unlock;
- }
-
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -191,7 +184,6 @@ fail_unlock:
fail:
if (ret) {
- framebuffer_release(fbi);
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
@@ -266,17 +258,11 @@ void msm_fbdev_free(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct drm_fb_helper *helper = priv->fbdev;
struct msm_fbdev *fbdev;
- struct fb_info *fbi;
DBG();
- fbi = helper->fbdev;
-
- /* only cleanup framebuffer if it is present */
- if (fbi) {
- unregister_framebuffer(fbi);
- framebuffer_release(fbi);
- }
+ drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_release_fbi(helper);
drm_fb_helper_fini(helper);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
- ret = msm_wait_fence_interruptable(dev, fence, timeout);
+ ret = msm_wait_fence(dev, fence, timeout, true);
}
/* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- BUG_ON(!msm_obj->sgt); /* should have already pinned! */
- return msm_obj->sgt;
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
+ return NULL;
+
+ return drm_prime_pages_to_sg(msm_obj->pages, npages);
}
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 9f2498571d09..5f6ea1873f51 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -261,7 +261,7 @@ nv10_overlay_init(struct drm_device *device)
{
struct nouveau_drm *drm = nouveau_drm(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
- int num_formats = ARRAY_SIZE(formats);
+ unsigned int num_formats = ARRAY_SIZE(formats);
int ret;
if (!plane)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 3162040bc314..1f26eba245d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -919,7 +919,7 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
-static void
+static int
nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
{
struct nouveau_encoder *nv_encoder = NULL;
@@ -938,7 +938,7 @@ nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
}
}
- drm_helper_connector_dpms(connector, mode);
+ return drm_helper_connector_dpms(connector, mode);
}
static const struct drm_connector_funcs
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 649024d4daf1..109b8262dc85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
nvif_client_fini(&cli->base);
usif_client_fini(cli);
+ kfree(cli);
}
static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
pm_runtime_get_sync(dev->dev);
+ mutex_lock(&cli->mutex);
if (cli->abi16)
nouveau_abi16_fini(cli->abi16);
+ mutex_unlock(&cli->mutex);
mutex_lock(&drm->client.mutex);
list_del(&cli->head);
@@ -943,7 +946,8 @@ static struct drm_driver
driver_stub = {
.driver_features =
DRIVER_USE_AGP |
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
+ DRIVER_KMS_LEGACY_CONTEXT,
.load = nouveau_drm_load,
.unload = nouveau_drm_unload,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6751553abe4a..2791701685dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -84,7 +84,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
- cfb_fillrect(info, rect);
+ drm_fb_helper_cfb_fillrect(info, rect);
}
static void
@@ -116,7 +116,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
- cfb_copyarea(info, image);
+ drm_fb_helper_cfb_copyarea(info, image);
}
static void
@@ -148,7 +148,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
- cfb_imageblit(info, image);
+ drm_fb_helper_cfb_imageblit(info, image);
}
static int
@@ -197,9 +197,9 @@ static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -319,7 +319,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
- struct pci_dev *pdev = dev->pdev;
int size, ret;
mode_cmd.width = sizes->surface_width;
@@ -365,20 +364,13 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- info = framebuffer_alloc(0, &pdev->dev);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unlock;
}
info->skip_vt_switch = 1;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- framebuffer_release(info);
- goto out_unlock;
- }
-
info->par = fbcon;
nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
@@ -388,7 +380,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* setup helper */
fbcon->helper.fb = fb;
- fbcon->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (!chan)
@@ -450,15 +441,9 @@ static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
- struct fb_info *info;
- if (fbcon->helper.fbdev) {
- info = fbcon->helper.fbdev;
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&fbcon->helper);
+ drm_fb_helper_release_fbi(&fbcon->helper);
if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
@@ -496,7 +481,7 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
console_lock();
if (state == FBINFO_STATE_RUNNING)
nouveau_fbcon_accel_restore(dev);
- fb_set_suspend(drm->fbcon->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
if (state != FBINFO_STATE_RUNNING)
nouveau_fbcon_accel_save_disable(dev);
console_unlock();
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 775277f1edb0..dcfbbfaf1739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
return 0;
}
+#if IS_ENABLED(CONFIG_IOMMU_API)
+
static void nouveau_platform_probe_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
}
}
+#else
+
+static void nouveau_platform_probe_iommu(struct device *dev,
+ struct nouveau_platform_gpu *gpu)
+{
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+ struct nouveau_platform_gpu *gpu)
+{
+}
+
+#endif
+
static int nouveau_platform_probe(struct platform_device *pdev)
{
struct nouveau_platform_gpu *gpu;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 18f449715788..737e8f976a98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
switch (drm->device.info.family) {
+ case NV_DEVICE_INFO_V0_TNT:
+ case NV_DEVICE_INFO_V0_CELSIUS:
+ case NV_DEVICE_INFO_V0_KELVIN:
+ case NV_DEVICE_INFO_V0_RANKINE:
+ case NV_DEVICE_INFO_V0_CURIE:
+ break;
case NV_DEVICE_INFO_V0_TESLA:
if (drm->device.info.chipset != 0x50)
node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
break;
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
+ NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
+ drm->device.info.family);
break;
}
@@ -424,10 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
- mutex_lock(&drm->dev->struct_mutex);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
- mutex_unlock(&drm->dev->struct_mutex);
ttm_bo_device_release(&drm->ttm.bdev);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..495c57644ced 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- if (RING_SPACE(chan, 49)) {
+ if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
nouveau_fbcon_gpu_lockup(info);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..981342d142ff 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- if (show && nv_crtc->cursor.nvbo)
+ if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
nv50_crtc_cursor_show(nv_crtc);
else
nv50_crtc_cursor_hide(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..901130b06072 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- ret = RING_SPACE(chan, 59);
+ ret = RING_SPACE(chan, 58);
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma.offset));
OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ FIRE_RING(chan);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..fcd2e5f27bb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = RING_SPACE(chan, 60);
+ ret = RING_SPACE(chan, 58);
if (ret) {
WARN_ON(1);
nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 9ef6728c528d..7f2f05f78cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
default:
nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
- return 0x0000;
+ return NULL;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..ca11ddb6ed46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
gf100_gr_zbc_clear_depth(priv, index);
}
+/**
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+{
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+ bool gr_enabled, ctxsw_active, gr_busy;
+
+ do {
+ /*
+ * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+ * up-to-date
+ */
+ nv_rd32(priv, 0x400700);
+
+ gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
+ ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
+ gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+
+ if (!gr_enabled || (!gr_busy && !ctxsw_active))
+ return 0;
+ } while (time_before(jiffies, end_jiffies));
+
+ nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+ gr_enabled, ctxsw_active, gr_busy);
+ return -EAGAIN;
+}
+
void
gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
while (addr < next) {
nv_wr32(priv, 0x400200, addr);
- nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+ /**
+ * Wait for GR to go idle after submitting a
+ * GO_IDLE bundle
+ */
+ if ((addr & 0xffff) == 0xe100)
+ gf100_gr_wait_idle(priv);
+ nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
addr += init->pitch;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..c9533fdac4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
int ppc_nr;
};
+int gf100_gr_wait_idle(struct gf100_gr_priv *);
void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..4cf36a3aa814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -332,9 +332,12 @@ static void
nvkm_perfctx_dtor(struct nvkm_object *object)
{
struct nvkm_pm *ppm = (void *)object->engine;
+ struct nvkm_perfctx *ctx = (void *)object;
+
mutex_lock(&nv_subdev(ppm)->mutex);
- nvkm_engctx_destroy(&ppm->context->base);
- ppm->context = NULL;
+ nvkm_engctx_destroy(&ctx->base);
+ if (ppm->context == ctx)
+ ppm->context = NULL;
mutex_unlock(&nv_subdev(ppm)->mutex);
}
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
mutex_lock(&nv_subdev(ppm)->mutex);
if (ppm->context == NULL)
ppm->context = ctx;
- mutex_unlock(&nv_subdev(ppm)->mutex);
-
if (ctx != ppm->context)
- return -EBUSY;
+ ret = -EBUSY;
+ mutex_unlock(&nv_subdev(ppm)->mutex);
- return 0;
+ return ret;
}
struct nvkm_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..f4611e3f0971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
}
/**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+ struct nvkm_bios *bios = init->bios;
+ u32 reg = nv_ro32(bios, init->offset + 1);
+ u16 addr = nv_ro16(bios, init->offset + 5);
+ u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+
+ trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+ reg, addr, freq);
+ init->offset += 7;
+
+ init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+ struct nvkm_bios *bios = init->bios;
+ u32 reg = nv_ro32(bios, init->offset + 1);
+ u16 addr = nv_ro16(bios, init->offset + 5);
+ u32 data = nv_ro32(bios, addr);
+
+ trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+ reg, addr, data);
+ init->offset += 7;
+
+ init_wr32(init, reg, data);
+}
+
+/**
* INIT_SUB_DIRECT - opcode 0x5b
*
*/
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
[0x56] = { init_condition_time },
[0x57] = { init_ltime },
[0x58] = { init_zm_reg_sequence },
+ [0x59] = { init_pll_indirect },
+ [0x5a] = { init_zm_reg_indirect },
[0x5b] = { init_sub_direct },
[0x5c] = { init_jump },
[0x5e] = { init_i2c_if },
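For readability, a sketch of the 7-byte layout the two new opcode parsers above assume, restated from their nv_ro32()/nv_ro16() reads and the offset += 7 advance; nothing below is part of the VBIOS parser itself.

/* Illustrative layout of opcodes 0x59 (INIT_PLL_INDIRECT) and
 * 0x5a (INIT_ZM_REG_INDIRECT); both span 7 bytes in the init script:
 *
 *   offset + 0 : u8   opcode
 *   offset + 1 : u32  register to program
 *   offset + 5 : u16  offset into the VBIOS image holding either a
 *                     16-bit frequency value that the parser scales
 *                     by 1000 to kHz (0x59) or a 32-bit data word (0x5a)
 */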
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..065e9f5c8db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
struct gt215_clk_info *info)
{
struct gt215_clk_priv *priv = (void *)clock;
- u32 oclk, sclk, sdiv, diff;
+ u32 oclk, sclk, sdiv;
+ s32 diff;
info->clk = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..24dcdfb58a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
nv_wr32(priv, 0x12004c, 0x4);
nv_wr32(priv, 0x122204, 0x2);
nv_rd32(priv, 0x122204);
+
+ /*
+ * Bug: increase clock timeout to avoid operation failure at high
+ * gpcclk rate.
+ */
+ nv_wr32(priv, 0x122354, 0x800);
+ nv_wr32(priv, 0x128328, 0x800);
+ nv_wr32(priv, 0x124320, 0x800);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 80614f1b2074..282143f49d72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
{
struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
struct nv04_instobj_priv *node = (void *)object;
+ struct nvkm_subdev *subdev = (void *)priv;
+
+ mutex_lock(&subdev->mutex);
nvkm_mm_free(&priv->heap, &node->mem);
+ mutex_unlock(&subdev->mutex);
+
nvkm_instobj_destroy(&node->base);
}
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
struct nv04_instobj_priv *node;
struct nvkm_instobj_args *args = data;
+ struct nvkm_subdev *subdev = (void *)priv;
int ret;
if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
+ mutex_lock(&subdev->mutex);
ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
args->align, &node->mem);
+ mutex_unlock(&subdev->mutex);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 23d9c928cdc9..9a4ba4f03567 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -388,11 +388,13 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
}
-static void omap_crtc_atomic_begin(struct drm_crtc *crtc)
+static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
}
-static void omap_crtc_atomic_flush(struct drm_crtc *crtc)
+static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f2daad8c3d96..7841970de48d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -285,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
if (wait) {
if (!wait_for_completion_timeout(&engine->compl,
- msecs_to_jiffies(1))) {
+ msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae2df41f216f..12081e61d45a 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -177,7 +177,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
-int omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
@@ -211,7 +211,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
enum dma_data_direction dir);
int omap_gem_get_paddr(struct drm_gem_object *obj,
dma_addr_t *paddr, bool remap);
-int omap_gem_put_paddr(struct drm_gem_object *obj);
+void omap_gem_put_paddr(struct drm_gem_object *obj);
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
@@ -236,7 +236,7 @@ static inline int align_pitch(int pitch, int width, int bpp)
/* PVR needs alignment to 8 pixels.. right now that is the most
* restrictive stride requirement..
*/
- return ALIGN(pitch, 8 * bytespp);
+ return roundup(pitch, 8 * bytespp);
}
/* map crtc to vblank mask */
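The ALIGN() to roundup() change in align_pitch() above matters because the kernel's ALIGN() masks with (alignment - 1) and is therefore only valid for power-of-two alignments, while the 8-pixel PVR requirement becomes 24 bytes for 24 bpp formats. A small self-contained sketch (userspace restatements of the macros, made-up pitch value) showing the difference:

#include <stdio.h>

/* Userspace restatements of the kernel macros, for illustration only. */
#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((a) - 1)) /* like ALIGN(): power-of-two only */
#define ROUNDUP(x, y)    ((((x) + (y) - 1) / (y)) * (y)) /* like roundup(): any multiple */

int main(void)
{
	int pitch = 100;          /* made-up pitch in bytes */
	int bytespp = 3;          /* 24 bpp */
	int align = 8 * bytespp;  /* 24-byte stride requirement */

	printf("ALIGN  : %d\n", ALIGN_POW2(pitch, align)); /* 104 - not a multiple of 24 */
	printf("roundup: %d\n", ROUNDUP(pitch, align));    /* 120 - correct */
	return 0;
}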
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 0b967e76df1a..51b1219af87f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -287,10 +287,10 @@ fail:
}
/* unpin, no longer being scanned out: */
-int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+void omap_framebuffer_unpin(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = drm_format_num_planes(fb->pixel_format);
mutex_lock(&omap_fb->lock);
@@ -298,24 +298,16 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
if (omap_fb->pin_count > 0) {
mutex_unlock(&omap_fb->lock);
- return 0;
+ return;
}
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_put_paddr(plane->bo);
- if (ret)
- goto fail;
+ omap_gem_put_paddr(plane->bo);
plane->paddr = 0;
}
mutex_unlock(&omap_fb->lock);
-
- return 0;
-
-fail:
- mutex_unlock(&omap_fb->lock);
- return ret;
}
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 23b5a84389e3..b8e4cdec28c3 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -86,11 +86,11 @@ static struct fb_ops omap_fb_ops = {
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
*/
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
@@ -135,7 +135,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
if (fbdev->ywrap_enabled) {
/* need to align pitch to page size if using DMM scrolling */
- mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+ mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
}
/* allocate backing bo */
@@ -179,10 +179,10 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- fbi = framebuffer_alloc(0, dev->dev);
- if (!fbi) {
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi);
goto fail_unlock;
}
@@ -190,7 +190,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- helper->fbdev = fbi;
fbi->par = helper;
fbi->flags = FBINFO_DEFAULT;
@@ -198,12 +197,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
strcpy(fbi->fix.id, MODULE_NAME);
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto fail_unlock;
- }
-
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -236,8 +229,9 @@ fail_unlock:
fail:
if (ret) {
- if (fbi)
- framebuffer_release(fbi);
+
+ drm_fb_helper_release_fbi(helper);
+
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
@@ -312,17 +306,11 @@ void omap_fbdev_free(struct drm_device *dev)
struct omap_drm_private *priv = dev->dev_private;
struct drm_fb_helper *helper = priv->fbdev;
struct omap_fbdev *fbdev;
- struct fb_info *fbi;
DBG();
- fbi = helper->fbdev;
-
- /* only cleanup framebuffer if it is present */
- if (fbi) {
- unregister_framebuffer(fbi);
- framebuffer_release(fbi);
- }
+ drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_release_fbi(helper);
drm_fb_helper_fini(helper);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 2ab77801cf5f..7ed08fdc4c42 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -808,10 +808,10 @@ fail:
/* Release physical address, when DMA is no longer being performed.. this
* could potentially unpin and unmap buffers from TILER
*/
-int omap_gem_put_paddr(struct drm_gem_object *obj)
+void omap_gem_put_paddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
+ int ret;
mutex_lock(&obj->dev->struct_mutex);
if (omap_obj->paddr_cnt > 0) {
@@ -821,7 +821,6 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
if (ret) {
dev_err(obj->dev->dev,
"could not unpin pages: %d\n", ret);
- goto fail;
}
ret = tiler_release(omap_obj->block);
if (ret) {
@@ -832,9 +831,8 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
omap_obj->block = NULL;
}
}
-fail:
+
mutex_unlock(&obj->dev->struct_mutex);
- return ret;
}
/* Get rotated scanout address (only valid if already pinned), at the
@@ -1378,11 +1376,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
if (!omap_obj)
- goto fail;
-
- spin_lock(&priv->list_lock);
- list_add(&omap_obj->mm_list, &priv->obj_list);
- spin_unlock(&priv->list_lock);
+ return NULL;
obj = &omap_obj->base;
@@ -1392,11 +1386,19 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
*/
omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
&omap_obj->paddr, GFP_KERNEL);
- if (omap_obj->vaddr)
- flags |= OMAP_BO_DMA;
+ if (!omap_obj->vaddr) {
+ kfree(omap_obj);
+
+ return NULL;
+ }
+ flags |= OMAP_BO_DMA;
}
+ spin_lock(&priv->list_lock);
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
+
omap_obj->flags = flags;
if (flags & OMAP_BO_TILED) {
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index cfa8276c4deb..098904696a5c 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,6 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -153,9 +154,34 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
dispc_ovl_enable(omap_plane->id, false);
}
+static int omap_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+ return -EINVAL;
+
+ if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
.prepare_fb = omap_plane_prepare_fb,
.cleanup_fb = omap_plane_cleanup_fb,
+ .atomic_check = omap_plane_atomic_check,
.atomic_update = omap_plane_atomic_update,
.atomic_disable = omap_plane_atomic_disable,
};
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 6b6e57e8c2d6..41c422fee31a 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -197,7 +197,7 @@ static void qxl_fb_fillrect(struct fb_info *info,
{
struct qxl_fbdev *qfbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
@@ -207,7 +207,7 @@ static void qxl_fb_copyarea(struct fb_info *info,
{
struct qxl_fbdev *qfbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
area->height);
}
@@ -217,7 +217,7 @@ static void qxl_fb_imageblit(struct fb_info *info,
{
struct qxl_fbdev *qfbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
image->height);
}
@@ -345,7 +345,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qbo = NULL;
- struct device *device = &qdev->pdev->dev;
int ret;
int size;
int bpp = sizes->surface_bpp;
@@ -374,9 +373,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
shadow);
size = mode_cmd.pitches[0] * mode_cmd.height;
- info = framebuffer_alloc(0, device);
- if (info == NULL) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unref;
}
@@ -388,7 +387,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
/* setup helper with fb data */
qfbdev->helper.fb = fb;
- qfbdev->helper.fbdev = info;
+
qfbdev->shadow = shadow;
strcpy(info->fix.id, "qxldrmfb");
@@ -410,11 +409,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = qdev->vram_size;
@@ -423,13 +417,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
if (info->screen_base == NULL) {
ret = -ENOSPC;
- goto out_unref;
- }
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unref;
+ goto out_destroy_fbi;
}
info->fbdefio = &qxl_defio;
@@ -441,6 +429,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
return 0;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(&qfbdev->helper);
out_unref:
if (qbo) {
ret = qxl_bo_reserve(qbo, false);
@@ -479,15 +469,11 @@ static int qxl_fb_find_or_create_single(
static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
- struct fb_info *info;
struct qxl_framebuffer *qfb = &qfbdev->qfb;
- if (qfbdev->helper.fbdev) {
- info = qfbdev->helper.fbdev;
+ drm_fb_helper_unregister_fbi(&qfbdev->helper);
+ drm_fb_helper_release_fbi(&qfbdev->helper);
- unregister_framebuffer(info);
- framebuffer_release(info);
- }
if (qfb->obj) {
qxlfb_destroy_pinned_object(qfb->obj);
qfb->obj = NULL;
@@ -557,7 +543,7 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
- fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
}
bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6d6f33de48f4..b28370e014c6 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -272,7 +272,6 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
return;
dev_err(qdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
- mutex_lock(&qdev->ddev->struct_mutex);
dev_err(qdev->dev, "%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
@@ -280,8 +279,7 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference(&bo->gem_base);
- mutex_unlock(&qdev->ddev->struct_mutex);
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
}
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f434b4a7..c3872598b85a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
encoder_mode = atombios_get_encoder_mode(encoder);
if (connector && (radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
- (ENCODER_MODE_IS_DP(encoder_mode) &&
- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+ ENCODER_MODE_IS_DP(encoder_mode)))
radeon_audio_mode_set(encoder, adjusted_mode);
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8730562323a8..4a09947be244 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
tmp |= DPM_ENABLED;
break;
default:
- DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+ DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
break;
}
WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4ecf5caa8c6d..248953d2fdb7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7964,23 +7964,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7990,23 +7994,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8016,23 +8024,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8042,23 +8054,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8068,23 +8084,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8094,23 +8114,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8130,88 +8154,112 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+
break;
case 1:
- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+
break;
case 2:
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+
break;
case 3:
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+
break;
case 4:
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
case 6:
- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+
break;
case 7:
- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+
break;
case 8:
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+
break;
case 9:
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+
break;
case 10:
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+
break;
case 11:
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..44480c1b9738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->offset;
-
- WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
- AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+ WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+ AFMT_AUDIO_SRC_SELECT(dig->pin->id));
}
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
- struct drm_connector *connector, struct drm_display_mode *mode)
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 tmp = 0, offset;
+ u32 tmp = 0;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
else
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset, tmp;
+ u32 tmp;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
/* program the speaker allocation */
- tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset, tmp;
+ u32 tmp;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
/* program the speaker allocation */
- tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set DP mode */
tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
- struct cea_sad *sads, int sad_count)
+ struct cea_sad *sads, int sad_count)
{
- u32 offset;
int i;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
- WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+ WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
}
}
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
}
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto0 for HDMI */
u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
}
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto1 for DP */
u32 value = 0;
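The dce6_afmt.c changes drop the cached offset locals and the dig->afmt->pin indirection: the audio pin pointer is now taken from dig->pin, the NULL guard checks that member, and each register access reads dig->pin->offset at the point of use. A minimal userspace sketch of that guard-and-use shape follows; the struct layouts and offsets are hypothetical stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stddef.h>

struct r600_audio_pin { unsigned int offset; int id; };
struct radeon_afmt    { unsigned int offset; };
struct atom_dig       { struct radeon_afmt *afmt; struct r600_audio_pin *pin; };

/* mimics the reworked guard: bail out unless afmt and pin are both present,
 * then use the pin offset directly instead of caching it in a local */
static void select_pin(struct atom_dig *dig)
{
	if (!dig || !dig->afmt || !dig->pin)
		return;
	printf("write AFMT_AUDIO_SRC_CONTROL at 0x%x, pin id %d\n",
	       dig->afmt->offset, dig->pin->id);
}

int main(void)
{
	struct r600_audio_pin pin = { .offset = 0x5e00, .id = 1 };  /* illustrative values */
	struct radeon_afmt afmt = { .offset = 0x7000 };
	struct atom_dig dig = { .afmt = &afmt, .pin = &pin };

	select_pin(&dig);                 /* prints the programmed offsets */
	dig.pin = NULL;
	select_pin(&dig);                 /* no pin assigned: silently returns */
	return 0;
}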
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3a6d483a2c36..0acde1949c18 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@ restart_ih:
return IRQ_NONE;
rptr = rdev->ih.rptr;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
@@ -4942,23 +4942,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
break;
case 6:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
break;
case 7:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
break;
case 8:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
break;
case 9:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
break;
case 10:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
break;
case 11:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@ restart_ih:
case 44: /* hdmi */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8e5aeeb058a5..158872eb78e4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ if (rdev->family == CHIP_ARUBA) {
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
- if (!r)
- r = vce_v1_0_init(rdev);
- else if (r != -ENOENT)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ if (!r)
+ r = vce_v1_0_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ }
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
- radeon_vce_fini(rdev);
+ if (rdev->family == CHIP_ARUBA)
+ radeon_vce_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 35dafd77a639..4ea5b10ff5f4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4086,23 +4086,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@ restart_ih:
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@ restart_ih:
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
break;
case 1:
- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
break;
case 4:
- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
break;
case 5:
- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
break;
case 10:
- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
break;
case 12:
- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@ restart_ih:
case 21: /* hdmi */
switch (src_data) {
case 4:
- if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
- }
+ if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
- }
+ if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 09e3f39925fa..98f9adaccc3d 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
struct drm_buf *buf;
u32 *buffer;
const u8 __user *data;
- int size, pass_size;
+ unsigned int size, pass_size;
u64 src_offset, dst_offset;
if (!radeon_check_offset(dev_priv, tex->offset)) {
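The one-line r600_cp.c change switches size and pass_size from int to unsigned int. The likely motivation (an assumption on my part, not stated in the patch) is the usual C pitfall in which a signed value is silently converted to unsigned in a mixed comparison, so a negative, attacker-controlled size can slip past a "size < limit" style check. A self-contained illustration of that conversion:

#include <stdio.h>

int main(void)
{
	int          signed_size = -4;    /* e.g. a corrupted or negative size */
	unsigned int limit       = 16;

	/* usual arithmetic conversions turn -4 into a huge unsigned value,
	 * so this comparison is false even though -4 < 16 mathematically */
	if (signed_size < limit)
		printf("within limit\n");
	else
		printf("mixed comparison: -4 treated as %u\n", (unsigned int)signed_size);

	return 0;
}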
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..fbc8d88d6e5d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
static void radeon_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin, u8 enable_mask)
{
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ int pin_count = 0;
+
+ if (!pin)
+ return;
+
+ if (rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ if (radeon_encoder_is_digital(encoder)) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ dig = radeon_encoder->enc_priv;
+ if (dig->pin == pin)
+ pin_count++;
+ }
+ }
+
+ if ((pin_count > 1) && (enable_mask == 0))
+ return;
+ }
+
if (rdev->audio.funcs->enable)
rdev->audio.funcs->enable(rdev, pin, enable_mask);
}
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct radeon_encoder *radeon_encoder;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct cea_sad *sads;
int sad_count;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
}
BUG_ON(!sads);
- radeon_encoder = to_radeon_encoder(encoder);
-
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u8 *sadb = NULL;
int sad_count;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
- sad_count = drm_edid_to_speaker_allocation(
- radeon_connector_edid(connector), &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+ &sadb);
if (sad_count < 0) {
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
}
static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
- struct radeon_encoder *radeon_encoder;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = 0;
-
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
-
- radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
}
void radeon_audio_detect(struct drm_connector *connector,
+ struct drm_encoder *encoder,
enum drm_connector_status status)
{
- struct radeon_device *rdev;
- struct radeon_encoder *radeon_encoder;
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
- if (!connector || !connector->encoder)
+ if (!radeon_audio_chipset_supported(rdev))
return;
- rdev = connector->encoder->dev->dev_private;
-
- if (!radeon_audio_chipset_supported(rdev))
+ if (!radeon_encoder_is_digital(encoder))
return;
- radeon_encoder = to_radeon_encoder(connector->encoder);
dig = radeon_encoder->enc_priv;
if (status == connector_status_connected) {
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- radeon_encoder->audio = NULL;
- return;
- }
-
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
radeon_encoder->audio = rdev->audio.hdmi_funcs;
}
- dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ if (!dig->pin)
+ dig->pin = radeon_audio_get_pin(encoder);
+ radeon_audio_enable(rdev, dig->pin, 0xf);
+ } else {
+ radeon_audio_enable(rdev, dig->pin, 0);
+ dig->pin = NULL;
+ }
} else {
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
- dig->afmt->pin = NULL;
+ radeon_audio_enable(rdev, dig->pin, 0);
+ dig->pin = NULL;
}
}
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
}
static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
int err;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return -ENOENT;
- }
+ if (!connector)
+ return -EINVAL;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
return err;
}
- if (dig && dig->afmt &&
- radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+ if (dig && dig->afmt && radeon_encoder->audio &&
+ radeon_encoder->audio->set_avi_packet)
radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
buffer, sizeof(buffer));
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
if (!dig || !dig->afmt)
return;
- radeon_audio_set_mute(encoder, true);
+ if (!connector)
+ return;
- radeon_audio_write_speaker_allocation(encoder);
- radeon_audio_write_sad_regs(encoder);
- radeon_audio_write_latency_fields(encoder, mode);
- radeon_audio_set_dto(encoder, mode->clock);
- radeon_audio_set_vbi_packet(encoder);
- radeon_hdmi_set_color_depth(encoder);
- radeon_audio_update_acr(encoder, mode->clock);
- radeon_audio_set_audio_packet(encoder);
- radeon_audio_select_pin(encoder);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_audio_set_mute(encoder, true);
- if (radeon_audio_set_avi_packet(encoder, mode) < 0)
- return;
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
+ radeon_audio_set_dto(encoder, mode->clock);
+ radeon_audio_set_vbi_packet(encoder);
+ radeon_hdmi_set_color_depth(encoder);
+ radeon_audio_update_acr(encoder, mode->clock);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
- radeon_audio_set_mute(encoder, false);
+ radeon_audio_set_mute(encoder, false);
+ } else {
+ radeon_hdmi_set_color_depth(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+ }
}
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
- radeon_audio_write_speaker_allocation(encoder);
- radeon_audio_write_sad_regs(encoder);
- radeon_audio_write_latency_fields(encoder, mode);
- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
- else
- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
- radeon_audio_set_audio_packet(encoder);
- radeon_audio_select_pin(encoder);
-
- if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ if (!connector)
return;
+
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
+ if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+ radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+ else
+ radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+ }
}
void radeon_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
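A central piece of the radeon_audio.c rework is the new guard in radeon_audio_enable(): a disable request is ignored while more than one digital encoder still references the same audio pin, so unplugging one monitor no longer mutes a display that shares the pin. The userspace mock below reproduces that counting logic with stand-in types; it is a sketch of the guard, not the driver's data structures.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct pin { int id; };
struct encoder { bool digital; struct pin *pin; };

/* a disable request (enable_mask == 0) is dropped when more than one
 * digital encoder still references the same pin */
static bool should_program_pin(struct encoder *encoders, size_t n,
			       struct pin *pin, unsigned int enable_mask)
{
	int pin_count = 0;
	size_t i;

	if (!pin)
		return false;

	for (i = 0; i < n; i++)
		if (encoders[i].digital && encoders[i].pin == pin)
			pin_count++;

	if (pin_count > 1 && enable_mask == 0)
		return false;             /* pin still in use elsewhere */
	return true;
}

int main(void)
{
	struct pin p = { .id = 0 };
	struct encoder encs[2] = {
		{ .digital = true, .pin = &p },
		{ .digital = true, .pin = &p },
	};

	printf("disable shared pin: %s\n",
	       should_program_pin(encs, 2, &p, 0) ? "programmed" : "skipped");
	encs[1].pin = NULL;
	printf("disable exclusive pin: %s\n",
	       should_program_pin(encs, 2, &p, 0) ? "programmed" : "skipped");
	return 0;
}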
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
int radeon_audio_init(struct radeon_device *rdev);
void radeon_audio_detect(struct drm_connector *connector,
- enum drm_connector_status status);
+ struct drm_encoder *encoder,
+ enum drm_connector_status status);
u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
u32 offset, u32 reg);
void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..c097d3a82bda 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+ if (hss > lvds->native_mode.hdisplay)
+ hss = (10 - 1) * 8;
+
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ hss;
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
(RBIOS8(tmp + 23) * 8);
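The combios change clamps the horizontal sync start offset read from the legacy BIOS table: hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8, and when that exceeds the panel's hdisplay the code falls back to (10 - 1) * 8 = 72 pixels. A small worked example with made-up table values (the numbers are illustrative, not from a real BIOS):

#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 1024;

	/* sync-start field minus blank-start field, in character clocks */
	unsigned int good = (132 - 130 - 1) * 8;    /* = 8 pixels, accepted */
	unsigned int bad  = (400 - 130 - 1) * 8;    /* = 2152 pixels, bogus */

	if (bad > hdisplay)
		bad = (10 - 1) * 8;                 /* fallback used by the patch */

	printf("hsync_start offsets: good=%u bad(clamped)=%u\n", good, bad);
	return 0;
}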
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..94b21ae70ef7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,16 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
+ if ((radeon_audio != 0) && radeon_connector->use_digital) {
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+
+ encoder = connector_funcs->best_encoder(connector);
+ if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, encoder, ret);
+ }
+ }
exit:
pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
+ if ((radeon_audio != 0) && encoder) {
+ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, encoder, ret);
+ }
out:
pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 45e54060ee97..afaf346bd50e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ }
+
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
| (x << 16)
| y));
/* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
- (yorigin * 256)));
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+ yorigin * 256);
}
radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
return ret;
}
-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
- struct radeon_bo *robj = gem_to_radeon_bo(obj);
- uint64_t gpu_addr;
- int ret;
-
- ret = radeon_bo_reserve(robj, false);
- if (unlikely(ret != 0))
- goto fail;
- /* Only 27 bit offset for legacy cursor */
- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- &gpu_addr);
- radeon_bo_unreserve(robj);
- if (ret)
- goto fail;
-
- if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(gpu_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->family >= CHIP_RV770) {
- if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- }
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else {
- radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
- /* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
- }
-
- return 0;
-
-fail:
- drm_gem_object_unreference_unlocked(obj);
-
- return ret;
-}
-
int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -283,7 +256,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
+ struct radeon_bo *robj;
int ret;
if (!handle) {
@@ -305,6 +280,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
return -ENOENT;
}
+ robj = gem_to_radeon_bo(obj);
+ ret = radeon_bo_reserve(robj, false);
+ if (ret != 0) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &radeon_crtc->cursor_addr);
+ radeon_bo_unreserve(robj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
radeon_crtc->cursor_width = width;
radeon_crtc->cursor_height = height;
@@ -323,13 +315,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
radeon_crtc->cursor_hot_y = hot_y;
}
- ret = radeon_set_cursor(crtc, obj);
-
- if (ret)
- DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
- ret);
- else
- radeon_show_cursor(crtc);
+ radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
@@ -341,8 +327,7 @@ unpin:
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
- if (radeon_crtc->cursor_bo != obj)
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@ unpin:
void radeon_cursor_reset(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int ret;
if (radeon_crtc->cursor_bo) {
radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
radeon_crtc->cursor_y);
- ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
- if (ret)
- DRM_ERROR("radeon_set_cursor returned %d, not showing "
- "cursor\n", ret);
- else
- radeon_show_cursor(crtc);
+ radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
}
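The radeon_cursor.c hunks fold radeon_set_cursor() away: the cursor BO is pinned once in radeon_crtc_cursor_set2(), its GPU address is cached in radeon_crtc->cursor_addr, and radeon_show_cursor() programs the surface-address registers from that cached value (with suspend/resume unpinning and re-pinning it, per the radeon_device.c hunks below). The userspace mock below sketches that pin-once / reuse-address flow; the types and the fake address are stand-ins, and real pinning goes through radeon_bo_pin_restricted().

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct bo   { uint64_t gpu_addr; bool pinned; };
struct crtc { struct bo *cursor_bo; uint64_t cursor_addr; };

/* "pinning" here just fixes a fake GPU address */
static int pin_bo(struct bo *bo, uint64_t *addr)
{
	bo->pinned = true;
	bo->gpu_addr = 0x100000;          /* illustrative VRAM offset */
	*addr = bo->gpu_addr;
	return 0;
}

static void cursor_set(struct crtc *crtc, struct bo *bo)
{
	if (pin_bo(bo, &crtc->cursor_addr))
		return;
	crtc->cursor_bo = bo;             /* address cached for later shows */
}

static void show_cursor(struct crtc *crtc)
{
	/* programs CUR_SURFACE_ADDRESS from the cached value, no re-pin needed */
	printf("program cursor surface address: 0x%llx\n",
	       (unsigned long long)crtc->cursor_addr);
}

int main(void)
{
	struct bo bo = { 0 };
	struct crtc crtc = { 0 };

	cursor_set(&crtc, &bo);
	show_cursor(&crtc);               /* e.g. called again from cursor_reset() */
	show_cursor(&crtc);
	return 0;
}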
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2593b1168bd6..d8319dae8358 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1080,6 +1080,22 @@ static bool radeon_check_pot_argument(int arg)
}
/**
+ * Determine a sensible default GART size according to ASIC family.
+ *
+ * @family ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+ /* default to a larger gart size on newer asics */
+ if (family >= CHIP_TAHITI)
+ return 2048;
+ else if (family >= CHIP_RV770)
+ return 1024;
+ else
+ return 512;
+}
+
+/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
@@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
if (radeon_gart_size == -1) {
- /* default to a larger gart size on newer asics */
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small\n",
radeon_gart_size);
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
@@ -1572,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
- /* unpin the front buffers */
+ /* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
struct radeon_bo *robj;
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+ }
+
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
@@ -1639,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_crtc *crtc;
int r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
radeon_restore_bios_scratch_regs(rdev);
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ /* Only 27 bit offset for legacy cursor */
+ r = radeon_bo_pin_restricted(robj,
+ RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ?
+ 0 : 1 << 27,
+ &radeon_crtc->cursor_addr);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ radeon_bo_unreserve(robj);
+ }
+ }
+ }
+
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
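For reference, a minimal user-space sketch of the default-size selection that the radeon_gart_size_auto() helper added above encodes; the CHIP_* values here are illustrative stand-ins, not the kernel's radeon_family enum.

#include <stdio.h>

enum chip_family { CHIP_R300 = 10, CHIP_RV770 = 40, CHIP_TAHITI = 60 }; /* stand-in values */

static int gart_size_auto_mib(enum chip_family family)
{
    if (family >= CHIP_TAHITI)
        return 2048;   /* SI and newer: 2 GiB */
    else if (family >= CHIP_RV770)
        return 1024;   /* RV770 .. Cayman: 1 GiB */
    else
        return 512;    /* older ASICs: 512 MiB */
}

int main(void)
{
    printf("TAHITI default: %d MiB\n", gart_size_auto_mib(CHIP_TAHITI));
    printf("R300 default:   %d MiB\n", gart_size_auto_mib(CHIP_R300));
    return 0;
}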
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 257b10be5cda..5e09c061847f 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -246,9 +246,10 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(radeon_connector);
}
-static void radeon_connector_dpms(struct drm_connector *connector, int mode)
+static int radeon_connector_dpms(struct drm_connector *connector, int mode)
{
DRM_DEBUG_KMS("\n");
+ return 0;
}
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
@@ -284,11 +285,10 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
- drm_reinit_primary_mode_group(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
radeon_fb_add_connector(rdev, connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
drm_connector_register(connector);
return connector;
@@ -303,14 +303,12 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_unregister(connector);
/* need to nuke the connector */
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
/* dpms off */
radeon_fb_remove_connector(rdev, connector);
drm_connector_cleanup(connector);
- mutex_unlock(&dev->mode_config.mutex);
- drm_reinit_primary_mode_group(dev);
-
+ drm_modeset_unlock_all(dev);
kfree(connector);
DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 634793ea8418..7214858ffcea 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -82,9 +82,9 @@ static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = radeon_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -227,7 +227,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
- struct device *device = &rdev->pdev->dev;
int ret;
unsigned long tmp;
@@ -250,25 +249,25 @@ static int radeonfb_create(struct drm_fb_helper *helper,
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
- info = framebuffer_alloc(0, device);
- if (info == NULL) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unref;
}
info->par = rfbdev;
+ info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
- goto out_unref;
+ goto out_destroy_fbi;
}
fb = &rfbdev->rfb.base;
/* setup helper */
rfbdev->helper.fb = fb;
- rfbdev->helper.fbdev = info;
memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
@@ -288,11 +287,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
@@ -300,13 +294,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
if (info->screen_base == NULL) {
ret = -ENOSPC;
- goto out_unref;
- }
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unref;
+ goto out_destroy_fbi;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
@@ -318,6 +306,8 @@ static int radeonfb_create(struct drm_fb_helper *helper,
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_unref:
if (rbo) {
@@ -338,17 +328,10 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev)
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
- struct fb_info *info;
struct radeon_framebuffer *rfb = &rfbdev->rfb;
- if (rfbdev->helper.fbdev) {
- info = rfbdev->helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&rfbdev->helper);
+ drm_fb_helper_release_fbi(&rfbdev->helper);
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 5450fa95a47e..c4777c8d0312 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}
}
}
- mb();
- radeon_gart_tlb_flush(rdev);
+ if (rdev->gart.ptr) {
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
}
/**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
page_base += RADEON_GPU_PAGE_SIZE;
}
}
- mb();
- radeon_gart_tlb_flush(rdev);
+ if (rdev->gart.ptr) {
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c1310b953..3dcc5733ff69 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
if (robj->gem_base.import_attach)
drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+ radeon_mn_unregister(robj);
radeon_bo_unref(&robj);
}
}
@@ -428,7 +429,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -440,10 +440,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, &cur_placement, true);
+
+ r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+ if (r == 0)
+ r = -EBUSY;
+ else
+ r = 0;
+
+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -471,6 +477,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
r = ret;
/* Flush HDP cache via MMIO if necessary */
+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
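A hedged sketch of the busy-check mapping that the reworked radeon_gem_busy_ioctl() performs: the reservation object's "all fences signaled" answer is folded into 0 (idle) or -EBUSY, without blocking. The boolean parameter below stands in for the result of reservation_object_test_signaled_rcu().

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Fold a "are all fences signaled?" answer into the ioctl's return code. */
static int gem_busy_status(bool all_fences_signaled)
{
    return all_fences_signaled ? 0 : -EBUSY;
}

int main(void)
{
    printf("idle BO -> %d\n", gem_busy_status(true));   /* 0 */
    printf("busy BO -> %d\n", gem_busy_status(false));  /* -EBUSY */
    return 0;
}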
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index e476c331f3fa..9a4d69e59401 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -845,7 +845,8 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
break;
- case KGD_ENGINE_SDMA:
+ case KGD_ENGINE_SDMA1:
+ case KGD_ENGINE_SDMA2:
hdr = (const union radeon_firmware_header *)
rdev->sdma_fw->data;
break;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6de5459316b5..aecc3e3dec0c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
int offset;
bool last_buffer_filled_status;
int id;
- struct r600_audio_pin *pin;
};
struct radeon_mode_info {
@@ -343,7 +342,6 @@ struct radeon_crtc {
int max_cursor_width;
int max_cursor_height;
uint32_t legacy_display_base_addr;
- uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
u8 h_border;
u8 v_border;
@@ -440,6 +438,7 @@ struct radeon_encoder_atom_dig {
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
+ struct r600_audio_pin *pin;
int active_mst_links;
};
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 318165d4855c..676362769b8d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct radeon_bo, tbo);
radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
- radeon_mn_unregister(bo);
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ec10533a49b8..48d97c040f49 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -493,38 +493,35 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
if (bo_va->it.start || bo_va->it.last) {
- spin_lock(&vm->status_lock);
- if (list_empty(&bo_va->vm_status)) {
- /* add a clone of the bo_va to clear the old address */
- struct radeon_bo_va *tmp;
- spin_unlock(&vm->status_lock);
- tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (!tmp) {
- mutex_unlock(&vm->mutex);
- r = -ENOMEM;
- goto error_unreserve;
- }
- tmp->it.start = bo_va->it.start;
- tmp->it.last = bo_va->it.last;
- tmp->vm = vm;
- tmp->bo = radeon_bo_ref(bo_va->bo);
- spin_lock(&vm->status_lock);
- list_add(&tmp->vm_status, &vm->freed);
+ /* add a clone of the bo_va to clear the old address */
+ struct radeon_bo_va *tmp;
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ r = -ENOMEM;
+ goto error_unreserve;
}
- spin_unlock(&vm->status_lock);
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+ tmp->vm = vm;
+ tmp->bo = radeon_bo_ref(bo_va->bo);
interval_tree_remove(&bo_va->it, &vm->va);
+ spin_lock(&vm->status_lock);
bo_va->it.start = 0;
bo_va->it.last = 0;
+ list_del_init(&bo_va->vm_status);
+ list_add(&tmp->vm_status, &vm->freed);
+ spin_unlock(&vm->status_lock);
}
if (soffset || eoffset) {
+ spin_lock(&vm->status_lock);
bo_va->it.start = soffset;
bo_va->it.last = eoffset - 1;
- interval_tree_insert(&bo_va->it, &vm->va);
- spin_lock(&vm->status_lock);
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
+ interval_tree_insert(&bo_va->it, &vm->va);
}
bo_va->flags = flags;
@@ -1158,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
list_for_each_entry(bo_va, &bo->va, bo_list) {
spin_lock(&bo_va->vm->status_lock);
- if (list_empty(&bo_va->vm_status))
+ if (list_empty(&bo_va->vm_status) &&
+ (bo_va->it.start || bo_va->it.last))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
spin_unlock(&bo_va->vm->status_lock);
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 26388b5dd6ed..07037e32dea3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6466,23 +6466,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
case 6:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+
break;
case 7:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+
break;
case 8:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+
break;
case 9:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+
break;
case 10:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+
break;
case 11:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 1dbdf3230dae..787cd8fd897f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 65d6ba6621ac..48cb19949ca3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -496,7 +496,8 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_pending_vblank_event *event = crtc->state->event;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -512,7 +513,8 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
}
}
-static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 01b558fe3695..9a0c2911272a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
.pm = &rockchip_drm_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 77d52893d40f..002645bb5bbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
struct rockchip_drm_private *private = dev->dev_private;
struct drm_fb_helper *fb_helper = &private->fbdev_helper;
- drm_fb_helper_hotplug_event(fb_helper);
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 5b0dc0f6fd94..f261512bb4a0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,9 +37,9 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
static struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
.fb_mmap = rockchip_fbdev_mmap,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
@@ -77,10 +77,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
private->fbdev_bo = &rk_obj->base;
- fbi = framebuffer_alloc(0, dev->dev);
- if (!fbi) {
- dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
- ret = -ENOMEM;
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
+ dev_err(dev->dev, "Failed to create framebuffer info.\n");
+ ret = PTR_ERR(fbi);
goto err_rockchip_gem_free_object;
}
@@ -89,21 +89,13 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR(helper->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(helper->fb);
- goto err_framebuffer_release;
+ goto err_release_fbi;
}
- helper->fbdev = fbi;
-
fbi->par = helper;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &rockchip_drm_fbdev_ops;
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- dev_err(dev->dev, "Failed to allocate color map.\n");
- goto err_drm_framebuffer_unref;
- }
-
fb = helper->fb;
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -124,10 +116,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
return 0;
-err_drm_framebuffer_unref:
- drm_framebuffer_unreference(helper->fb);
-err_framebuffer_release:
- framebuffer_release(fbi);
+err_release_fbi:
+ drm_fb_helper_release_fbi(helper);
err_rockchip_gem_free_object:
rockchip_gem_free_object(&rk_obj->base);
return ret;
@@ -190,21 +180,8 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev)
helper = &private->fbdev_helper;
- if (helper->fbdev) {
- struct fb_info *info;
- int ret;
-
- info = helper->fbdev;
- ret = unregister_framebuffer(info);
- if (ret < 0)
- DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
- ret);
-
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_release_fbi(helper);
if (helper->fb)
drm_framebuffer_unreference(helper->fb);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eb2282cc4a56..a6d9104f7f15 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
&rk_obj->dma_attrs);
}
-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+
{
+ int ret;
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
struct drm_device *drm = obj->dev;
- unsigned long vm_size;
-
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
- vm_size = vma->vm_end - vma->vm_start;
- if (vm_size > obj->size)
- return -EINVAL;
+ /*
+ * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
+ * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
+ */
+ vma->vm_flags &= ~VM_PFNMAP;
- return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
obj->size, &rk_obj->dma_attrs);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
}
-/* drm driver mmap file operations */
-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_gem_object *obj;
- struct drm_vma_offset_node *node;
+ struct drm_device *drm = obj->dev;
int ret;
- if (drm_device_is_unplugged(dev))
- return -ENODEV;
+ mutex_lock(&drm->struct_mutex);
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ mutex_unlock(&drm->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
+ return rockchip_drm_gem_object_mmap(obj, vma);
+}
- node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
- if (!node) {
- mutex_unlock(&dev->struct_mutex);
- DRM_ERROR("failed to find vma node.\n");
- return -EINVAL;
- } else if (!drm_vma_node_is_allowed(node, filp)) {
- mutex_unlock(&dev->struct_mutex);
- return -EACCES;
- }
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj;
+ int ret;
- obj = container_of(node, struct drm_gem_object, vma_node);
- ret = rockchip_gem_mmap_buf(obj, vma);
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
- mutex_unlock(&dev->struct_mutex);
+ obj = vma->vm_private_data;
- return ret;
+ return rockchip_drm_gem_object_mmap(obj, vma);
}
struct rockchip_gem_object *
@@ -199,13 +200,10 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_gem_object *obj;
int ret;
- mutex_lock(&dev->struct_mutex);
-
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
ret = drm_gem_create_mmap_offset(obj);
@@ -216,10 +214,9 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
out:
- drm_gem_object_unreference(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
}
/*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index dc65161d7cad..34b78e736532 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -170,6 +170,7 @@ struct vop_win_phy {
struct vop_reg enable;
struct vop_reg format;
+ struct vop_reg rb_swap;
struct vop_reg act_info;
struct vop_reg dsp_info;
struct vop_reg dsp_st;
@@ -199,8 +200,12 @@ struct vop_data {
static const uint32_t formats_01[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_NV24,
@@ -209,8 +214,12 @@ static const uint32_t formats_01[] = {
static const uint32_t formats_234[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
};
static const struct vop_win_phy win01_data = {
@@ -218,6 +227,7 @@ static const struct vop_win_phy win01_data = {
.nformats = ARRAY_SIZE(formats_01),
.enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
.act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -234,6 +244,7 @@ static const struct vop_win_phy win23_data = {
.nformats = ARRAY_SIZE(formats_234),
.enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
.dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
.dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
@@ -242,15 +253,6 @@ static const struct vop_win_phy win23_data = {
.dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
};
-static const struct vop_win_phy cursor_data = {
- .data_formats = formats_234,
- .nformats = ARRAY_SIZE(formats_234),
- .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
- .format = VOP_REG(HWC_CTRL0, 0x7, 1),
- .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
-};
-
static const struct vop_ctrl ctrl_data = {
.standby = VOP_REG(SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
@@ -282,14 +284,14 @@ static const struct vop_reg_data vop_init_reg_table[] = {
/*
* Note: rk3288 has a dedicated 'cursor' window, however, that window requires
* special support to get alpha blending working. For now, just use overlay
- * window 1 for the drm cursor.
+ * window 3 for the drm cursor.
+ *
*/
static const struct vop_win_data rk3288_vop_win_data[] = {
{ .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+ { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
};
static const struct vop_data rk3288_vop = {
@@ -352,15 +354,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
}
}
+static bool has_rb_swapped(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_BGR565:
+ return true;
+ default:
+ return false;
+ }
+}
+
static enum vop_data_format vop_convert_format(uint32_t format)
{
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
return VOP_FMT_ARGB8888;
case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
return VOP_FMT_RGB888;
case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
return VOP_FMT_RGB565;
case DRM_FORMAT_NV12:
return VOP_FMT_YUV420SP;
@@ -378,6 +397,7 @@ static bool is_alpha_support(uint32_t format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
return true;
default:
return false;
@@ -588,6 +608,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
enum vop_data_format format;
uint32_t val;
bool is_alpha;
+ bool rb_swap;
bool visible;
int ret;
struct drm_rect dest = {
@@ -621,6 +642,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
return 0;
is_alpha = is_alpha_support(fb->pixel_format);
+ rb_swap = has_rb_swapped(fb->pixel_format);
format = vop_convert_format(fb->pixel_format);
if (format < 0)
return format;
@@ -689,6 +711,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
val = (dsp_sty - 1) << 16;
val |= (dsp_stx - 1) & 0xffff;
VOP_WIN_SET(vop, win, dsp_st, val);
+ VOP_WIN_SET(vop, win, rb_swap, rb_swap);
if (is_alpha) {
VOP_WIN_SET(vop, win, dst_alpha_ctl,
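A small stand-alone sketch of the red/blue swap decision the VOP driver now makes per pixel format before programming the new rb_swap window bit; the FMT_* constants are stand-ins for the DRM_FORMAT_* fourcc codes used in the patch.

#include <stdbool.h>
#include <stdio.h>

enum { FMT_XRGB8888, FMT_ABGR8888, FMT_BGR888, FMT_BGR565, FMT_RGB565 }; /* stand-ins */

static bool has_rb_swapped(int format)
{
    switch (format) {
    case FMT_ABGR8888:
    case FMT_BGR888:
    case FMT_BGR565:
        return true;    /* BGR orderings need the rb_swap bit set */
    default:
        return false;
    }
}

int main(void)
{
    printf("ABGR8888 swapped: %d\n", has_rb_swapped(FMT_ABGR8888));
    printf("RGB565 swapped:   %d\n", has_rb_swapped(FMT_RGB565));
    return 0;
}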
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 859ccb658601..e9272b0a8592 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -248,7 +248,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
lcdc_write(sdev, LDDDSR, value);
/* Setup planes. */
- drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ drm_for_each_legacy_plane(plane, dev) {
if (plane->crtc == crtc)
shmob_drm_plane_setup(plane);
}
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f0f1e4ee2d92..e27490b492a5 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,12 +1,11 @@
sticompositor-y := \
- sti_layer.o \
sti_mixer.o \
sti_gdp.o \
sti_vid.o \
sti_cursor.o \
sti_compositor.o \
- sti_drm_crtc.o \
- sti_drm_plane.o
+ sti_crtc.o \
+ sti_plane.o
stihdmi-y := sti_hdmi.o \
sti_hdmi_tx3g0c55phy.o \
@@ -24,4 +23,4 @@ obj-$(CONFIG_DRM_STI) = \
sticompositor.o \
sti_hqvdp.o \
stidvo.o \
- sti_drm_drv.o
+ sti_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 43215d3020fb..c652627b1bca 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -14,10 +14,12 @@
#include <drm/drmP.h>
#include "sti_compositor.h"
-#include "sti_drm_crtc.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
+#include "sti_crtc.h"
+#include "sti_cursor.h"
+#include "sti_drv.h"
#include "sti_gdp.h"
+#include "sti_plane.h"
+#include "sti_vid.h"
#include "sti_vtg.h"
/*
@@ -31,7 +33,7 @@ struct sti_compositor_data stih407_compositor_data = {
{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
- {STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
+ {STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
{STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
{STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
},
@@ -53,14 +55,29 @@ struct sti_compositor_data stih416_compositor_data = {
},
};
-static int sti_compositor_init_subdev(struct sti_compositor *compo,
- struct sti_compositor_subdev_descriptor *desc,
- unsigned int array_size)
+static int sti_compositor_bind(struct device *dev,
+ struct device *master,
+ void *data)
{
- unsigned int i, mixer_id = 0, layer_id = 0;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
+ struct sti_private *dev_priv = drm_dev->dev_private;
+ struct drm_plane *cursor = NULL;
+ struct drm_plane *primary = NULL;
+ struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
+ unsigned int array_size = compo->data.nb_subdev;
+
+ dev_priv->compo = compo;
+ /* Register mixer subdev and video subdev first */
for (i = 0; i < array_size; i++) {
switch (desc[i].type) {
+ case STI_VID_SUBDEV:
+ compo->vid[vid_id++] =
+ sti_vid_create(compo->dev, desc[i].id,
+ compo->regs + desc[i].offset);
+ break;
case STI_MIXER_MAIN_SUBDEV:
case STI_MIXER_AUX_SUBDEV:
compo->mixer[mixer_id++] =
@@ -68,83 +85,68 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
compo->regs + desc[i].offset);
break;
case STI_GPD_SUBDEV:
- case STI_VID_SUBDEV:
case STI_CURSOR_SUBDEV:
- compo->layer[layer_id++] =
- sti_layer_create(compo->dev, desc[i].id,
- compo->regs + desc[i].offset);
+ /* Nothing to do, wait for the second round */
break;
default:
DRM_ERROR("Unknow subdev compoment type\n");
return 1;
}
-
}
- compo->nb_mixers = mixer_id;
- compo->nb_layers = layer_id;
-
- return 0;
-}
-
-static int sti_compositor_bind(struct device *dev, struct device *master,
- void *data)
-{
- struct sti_compositor *compo = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
- unsigned int i, crtc = 0, plane = 0;
- struct sti_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_plane *cursor = NULL;
- struct drm_plane *primary = NULL;
- dev_priv->compo = compo;
-
- for (i = 0; i < compo->nb_layers; i++) {
- if (compo->layer[i]) {
- enum sti_layer_desc desc = compo->layer[i]->desc;
- enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
- enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
+ /* Register the other subdevs, create crtc and planes */
+ for (i = 0; i < array_size; i++) {
+ enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
- if (crtc < compo->nb_mixers)
- plane_type = DRM_PLANE_TYPE_PRIMARY;
+ if (crtc_id < mixer_id)
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
- switch (type) {
- case STI_CUR:
- cursor = sti_drm_plane_init(drm_dev,
- compo->layer[i],
- 1, DRM_PLANE_TYPE_CURSOR);
- break;
- case STI_GDP:
- case STI_VID:
- primary = sti_drm_plane_init(drm_dev,
- compo->layer[i],
- (1 << compo->nb_mixers) - 1,
- plane_type);
- plane++;
+ switch (desc[i].type) {
+ case STI_MIXER_MAIN_SUBDEV:
+ case STI_MIXER_AUX_SUBDEV:
+ case STI_VID_SUBDEV:
+ /* Nothing to do, already done at the first round */
+ break;
+ case STI_CURSOR_SUBDEV:
+ cursor = sti_cursor_create(drm_dev, compo->dev,
+ desc[i].id,
+ compo->regs + desc[i].offset,
+ 1);
+ if (!cursor) {
+ DRM_ERROR("Can't create CURSOR plane\n");
break;
- case STI_BCK:
- case STI_VDP:
+ }
+ break;
+ case STI_GPD_SUBDEV:
+ primary = sti_gdp_create(drm_dev, compo->dev,
+ desc[i].id,
+ compo->regs + desc[i].offset,
+ (1 << mixer_id) - 1,
+ plane_type);
+ if (!primary) {
+ DRM_ERROR("Can't create GDP plane\n");
break;
}
+ break;
+ default:
+ DRM_ERROR("Unknown subdev compoment type\n");
+ return 1;
+ }
- /* The first planes are reserved for primary planes*/
- if (crtc < compo->nb_mixers && primary) {
- sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
- primary, cursor);
- crtc++;
- cursor = NULL;
- primary = NULL;
- }
+ /* The first planes are reserved for primary planes*/
+ if (crtc_id < mixer_id && primary) {
+ sti_crtc_init(drm_dev, compo->mixer[crtc_id],
+ primary, cursor);
+ crtc_id++;
+ cursor = NULL;
+ primary = NULL;
}
}
- drm_vblank_init(drm_dev, crtc);
+ drm_vblank_init(drm_dev, crtc_id);
/* Allow usage of vblank without having to call drm_irq_install */
drm_dev->irq_enabled = 1;
- DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
- crtc, plane);
- DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
-
return 0;
}
@@ -179,7 +181,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *vtg_np;
struct sti_compositor *compo;
struct resource *res;
- int err;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
if (!compo) {
@@ -187,7 +188,7 @@ static int sti_compositor_probe(struct platform_device *pdev)
return -ENOMEM;
}
compo->dev = dev;
- compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
+ compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
/* populate data structure depending on compatibility */
BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -251,12 +252,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
if (vtg_np)
compo->vtg_aux = of_vtg_find(vtg_np);
- /* Initialize compositor subdevices */
- err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
- compo->data.nb_subdev);
- if (err)
- return err;
-
platform_set_drvdata(pdev, compo);
return component_add(&pdev->dev, &sti_compositor_ops);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 019eb44c62cc..1a4a73dab11e 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -12,13 +12,13 @@
#include <linux/clk.h>
#include <linux/kernel.h>
-#include "sti_layer.h"
#include "sti_mixer.h"
+#include "sti_plane.h"
#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
-#define STI_MAX_LAYER 8
#define STI_MAX_MIXER 2
+#define STI_MAX_VID 1
enum sti_compositor_subdev_type {
STI_MIXER_MAIN_SUBDEV,
@@ -59,11 +59,9 @@ struct sti_compositor_data {
* @rst_main: reset control of the main path
* @rst_aux: reset control of the aux path
* @mixer: array of mixers
+ * @vid: array of vids
* @vtg_main: vtg for main data path
* @vtg_aux: vtg for auxillary data path
- * @layer: array of layers
- * @nb_mixers: number of mixers for this compositor
- * @nb_layers: number of layers (GDP,VID,...) for this compositor
* @vtg_vblank_nb: callback for VTG VSYNC notification
*/
struct sti_compositor {
@@ -77,11 +75,9 @@ struct sti_compositor {
struct reset_control *rst_main;
struct reset_control *rst_aux;
struct sti_mixer *mixer[STI_MAX_MIXER];
+ struct sti_vid *vid[STI_MAX_VID];
struct sti_vtg *vtg_main;
struct sti_vtg *vtg_aux;
- struct sti_layer *layer[STI_MAX_LAYER];
- int nb_mixers;
- int nb_layers;
struct notifier_block vtg_vblank_nb;
};
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 6b641c5a2ec7..018ffc970e96 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -15,22 +15,20 @@
#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
+#include "sti_drv.h"
+#include "sti_vid.h"
#include "sti_vtg.h"
-static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- DRM_DEBUG_KMS("\n");
-}
-
-static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
+static void sti_crtc_enable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- mixer->enabled = true;
+ DRM_DEBUG_DRIVER("\n");
+
+ mixer->status = STI_MIXER_READY;
/* Prepare and enable the compo IP clock */
if (mixer->id == STI_MIXER_MAIN) {
@@ -41,45 +39,28 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
DRM_INFO("Failed to prepare/enable compo_aux clk\n");
}
- sti_mixer_clear_all_layers(mixer);
+ drm_crtc_vblank_on(crtc);
}
-static void sti_drm_crtc_commit(struct drm_crtc *crtc)
+static void sti_crtc_disabling(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
- struct device *dev = mixer->dev;
- struct sti_compositor *compo = dev_get_drvdata(dev);
- struct sti_layer *layer;
-
- if ((!mixer || !compo)) {
- DRM_ERROR("Can not find mixer or compositor)\n");
- return;
- }
- /* get GDP which is reserved to the CRTC FB */
- layer = to_sti_layer(crtc->primary);
- if (layer)
- sti_layer_commit(layer);
- else
- DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
-
- /* Enable layer on mixer */
- if (sti_mixer_set_layer_status(mixer, layer, true))
- DRM_ERROR("Can not enable layer at mixer\n");
+ DRM_DEBUG_DRIVER("\n");
- drm_crtc_vblank_on(crtc);
+ mixer->status = STI_MIXER_DISABLING;
}
-static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
/* accept the provided drm_display_mode, do not fix it up */
return true;
}
static int
-sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
+sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
@@ -122,22 +103,19 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
res = sti_mixer_active_video_area(mixer, &crtc->mode);
if (res) {
- DRM_ERROR("Can not set active video area\n");
+ DRM_ERROR("Can't set active video area\n");
return -EINVAL;
}
return res;
}
-static void sti_drm_crtc_disable(struct drm_crtc *crtc)
+static void sti_crtc_disable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- if (!mixer->enabled)
- return;
-
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
/* Disable Background */
@@ -154,17 +132,18 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
clk_disable_unprepare(compo->clk_compo_aux);
}
- mixer->enabled = false;
+ mixer->status = STI_MIXER_DISABLED;
}
static void
-sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- sti_drm_crtc_prepare(crtc);
- sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
+ sti_crtc_enable(crtc);
+ sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
}
-static void sti_drm_atomic_begin(struct drm_crtc *crtc)
+static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -178,46 +157,109 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc)
}
}
-static void sti_drm_atomic_flush(struct drm_crtc *crtc)
+static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
+ struct drm_device *drm_dev = crtc->dev;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+ struct drm_plane *p;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* perform plane actions */
+ list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ switch (plane->status) {
+ case STI_PLANE_UPDATED:
+ /* update planes tag as updated */
+ DRM_DEBUG_DRIVER("update plane %s\n",
+ sti_plane_to_str(plane));
+
+ if (sti_mixer_set_plane_depth(mixer, plane)) {
+ DRM_ERROR("Cannot set plane %s depth\n",
+ sti_plane_to_str(plane));
+ break;
+ }
+
+ if (sti_mixer_set_plane_status(mixer, plane, true)) {
+ DRM_ERROR("Cannot enable plane %s at mixer\n",
+ sti_plane_to_str(plane));
+ break;
+ }
+
+ /* if plane is HQVDP_0 then commit the vid[0] */
+ if (plane->desc == STI_HQVDP_0)
+ sti_vid_commit(compo->vid[0], p->state);
+
+ plane->status = STI_PLANE_READY;
+
+ break;
+ case STI_PLANE_DISABLING:
+ /* disabling sequence for planes tag as disabling */
+ DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
+ sti_plane_to_str(plane));
+
+ if (sti_mixer_set_plane_status(mixer, plane, false)) {
+ DRM_ERROR("Cannot disable plane %s at mixer\n",
+ sti_plane_to_str(plane));
+ continue;
+ }
+
+ if (plane->desc == STI_CURSOR)
+ /* tag plane status for disabled */
+ plane->status = STI_PLANE_DISABLED;
+ else
+ /* tag plane status for flushing */
+ plane->status = STI_PLANE_FLUSHING;
+
+ /* if plane is HQVDP_0 then disable the vid[0] */
+ if (plane->desc == STI_HQVDP_0)
+ sti_vid_disable(compo->vid[0]);
+
+ break;
+ default:
+ /* Other status case are not handled */
+ break;
+ }
+ }
}
static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
- .dpms = sti_drm_crtc_dpms,
- .prepare = sti_drm_crtc_prepare,
- .commit = sti_drm_crtc_commit,
- .mode_fixup = sti_drm_crtc_mode_fixup,
+ .enable = sti_crtc_enable,
+ .disable = sti_crtc_disabling,
+ .mode_fixup = sti_crtc_mode_fixup,
.mode_set = drm_helper_crtc_mode_set,
- .mode_set_nofb = sti_drm_crtc_mode_set_nofb,
+ .mode_set_nofb = sti_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
- .disable = sti_drm_crtc_disable,
- .atomic_begin = sti_drm_atomic_begin,
- .atomic_flush = sti_drm_atomic_flush,
+ .atomic_begin = sti_crtc_atomic_begin,
+ .atomic_flush = sti_crtc_atomic_flush,
};
-static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
+static void sti_crtc_destroy(struct drm_crtc *crtc)
{
DRM_DEBUG_KMS("\n");
drm_crtc_cleanup(crtc);
}
-static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property,
- uint64_t val)
+static int sti_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
{
DRM_DEBUG_KMS("\n");
return 0;
}
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
struct drm_device *drm_dev;
struct sti_compositor *compo =
container_of(nb, struct sti_compositor, vtg_vblank_nb);
int *crtc = data;
unsigned long flags;
- struct sti_drm_private *priv;
+ struct sti_private *priv;
drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
priv = drm_dev->dev_private;
@@ -233,21 +275,38 @@ int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
spin_lock_irqsave(&drm_dev->event_lock, flags);
if (compo->mixer[*crtc]->pending_event) {
drm_send_vblank_event(drm_dev, -1,
- compo->mixer[*crtc]->pending_event);
+ compo->mixer[*crtc]->pending_event);
drm_vblank_put(drm_dev, *crtc);
compo->mixer[*crtc]->pending_event = NULL;
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) {
+ struct drm_plane *p;
+
+ /* Disable mixer only if all overlay planes (GDP and VDP)
+ * are disabled */
+ list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
+ if (plane->status != STI_PLANE_DISABLED)
+ return 0;
+ }
+ sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc);
+ }
+
return 0;
}
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
{
- struct sti_drm_private *dev_priv = dev->dev_private;
+ struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+ DRM_DEBUG_DRIVER("\n");
+
if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux,
vtg_vblank_nb, crtc)) {
@@ -257,11 +316,11 @@ int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
return 0;
}
-EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
+EXPORT_SYMBOL(sti_crtc_enable_vblank);
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
{
- struct sti_drm_private *priv = dev->dev_private;
+ struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
@@ -273,23 +332,23 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
/* free the resources of the pending requests */
if (compo->mixer[crtc]->pending_event) {
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(drm_dev, crtc);
compo->mixer[crtc]->pending_event = NULL;
}
}
-EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
+EXPORT_SYMBOL(sti_crtc_disable_vblank);
static struct drm_crtc_funcs sti_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .destroy = sti_drm_crtc_destroy,
- .set_property = sti_drm_crtc_set_property,
+ .destroy = sti_crtc_destroy,
+ .set_property = sti_crtc_set_property,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
+bool sti_crtc_is_main(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -298,18 +357,18 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
return false;
}
-EXPORT_SYMBOL(sti_drm_crtc_is_main);
+EXPORT_SYMBOL(sti_crtc_is_main);
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
- struct drm_plane *primary, struct drm_plane *cursor)
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor)
{
struct drm_crtc *crtc = &mixer->drm_crtc;
int res;
res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
- &sti_crtc_funcs);
+ &sti_crtc_funcs);
if (res) {
- DRM_ERROR("Can not initialze CRTC\n");
+ DRM_ERROR("Can't initialze CRTC\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
new file mode 100644
index 000000000000..51963e6ddbe7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_CRTC_H_
+#define _STI_CRTC_H_
+
+#include <drm/drmP.h>
+
+struct sti_mixer;
+
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor);
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data);
+bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 010eaee60bf7..dd1032195051 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -7,8 +7,14 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sti_compositor.h"
#include "sti_cursor.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
#include "sti_vtg.h"
/* Registers */
@@ -42,15 +48,19 @@ struct dma_pixmap {
/**
* STI Cursor structure
*
- * @layer: layer structure
- * @width: cursor width
- * @height: cursor height
- * @clut: color look up table
- * @clut_paddr: color look up table physical address
- * @pixmap: pixmap dma buffer (clut8-format cursor)
+ * @sti_plane: sti_plane structure
+ * @dev: driver device
+ * @regs: cursor registers
+ * @width: cursor width
+ * @height: cursor height
+ * @clut: color look up table
+ * @clut_paddr: color look up table physical address
+ * @pixmap: pixmap dma buffer (clut8-format cursor)
*/
struct sti_cursor {
- struct sti_layer layer;
+ struct sti_plane plane;
+ struct device *dev;
+ void __iomem *regs;
unsigned int width;
unsigned int height;
unsigned short *clut;
@@ -62,22 +72,10 @@ static const uint32_t cursor_supported_formats[] = {
DRM_FORMAT_ARGB8888,
};
-#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
-
-static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
-{
- return cursor_supported_formats;
-}
-
-static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
-{
- return ARRAY_SIZE(cursor_supported_formats);
-}
+#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
-static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
+static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
{
- struct sti_cursor *cursor = to_sti_cursor(layer);
- u32 *src = layer->vaddr;
u8 *dst = cursor->pixmap.base;
unsigned int i, j;
u32 a, r, g, b;
@@ -96,127 +94,155 @@ static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
}
}
-static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
+static void sti_cursor_init(struct sti_cursor *cursor)
{
- struct sti_cursor *cursor = to_sti_cursor(layer);
- struct drm_display_mode *mode = layer->mode;
+ unsigned short *base = cursor->clut;
+ unsigned int a, r, g, b;
+
+ /* Assign CLUT values, ARGB444 format */
+ for (a = 0; a < 4; a++)
+ for (r = 0; r < 4; r++)
+ for (g = 0; g < 4; g++)
+ for (b = 0; b < 4; b++)
+ *base++ = (a * 5) << 12 |
+ (r * 5) << 8 |
+ (g * 5) << 4 |
+ (b * 5);
+}
+
+static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_cursor *cursor = to_sti_cursor(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode = &crtc->mode;
+ int dst_x = state->crtc_x;
+ int dst_y = state->crtc_y;
+ int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ int src_w = state->src_w >> 16;
+ int src_h = state->src_h >> 16;
+ bool first_prepare = plane->status == STI_PLANE_DISABLED;
+ struct drm_gem_cma_object *cma_obj;
u32 y, x;
u32 val;
- DRM_DEBUG_DRIVER("\n");
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
- dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+ dev_dbg(cursor->dev, "%s %s\n", __func__,
+ sti_plane_to_str(plane));
- if (layer->src_w < STI_CURS_MIN_SIZE ||
- layer->src_h < STI_CURS_MIN_SIZE ||
- layer->src_w > STI_CURS_MAX_SIZE ||
- layer->src_h > STI_CURS_MAX_SIZE) {
+ if (src_w < STI_CURS_MIN_SIZE ||
+ src_h < STI_CURS_MIN_SIZE ||
+ src_w > STI_CURS_MAX_SIZE ||
+ src_h > STI_CURS_MAX_SIZE) {
DRM_ERROR("Invalid cursor size (%dx%d)\n",
- layer->src_w, layer->src_h);
- return -EINVAL;
+ src_w, src_h);
+ return;
}
 /* If the cursor size has changed, re-allocate the pixmap */
if (!cursor->pixmap.base ||
- (cursor->width != layer->src_w) ||
- (cursor->height != layer->src_h)) {
- cursor->width = layer->src_w;
- cursor->height = layer->src_h;
+ (cursor->width != src_w) ||
+ (cursor->height != src_h)) {
+ cursor->width = src_w;
+ cursor->height = src_h;
if (cursor->pixmap.base)
- dma_free_writecombine(layer->dev,
+ dma_free_writecombine(cursor->dev,
cursor->pixmap.size,
cursor->pixmap.base,
cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height;
- cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
+ cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
cursor->pixmap.size,
&cursor->pixmap.paddr,
GFP_KERNEL | GFP_DMA);
if (!cursor->pixmap.base) {
DRM_ERROR("Failed to allocate memory for pixmap\n");
- return -ENOMEM;
+ return;
}
}
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return;
+ }
+
/* Convert ARGB8888 to CLUT8 */
- sti_cursor_argb8888_to_clut8(layer);
+ sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
/* AWS and AWE depend on the mode */
y = sti_vtg_get_line_number(*mode, 0);
x = sti_vtg_get_pixel_number(*mode, 0);
val = y << 16 | x;
- writel(val, layer->regs + CUR_AWS);
+ writel(val, cursor->regs + CUR_AWS);
y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
val = y << 16 | x;
- writel(val, layer->regs + CUR_AWE);
+ writel(val, cursor->regs + CUR_AWE);
if (first_prepare) {
/* Set and fetch CLUT */
- writel(cursor->clut_paddr, layer->regs + CUR_CML);
- writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
+ writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+ writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
}
- return 0;
-}
-
-static int sti_cursor_commit_layer(struct sti_layer *layer)
-{
- struct sti_cursor *cursor = to_sti_cursor(layer);
- struct drm_display_mode *mode = layer->mode;
- u32 ydo, xdo;
-
- dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
/* Set memory location, size, and position */
- writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
- writel(cursor->width, layer->regs + CUR_PMP);
- writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
+ writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
+ writel(cursor->width, cursor->regs + CUR_PMP);
+ writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
- ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
- xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y);
- writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
+ y = sti_vtg_get_line_number(*mode, dst_y);
+ x = sti_vtg_get_pixel_number(*mode, dst_y);
+ writel((y << 16) | x, cursor->regs + CUR_VPO);
- return 0;
+ plane->status = STI_PLANE_UPDATED;
}
-static int sti_cursor_disable_layer(struct sti_layer *layer)
+static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
{
- return 0;
-}
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
-static void sti_cursor_init(struct sti_layer *layer)
-{
- struct sti_cursor *cursor = to_sti_cursor(layer);
- unsigned short *base = cursor->clut;
- unsigned int a, r, g, b;
+ if (!drm_plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+ drm_plane->base.id);
+ return;
+ }
- /* Assign CLUT values, ARGB444 format */
- for (a = 0; a < 4; a++)
- for (r = 0; r < 4; r++)
- for (g = 0; g < 4; g++)
- for (b = 0; b < 4; b++)
- *base++ = (a * 5) << 12 |
- (r * 5) << 8 |
- (g * 5) << 4 |
- (b * 5);
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+
+ plane->status = STI_PLANE_DISABLING;
}
-static const struct sti_layer_funcs cursor_ops = {
- .get_formats = sti_cursor_get_formats,
- .get_nb_formats = sti_cursor_get_nb_formats,
- .init = sti_cursor_init,
- .prepare = sti_cursor_prepare_layer,
- .commit = sti_cursor_commit_layer,
- .disable = sti_cursor_disable_layer,
+static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+ .atomic_update = sti_cursor_atomic_update,
+ .atomic_disable = sti_cursor_atomic_disable,
};
-struct sti_layer *sti_cursor_create(struct device *dev)
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+ struct device *dev, int desc,
+ void __iomem *baseaddr,
+ unsigned int possible_crtcs)
{
struct sti_cursor *cursor;
+ size_t size;
+ int res;
cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
if (!cursor) {
@@ -225,18 +251,43 @@ struct sti_layer *sti_cursor_create(struct device *dev)
}
/* Allocate clut buffer */
- cursor->clut = dma_alloc_writecombine(dev,
- 0x100 * sizeof(unsigned short),
- &cursor->clut_paddr,
- GFP_KERNEL | GFP_DMA);
+ size = 0x100 * sizeof(unsigned short);
+ cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
+ GFP_KERNEL | GFP_DMA);
if (!cursor->clut) {
DRM_ERROR("Failed to allocate memory for cursor clut\n");
- devm_kfree(dev, cursor);
- return NULL;
+ goto err_clut;
+ }
+
+ cursor->dev = dev;
+ cursor->regs = baseaddr;
+ cursor->plane.desc = desc;
+ cursor->plane.status = STI_PLANE_DISABLED;
+
+ sti_cursor_init(cursor);
+
+ res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
+ possible_crtcs,
+ &sti_plane_helpers_funcs,
+ cursor_supported_formats,
+ ARRAY_SIZE(cursor_supported_formats),
+ DRM_PLANE_TYPE_CURSOR);
+ if (res) {
+ DRM_ERROR("Failed to initialize universal plane\n");
+ goto err_plane;
}
- cursor->layer.ops = &cursor_ops;
+ drm_plane_helper_add(&cursor->plane.drm_plane,
+ &sti_cursor_helpers_funcs);
+
+ sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
+
+ return &cursor->plane.drm_plane;
- return (struct sti_layer *)cursor;
+err_plane:
+ dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
+err_clut:
+ devm_kfree(dev, cursor);
+ return NULL;
}
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 3c9827404f27..2ee5c10e8b33 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,9 @@
#ifndef _STI_CURSOR_H_
#define _STI_CURSOR_H_
-struct sti_layer *sti_cursor_create(struct device *dev);
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+ struct device *dev, int desc,
+ void __iomem *baseaddr,
+ unsigned int possible_crtcs);
#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
deleted file mode 100644
index caca8b14f017..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_CRTC_H_
-#define _STI_DRM_CRTC_H_
-
-#include <drm/drmP.h>
-
-struct sti_mixer;
-
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
- struct drm_plane *primary, struct drm_plane *cursor);
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
- unsigned long event, void *data);
-bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
deleted file mode 100644
index 64d4ed43dda3..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- * Fabien Dessenne <fabien.dessenne@st.com>
- * for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
-#include "sti_vtg.h"
-
-enum sti_layer_desc sti_layer_default_zorder[] = {
- STI_GDP_0,
- STI_VID_0,
- STI_GDP_1,
- STI_VID_1,
- STI_GDP_2,
- STI_GDP_3,
-};
-
-/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
-
-static int
-sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct sti_layer *layer = to_sti_layer(plane);
- struct sti_mixer *mixer = to_sti_mixer(crtc);
- int res;
-
- DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- plane->base.id, sti_layer_to_str(layer));
- DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
-
- res = sti_mixer_set_layer_depth(mixer, layer);
- if (res) {
- DRM_ERROR("Can not set layer depth\n");
- return res;
- }
-
- /* src_x are in 16.16 format. */
- res = sti_layer_prepare(layer, crtc, fb,
- &crtc->mode, mixer->id,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x >> 16, src_y >> 16,
- src_w >> 16, src_h >> 16);
- if (res) {
- DRM_ERROR("Layer prepare failed\n");
- return res;
- }
-
- res = sti_layer_commit(layer);
- if (res) {
- DRM_ERROR("Layer commit failed\n");
- return res;
- }
-
- res = sti_mixer_set_layer_status(mixer, layer, true);
- if (res) {
- DRM_ERROR("Can not enable layer at mixer\n");
- return res;
- }
-
- return 0;
-}
-
-static int sti_drm_disable_plane(struct drm_plane *plane)
-{
- struct sti_layer *layer;
- struct sti_mixer *mixer;
- int lay_res, mix_res;
-
- if (!plane->crtc) {
- DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
- return 0;
- }
- layer = to_sti_layer(plane);
- mixer = to_sti_mixer(plane->crtc);
-
- DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- plane->crtc->base.id, sti_mixer_to_str(mixer),
- plane->base.id, sti_layer_to_str(layer));
-
- /* Disable layer at mixer level */
- mix_res = sti_mixer_set_layer_status(mixer, layer, false);
- if (mix_res)
- DRM_ERROR("Can not disable layer at mixer\n");
-
- /* Wait a while to be sure that a Vsync event is received */
- msleep(WAIT_NEXT_VSYNC_MS);
-
- /* Then disable layer itself */
- lay_res = sti_layer_disable(layer);
- if (lay_res)
- DRM_ERROR("Layer disable failed\n");
-
- if (lay_res || mix_res)
- return -EINVAL;
-
- return 0;
-}
-
-static void sti_drm_plane_destroy(struct drm_plane *plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_helper_disable(plane);
- drm_plane_cleanup(plane);
-}
-
-static int sti_drm_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct sti_drm_private *private = dev->dev_private;
- struct sti_layer *layer = to_sti_layer(plane);
-
- DRM_DEBUG_DRIVER("\n");
-
- if (property == private->plane_zorder_property) {
- layer->zorder = val;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static struct drm_plane_funcs sti_drm_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_drm_plane_destroy,
- .set_property = sti_drm_plane_set_property,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- const struct drm_plane_state *new_state)
-{
- return 0;
-}
-
-static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- const struct drm_plane_state *old_fb)
-{
-}
-
-static int sti_drm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- return 0;
-}
-
-static void sti_drm_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *oldstate)
-{
- struct drm_plane_state *state = plane->state;
-
- sti_drm_update_plane(plane, state->crtc, state->fb,
- state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- state->src_x, state->src_y,
- state->src_w, state->src_h);
-}
-
-static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *oldstate)
-{
- sti_drm_disable_plane(plane);
-}
-
-static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
- .prepare_fb = sti_drm_plane_prepare_fb,
- .cleanup_fb = sti_drm_plane_cleanup_fb,
- .atomic_check = sti_drm_plane_atomic_check,
- .atomic_update = sti_drm_plane_atomic_update,
- .atomic_disable = sti_drm_plane_atomic_disable,
-};
-
-static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
- uint64_t default_val)
-{
- struct drm_device *dev = plane->dev;
- struct sti_drm_private *private = dev->dev_private;
- struct drm_property *prop;
- struct sti_layer *layer = to_sti_layer(plane);
-
- prop = private->plane_zorder_property;
- if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos", 0,
- GAM_MIXER_NB_DEPTH_LEVEL - 1);
- if (!prop)
- return;
-
- private->plane_zorder_property = prop;
- }
-
- drm_object_attach_property(&plane->base, prop, default_val);
- layer->zorder = default_val;
-}
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
- struct sti_layer *layer,
- unsigned int possible_crtcs,
- enum drm_plane_type type)
-{
- int err, i;
- uint64_t default_zorder = 0;
-
- err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
- &sti_drm_plane_funcs,
- sti_layer_get_formats(layer),
- sti_layer_get_nb_formats(layer), type);
- if (err) {
- DRM_ERROR("Failed to initialize plane\n");
- return NULL;
- }
-
- drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
-
- for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
- if (sti_layer_default_zorder[i] == layer->desc)
- break;
-
- default_zorder = i + 1;
-
- if (type == DRM_PLANE_TYPE_OVERLAY)
- sti_drm_plane_attach_zorder_property(&layer->plane,
- default_zorder);
-
- DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
- layer->plane.base.id,
- sti_layer_to_str(layer), default_zorder);
-
- return &layer->plane;
-}
-EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
deleted file mode 100644
index 4f191839f2a7..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_PLANE_H_
-#define _STI_DRM_PLANE_H_
-
-#include <drm/drmP.h>
-
-struct sti_layer;
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
- struct sti_layer *layer,
- unsigned int possible_crtcs,
- enum drm_plane_type type);
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 59d558b400b3..6f4af6a8ba1b 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -18,8 +18,8 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
+#include "sti_drv.h"
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
@@ -30,15 +30,15 @@
#define STI_MAX_FB_HEIGHT 4096
#define STI_MAX_FB_WIDTH 4096
-static void sti_drm_atomic_schedule(struct sti_drm_private *private,
- struct drm_atomic_state *state)
+static void sti_atomic_schedule(struct sti_private *private,
+ struct drm_atomic_state *state)
{
private->commit.state = state;
schedule_work(&private->commit.work);
}
-static void sti_drm_atomic_complete(struct sti_drm_private *private,
- struct drm_atomic_state *state)
+static void sti_atomic_complete(struct sti_private *private,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = private->drm_dev;
@@ -68,18 +68,18 @@ static void sti_drm_atomic_complete(struct sti_drm_private *private,
drm_atomic_state_free(state);
}
-static void sti_drm_atomic_work(struct work_struct *work)
+static void sti_atomic_work(struct work_struct *work)
{
- struct sti_drm_private *private = container_of(work,
- struct sti_drm_private, commit.work);
+ struct sti_private *private = container_of(work,
+ struct sti_private, commit.work);
- sti_drm_atomic_complete(private, private->commit.state);
+ sti_atomic_complete(private, private->commit.state);
}
-static int sti_drm_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state, bool async)
+static int sti_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state, bool async)
{
- struct sti_drm_private *private = drm->dev_private;
+ struct sti_private *private = drm->dev_private;
int err;
err = drm_atomic_helper_prepare_planes(drm, state);
@@ -99,21 +99,21 @@ static int sti_drm_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(drm, state);
if (async)
- sti_drm_atomic_schedule(private, state);
+ sti_atomic_schedule(private, state);
else
- sti_drm_atomic_complete(private, state);
+ sti_atomic_complete(private, state);
mutex_unlock(&private->commit.lock);
return 0;
}
-static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
+static struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = sti_drm_atomic_commit,
+ .atomic_commit = sti_atomic_commit,
};
-static void sti_drm_mode_config_init(struct drm_device *dev)
+static void sti_mode_config_init(struct drm_device *dev)
{
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -126,15 +126,15 @@ static void sti_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.max_width = STI_MAX_FB_HEIGHT;
dev->mode_config.max_height = STI_MAX_FB_WIDTH;
- dev->mode_config.funcs = &sti_drm_mode_config_funcs;
+ dev->mode_config.funcs = &sti_mode_config_funcs;
}
-static int sti_drm_load(struct drm_device *dev, unsigned long flags)
+static int sti_load(struct drm_device *dev, unsigned long flags)
{
- struct sti_drm_private *private;
+ struct sti_private *private;
int ret;
- private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private) {
DRM_ERROR("Failed to allocate private\n");
return -ENOMEM;
@@ -143,12 +143,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
private->drm_dev = dev;
mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, sti_drm_atomic_work);
+ INIT_WORK(&private->commit.work, sti_atomic_work);
drm_mode_config_init(dev);
drm_kms_helper_poll_init(dev);
- sti_drm_mode_config_init(dev);
+ sti_mode_config_init(dev);
ret = component_bind_all(dev->dev, dev);
if (ret) {
@@ -162,13 +162,13 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
#ifdef CONFIG_DRM_STI_FBDEV
drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
#endif
return 0;
}
-static const struct file_operations sti_drm_driver_fops = {
+static const struct file_operations sti_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.mmap = drm_gem_cma_mmap,
@@ -181,33 +181,33 @@ static const struct file_operations sti_drm_driver_fops = {
.release = drm_release,
};
-static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
+static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
{
/* we want to be able to write in mmapped buffer */
flags |= O_RDWR;
return drm_gem_prime_export(dev, obj, flags);
}
-static struct drm_driver sti_drm_driver = {
+static struct drm_driver sti_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
DRIVER_GEM | DRIVER_PRIME,
- .load = sti_drm_load,
+ .load = sti_load,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
- .fops = &sti_drm_driver_fops,
+ .fops = &sti_driver_fops,
.get_vblank_counter = drm_vblank_count,
- .enable_vblank = sti_drm_crtc_enable_vblank,
- .disable_vblank = sti_drm_crtc_disable_vblank,
+ .enable_vblank = sti_crtc_enable_vblank,
+ .disable_vblank = sti_crtc_disable_vblank,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = sti_drm_gem_prime_export,
+ .gem_prime_export = sti_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@@ -227,30 +227,32 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
-static int sti_drm_bind(struct device *dev)
+static int sti_bind(struct device *dev)
{
- return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
+ return drm_platform_init(&sti_driver, to_platform_device(dev));
}
-static void sti_drm_unbind(struct device *dev)
+static void sti_unbind(struct device *dev)
{
drm_put_dev(dev_get_drvdata(dev));
}
-static const struct component_master_ops sti_drm_ops = {
- .bind = sti_drm_bind,
- .unbind = sti_drm_unbind,
+static const struct component_master_ops sti_ops = {
+ .bind = sti_bind,
+ .unbind = sti_unbind,
};
-static int sti_drm_master_probe(struct platform_device *pdev)
+static int sti_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
+ struct device_node *node = dev->of_node;
struct device_node *child_np;
struct component_match *match = NULL;
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ of_platform_populate(node, NULL, NULL, dev);
+
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
@@ -259,68 +261,33 @@ static int sti_drm_master_probe(struct platform_device *pdev)
child_np = of_get_next_available_child(node, child_np);
}
- return component_master_add_with_match(dev, &sti_drm_ops, match);
-}
-
-static int sti_drm_master_remove(struct platform_device *pdev)
-{
- component_master_del(&pdev->dev, &sti_drm_ops);
- return 0;
+ return component_master_add_with_match(dev, &sti_ops, match);
}
-static struct platform_driver sti_drm_master_driver = {
- .probe = sti_drm_master_probe,
- .remove = sti_drm_master_remove,
- .driver = {
- .name = DRIVER_NAME "__master",
- },
-};
-
-static int sti_drm_platform_probe(struct platform_device *pdev)
+static int sti_platform_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- struct platform_device *master;
-
- of_platform_populate(node, NULL, NULL, dev);
-
- platform_driver_register(&sti_drm_master_driver);
- master = platform_device_register_resndata(dev,
- DRIVER_NAME "__master", -1,
- NULL, 0, NULL, 0);
- if (IS_ERR(master))
- return PTR_ERR(master);
-
- platform_set_drvdata(pdev, master);
- return 0;
-}
-
-static int sti_drm_platform_remove(struct platform_device *pdev)
-{
- struct platform_device *master = platform_get_drvdata(pdev);
-
+ component_master_del(&pdev->dev, &sti_ops);
of_platform_depopulate(&pdev->dev);
- platform_device_unregister(master);
- platform_driver_unregister(&sti_drm_master_driver);
+
return 0;
}
-static const struct of_device_id sti_drm_dt_ids[] = {
+static const struct of_device_id sti_dt_ids[] = {
{ .compatible = "st,sti-display-subsystem", },
{ /* end node */ },
};
-MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
+MODULE_DEVICE_TABLE(of, sti_dt_ids);
-static struct platform_driver sti_drm_platform_driver = {
- .probe = sti_drm_platform_probe,
- .remove = sti_drm_platform_remove,
+static struct platform_driver sti_platform_driver = {
+ .probe = sti_platform_probe,
+ .remove = sti_platform_remove,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = sti_drm_dt_ids,
+ .of_match_table = sti_dt_ids,
},
};
-module_platform_driver(sti_drm_platform_driver);
+module_platform_driver(sti_platform_driver);
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index c413aa3ff402..9372f69e1859 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -4,8 +4,8 @@
* License terms: GNU General Public License (GPL), version 2
*/
-#ifndef _STI_DRM_DRV_H_
-#define _STI_DRM_DRV_H_
+#ifndef _STI_DRV_H_
+#define _STI_DRV_H_
#include <drm/drmP.h>
@@ -20,7 +20,7 @@ struct sti_tvout;
* @plane_zorder_property: z-order property for CRTC planes
* @drm_dev: drm device
*/
-struct sti_drm_private {
+struct sti_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 087906fd8846..9365670427ad 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -9,9 +9,12 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
#include "sti_compositor.h"
#include "sti_gdp.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
#include "sti_vtg.h"
#define ALPHASWITCH BIT(6)
@@ -26,7 +29,7 @@
#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565 0x04
#define GDP_ARGB8888 0x05
-#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
+#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
#define GDP_CLUT8 0x0B
@@ -53,8 +56,8 @@
#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
#define GAM_GDP_SIZE_MAX 0x7FF
-#define GDP_NODE_NB_BANK 2
-#define GDP_NODE_PER_FIELD 2
+#define GDP_NODE_NB_BANK 2
+#define GDP_NODE_PER_FIELD 2
struct sti_gdp_node {
u32 gam_gdp_ctl;
@@ -85,16 +88,20 @@ struct sti_gdp_node_list {
/**
* STI GDP structure
*
- * @layer: layer structure
+ * @sti_plane: sti_plane structure
+ * @dev: driver device
+ * @regs: gdp registers
* @clk_pix: pixel clock for the current gdp
* @clk_main_parent: gdp parent clock if main path used
* @clk_aux_parent: gdp parent clock if aux path used
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
- * @node_list: array of node list
+ * @node_list: array of node lists
*/
struct sti_gdp {
- struct sti_layer layer;
+ struct sti_plane plane;
+ struct device *dev;
+ void __iomem *regs;
struct clk *clk_pix;
struct clk *clk_main_parent;
struct clk *clk_aux_parent;
@@ -103,7 +110,7 @@ struct sti_gdp {
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};
-#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
+#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_XRGB8888,
@@ -120,16 +127,6 @@ static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_C8,
};
-static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
-{
- return gdp_supported_formats;
-}
-
-static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
-{
- return ARRAY_SIZE(gdp_supported_formats);
-}
-
static int sti_gdp_fourcc2format(int fourcc)
{
switch (fourcc) {
@@ -175,20 +172,19 @@ static int sti_gdp_get_alpharange(int format)
/**
* sti_gdp_get_free_nodes
- * @layer: gdp layer
+ * @gdp: gdp pointer
*
* Look for a GDP node list that is not currently read by the HW.
*
* RETURNS:
* Pointer to the free GDP node list
*/
-static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
+static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
- struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
- hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
@@ -199,7 +195,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
 /* in hazardous cases restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
- sti_layer_to_str(layer), hw_nvn);
+ sti_plane_to_str(&gdp->plane), hw_nvn);
end:
return &gdp->node_list[0];
@@ -207,7 +203,7 @@ end:
/**
* sti_gdp_get_current_nodes
- * @layer: GDP layer
+ * @gdp: gdp pointer
*
* Look for GDP nodes that are currently read by the HW.
*
@@ -215,13 +211,12 @@ end:
* Pointer to the current GDP node list
*/
static
-struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
+struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
- struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
- hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
@@ -232,205 +227,25 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
end:
DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
- hw_nvn, sti_layer_to_str(layer));
+ hw_nvn, sti_plane_to_str(&gdp->plane));
return NULL;
}
/**
- * sti_gdp_prepare_layer
- * @lay: gdp layer
- * @first_prepare: true if it is the first time this function is called
- *
- * Update the free GDP node list according to the layer properties.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
-{
- struct sti_gdp_node_list *list;
- struct sti_gdp_node *top_field, *btm_field;
- struct drm_display_mode *mode = layer->mode;
- struct device *dev = layer->dev;
- struct sti_gdp *gdp = to_sti_gdp(layer);
- struct sti_compositor *compo = dev_get_drvdata(dev);
- int format;
- unsigned int depth, bpp;
- int rate = mode->clock * 1000;
- int res;
- u32 ydo, xdo, yds, xds;
-
- list = sti_gdp_get_free_nodes(layer);
- top_field = list->top_field;
- btm_field = list->btm_field;
-
- dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
- sti_layer_to_str(layer), top_field, btm_field);
-
- /* Build the top field from layer params */
- top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
- top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
- format = sti_gdp_fourcc2format(layer->format);
- if (format == -1) {
- DRM_ERROR("Format not supported by GDP %.4s\n",
- (char *)&layer->format);
- return 1;
- }
- top_field->gam_gdp_ctl |= format;
- top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
- top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
-
- /* pixel memory location */
- drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
- top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
- top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
- top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
-
- /* input parameters */
- top_field->gam_gdp_pmp = layer->pitches[0];
- top_field->gam_gdp_size =
- clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
- clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
-
- /* output parameters */
- ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
- yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
- xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
- xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
- top_field->gam_gdp_vpo = (ydo << 16) | xdo;
- top_field->gam_gdp_vps = (yds << 16) | xds;
-
- /* Same content and chained together */
- memcpy(btm_field, top_field, sizeof(*btm_field));
- top_field->gam_gdp_nvn = list->btm_field_paddr;
- btm_field->gam_gdp_nvn = list->top_field_paddr;
-
- /* Interlaced mode */
- if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
- btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
- layer->pitches[0];
-
- if (first_prepare) {
- /* Register gdp callback */
- if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux,
- &gdp->vtg_field_nb, layer->mixer_id)) {
- DRM_ERROR("Cannot register VTG notifier\n");
- return 1;
- }
-
- /* Set and enable gdp clock */
- if (gdp->clk_pix) {
- struct clk *clkp;
- /* According to the mixer used, the gdp pixel clock
- * should have a different parent clock. */
- if (layer->mixer_id == STI_MIXER_MAIN)
- clkp = gdp->clk_main_parent;
- else
- clkp = gdp->clk_aux_parent;
-
- if (clkp)
- clk_set_parent(gdp->clk_pix, clkp);
-
- res = clk_set_rate(gdp->clk_pix, rate);
- if (res < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
- rate);
- return 1;
- }
-
- if (clk_prepare_enable(gdp->clk_pix)) {
- DRM_ERROR("Failed to prepare/enable gdp\n");
- return 1;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * sti_gdp_commit_layer
- * @lay: gdp layer
- *
- * Update the NVN field of the 'right' field of the current GDP node (being
- * used by the HW) with the address of the updated ('free') top field GDP node.
- * - In interlaced mode the 'right' field is the bottom field as we update
- * frames starting from their top field
- * - In progressive mode, we update both bottom and top fields which are
- * equal nodes.
- * At the next VSYNC, the updated node list will be used by the HW.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_commit_layer(struct sti_layer *layer)
-{
- struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
- struct sti_gdp_node *updated_top_node = updated_list->top_field;
- struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
- struct sti_gdp *gdp = to_sti_gdp(layer);
- u32 dma_updated_top = updated_list->top_field_paddr;
- u32 dma_updated_btm = updated_list->btm_field_paddr;
- struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
-
- dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
- sti_layer_to_str(layer),
- updated_top_node, updated_btm_node);
- dev_dbg(layer->dev, "Current NVN:0x%X\n",
- readl(layer->regs + GAM_GDP_NVN_OFFSET));
- dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
- (unsigned long)layer->paddr,
- readl(layer->regs + GAM_GDP_PML_OFFSET));
-
- if (curr_list == NULL) {
- /* First update or invalid node should directly write in the
- * hw register */
- DRM_DEBUG_DRIVER("%s first update (or invalid node)",
- sti_layer_to_str(layer));
-
- writel(gdp->is_curr_top == true ?
- dma_updated_btm : dma_updated_top,
- layer->regs + GAM_GDP_NVN_OFFSET);
- return 0;
- }
-
- if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
- if (gdp->is_curr_top == true) {
- /* Do not update in the middle of the frame, but
- * postpone the update after the bottom field has
- * been displayed */
- curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
- } else {
- /* Direct update to avoid one frame delay */
- writel(dma_updated_top,
- layer->regs + GAM_GDP_NVN_OFFSET);
- }
- } else {
- /* Direct update for progressive to avoid one frame delay */
- writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
- }
-
- return 0;
-}
-
-/**
- * sti_gdp_disable_layer
- * @lay: gdp layer
+ * sti_gdp_disable
+ * @gdp: gdp pointer
*
* Disable a GDP.
- *
- * RETURNS:
- * 0 on success.
*/
-static int sti_gdp_disable_layer(struct sti_layer *layer)
+static void sti_gdp_disable(struct sti_gdp *gdp)
{
+ struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+ struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+ struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
unsigned int i;
- struct sti_gdp *gdp = to_sti_gdp(layer);
- struct sti_compositor *compo = dev_get_drvdata(layer->dev);
- DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+ DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
/* Set the nodes as 'to be ignored on mixer' */
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@@ -438,14 +253,14 @@ static int sti_gdp_disable_layer(struct sti_layer *layer)
gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
}
- if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
+ if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
if (gdp->clk_pix)
clk_disable_unprepare(gdp->clk_pix);
- return 0;
+ gdp->plane.status = STI_PLANE_DISABLED;
}
/**
@@ -464,6 +279,14 @@ int sti_gdp_field_cb(struct notifier_block *nb,
{
struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
+ if (gdp->plane.status == STI_PLANE_FLUSHING) {
+ /* the disable needs to be synchronized with the vsync event */
+ DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+ sti_plane_to_str(&gdp->plane));
+
+ sti_gdp_disable(gdp);
+ }
+
switch (event) {
case VTG_TOP_FIELD_EVENT:
gdp->is_curr_top = true;
@@ -479,10 +302,9 @@ int sti_gdp_field_cb(struct notifier_block *nb,
return 0;
}
-static void sti_gdp_init(struct sti_layer *layer)
+static void sti_gdp_init(struct sti_gdp *gdp)
{
- struct sti_gdp *gdp = to_sti_gdp(layer);
- struct device_node *np = layer->dev->of_node;
+ struct device_node *np = gdp->dev->of_node;
dma_addr_t dma_addr;
void *base;
unsigned int i, size;
@@ -490,8 +312,8 @@ static void sti_gdp_init(struct sti_layer *layer)
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
- base = dma_alloc_writecombine(layer->dev,
- size, &dma_addr, GFP_KERNEL | GFP_DMA);
+ base = dma_alloc_writecombine(gdp->dev,
+ size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -526,7 +348,7 @@ static void sti_gdp_init(struct sti_layer *layer)
/* GDP of STiH407 chip have its own pixel clock */
char *clk_name;
- switch (layer->desc) {
+ switch (gdp->plane.desc) {
case STI_GDP_0:
clk_name = "pix_gdp1";
break;
@@ -544,32 +366,249 @@ static void sti_gdp_init(struct sti_layer *layer)
return;
}
- gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
+ gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
- gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
+ gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
if (IS_ERR(gdp->clk_main_parent))
DRM_ERROR("Cannot get main_parent clock\n");
- gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
+ gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
if (IS_ERR(gdp->clk_aux_parent))
DRM_ERROR("Cannot get aux_parent clock\n");
}
}
-static const struct sti_layer_funcs gdp_ops = {
- .get_formats = sti_gdp_get_formats,
- .get_nb_formats = sti_gdp_get_nb_formats,
- .init = sti_gdp_init,
- .prepare = sti_gdp_prepare_layer,
- .commit = sti_gdp_commit_layer,
- .disable = sti_gdp_disable_layer,
+static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_gdp *gdp = to_sti_gdp(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
+ struct drm_framebuffer *fb = state->fb;
+ bool first_prepare = plane->status == STI_PLANE_DISABLED;
+ struct sti_mixer *mixer;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_x, src_y, src_w, src_h;
+ struct drm_gem_cma_object *cma_obj;
+ struct sti_gdp_node_list *list;
+ struct sti_gdp_node_list *curr_list;
+ struct sti_gdp_node *top_field, *btm_field;
+ u32 dma_updated_top;
+ u32 dma_updated_btm;
+ int format;
+ unsigned int depth, bpp;
+ u32 ydo, xdo, yds, xds;
+ int res;
+
+ /* Manage the case where crtc is null (disabled) */
+ if (!crtc)
+ return;
+
+ mixer = to_sti_mixer(crtc);
+ mode = &crtc->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_plane_to_str(plane),
+ dst_w, dst_h, dst_x, dst_y,
+ src_w, src_h, src_x, src_y);
+
+ list = sti_gdp_get_free_nodes(gdp);
+ top_field = list->top_field;
+ btm_field = list->btm_field;
+
+ dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+ sti_plane_to_str(plane), top_field, btm_field);
+
+ /* build the top field */
+ top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+ top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+ format = sti_gdp_fourcc2format(fb->pixel_format);
+ if (format == -1) {
+ DRM_ERROR("Format not supported by GDP %.4s\n",
+ (char *)&fb->pixel_format);
+ return;
+ }
+ top_field->gam_gdp_ctl |= format;
+ top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+ top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+ (char *)&fb->pixel_format,
+ (unsigned long)cma_obj->paddr);
+
+ /* pixel memory location */
+ drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+ top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+ top_field->gam_gdp_pml += src_x * (bpp >> 3);
+ top_field->gam_gdp_pml += src_y * fb->pitches[0];
+
+ /* input parameters */
+ top_field->gam_gdp_pmp = fb->pitches[0];
+ top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
+ clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
+
+ /* output parameters */
+ ydo = sti_vtg_get_line_number(*mode, dst_y);
+ yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
+ top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+ top_field->gam_gdp_vps = (yds << 16) | xds;
+
+ /* Same content and chained together */
+ memcpy(btm_field, top_field, sizeof(*btm_field));
+ top_field->gam_gdp_nvn = list->btm_field_paddr;
+ btm_field->gam_gdp_nvn = list->top_field_paddr;
+
+ /* Interlaced mode */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+ fb->pitches[0];
+
+ if (first_prepare) {
+ /* Register gdp callback */
+ if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux,
+ &gdp->vtg_field_nb, mixer->id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return;
+ }
+
+ /* Set and enable gdp clock */
+ if (gdp->clk_pix) {
+ struct clk *clkp;
+ int rate = mode->clock * 1000;
+
+ /* Depending on the mixer used, the gdp pixel clock
+ * should use a different parent clock. */
+ if (mixer->id == STI_MIXER_MAIN)
+ clkp = gdp->clk_main_parent;
+ else
+ clkp = gdp->clk_aux_parent;
+
+ if (clkp)
+ clk_set_parent(gdp->clk_pix, clkp);
+
+ res = clk_set_rate(gdp->clk_pix, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+ rate);
+ return;
+ }
+
+ if (clk_prepare_enable(gdp->clk_pix)) {
+ DRM_ERROR("Failed to prepare/enable gdp\n");
+ return;
+ }
+ }
+ }
+
+ /* Update the NVN field of the 'right' field of the current GDP node
+ * (being used by the HW) with the address of the updated ('free') top
+ * field GDP node.
+ * - In interlaced mode the 'right' field is the bottom field as we
+ * update frames starting from their top field
+ * - In progressive mode, we update both bottom and top fields which
+ * are equal nodes.
+ * At the next VSYNC, the updated node list will be used by the HW.
+ */
+ curr_list = sti_gdp_get_current_nodes(gdp);
+ dma_updated_top = list->top_field_paddr;
+ dma_updated_btm = list->btm_field_paddr;
+
+ dev_dbg(gdp->dev, "Current NVN:0x%X\n",
+ readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+ dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
+ (unsigned long)cma_obj->paddr,
+ readl(gdp->regs + GAM_GDP_PML_OFFSET));
+
+ if (!curr_list) {
+ /* A first update or an invalid node is written directly to the
+ * hw register */
+ DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+ sti_plane_to_str(plane));
+
+ writel(gdp->is_curr_top ?
+ dma_updated_btm : dma_updated_top,
+ gdp->regs + GAM_GDP_NVN_OFFSET);
+ goto end;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (gdp->is_curr_top) {
+ /* Do not update in the middle of the frame, but
+ * postpone the update until after the bottom field has
+ * been displayed */
+ curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
+ } else {
+ /* Direct update to avoid one frame delay */
+ writel(dma_updated_top,
+ gdp->regs + GAM_GDP_NVN_OFFSET);
+ }
+ } else {
+ /* Direct update for progressive to avoid one frame delay */
+ writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
+ }
+
+end:
+ plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+ if (!drm_plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+ drm_plane->base.id);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+
+ plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+ .atomic_update = sti_gdp_atomic_update,
+ .atomic_disable = sti_gdp_atomic_disable,
};
-struct sti_layer *sti_gdp_create(struct device *dev, int id)
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+ struct device *dev, int desc,
+ void __iomem *baseaddr,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type)
{
struct sti_gdp *gdp;
+ int res;
gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
if (!gdp) {
@@ -577,8 +616,33 @@ struct sti_layer *sti_gdp_create(struct device *dev, int id)
return NULL;
}
- gdp->layer.ops = &gdp_ops;
+ gdp->dev = dev;
+ gdp->regs = baseaddr;
+ gdp->plane.desc = desc;
+ gdp->plane.status = STI_PLANE_DISABLED;
+
gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
- return (struct sti_layer *)gdp;
+ sti_gdp_init(gdp);
+
+ res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
+ possible_crtcs,
+ &sti_plane_helpers_funcs,
+ gdp_supported_formats,
+ ARRAY_SIZE(gdp_supported_formats),
+ type);
+ if (res) {
+ DRM_ERROR("Failed to initialize universal plane\n");
+ goto err;
+ }
+
+ drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
+
+ sti_plane_init_property(&gdp->plane, type);
+
+ return &gdp->plane.drm_plane;
+
+err:
+ devm_kfree(dev, gdp);
+ return NULL;
}
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 1dab68274ad3..73947a4a8004 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,9 @@
#include <linux/types.h>
-struct sti_layer *sti_gdp_create(struct device *dev, int id);
-
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+ struct device *dev, int desc,
+ void __iomem *baseaddr,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type);
#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f28a4d54487c..09e29e43423e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -588,7 +588,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
return count;
fail:
- DRM_ERROR("Can not read HDMI EDID\n");
+ DRM_ERROR("Can't read HDMI EDID\n");
return 0;
}
@@ -693,21 +693,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
- struct device_node *ddc;
int err;
- ddc = of_parse_phandle(dev->of_node, "ddc", 0);
- if (ddc) {
- hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
- if (!hdmi->ddc_adapt) {
- err = -EPROBE_DEFER;
- of_node_put(ddc);
- return err;
- }
-
- of_node_put(ddc);
- }
-
/* Set the drm device handle */
hdmi->drm_dev = drm_dev;
@@ -796,6 +783,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
struct resource *res;
+ struct device_node *ddc;
int ret;
DRM_INFO("%s\n", __func__);
@@ -804,6 +792,17 @@ static int sti_hdmi_probe(struct platform_device *pdev)
if (!hdmi)
return -ENOMEM;
+ ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
+ if (ddc) {
+ hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
+ if (!hdmi->ddc_adapt) {
+ of_node_put(ddc);
+ return -EPROBE_DEFER;
+ }
+
+ of_node_put(ddc);
+ }
+
hdmi->dev = pdev->dev;
/* Get resources */
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b0eb62de1b2e..7c8f9b8bfae1 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -12,11 +12,12 @@
#include <linux/reset.h>
#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
-#include "sti_drm_plane.h"
-#include "sti_hqvdp.h"
+#include "sti_compositor.h"
#include "sti_hqvdp_lut.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
#include "sti_vtg.h"
/* Firmware name */
@@ -322,8 +323,7 @@ struct sti_hqvdp_cmd {
* @dev: driver device
* @drm_dev: the drm device
* @regs: registers
- * @layer: layer structure for hqvdp it self
- * @vid_plane: VID plug used as link with compositor IP
+ * @plane: plane structure for hqvdp itself
* @clk: IP clock
* @clk_pix_main: pix main clock
* @reset: reset control
@@ -334,13 +334,13 @@ struct sti_hqvdp_cmd {
* @hqvdp_cmd: buffer of commands
* @hqvdp_cmd_paddr: physical address of hqvdp_cmd
* @vtg: vtg for main data path
+ * @xp70_initialized: true if xp70 is already initialized
*/
struct sti_hqvdp {
struct device *dev;
struct drm_device *drm_dev;
void __iomem *regs;
- struct sti_layer layer;
- struct drm_plane *vid_plane;
+ struct sti_plane plane;
struct clk *clk;
struct clk *clk_pix_main;
struct reset_control *reset;
@@ -351,24 +351,15 @@ struct sti_hqvdp {
void *hqvdp_cmd;
dma_addr_t hqvdp_cmd_paddr;
struct sti_vtg *vtg;
+ bool xp70_initialized;
};
-#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
+#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
static const uint32_t hqvdp_supported_formats[] = {
DRM_FORMAT_NV12,
};
-static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
-{
- return hqvdp_supported_formats;
-}
-
-static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
-{
- return ARRAY_SIZE(hqvdp_supported_formats);
-}
-
/**
* sti_hqvdp_get_free_cmd
* @hqvdp: hqvdp structure
@@ -484,7 +475,12 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
/**
* sti_hqvdp_check_hw_scaling
- * @layer: hqvdp layer
+ * @hqvdp: hqvdp pointer
+ * @mode: display mode with timing constraints
+ * @src_w: source width
+ * @src_h: source height
+ * @dst_w: destination width
+ * @dst_h: destination height
*
* Check if the HW is able to perform the scaling request
* The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@@ -498,184 +494,36 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
* RETURNS:
* True if the HW can scale.
*/
-static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
+static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
+ struct drm_display_mode *mode,
+ int src_w, int src_h,
+ int dst_w, int dst_h)
{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
unsigned long lfw;
unsigned int inv_zy;
- lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
- lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
+ lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
+ lfw /= max(src_w, dst_w) * mode->clock / 1000;
- inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
+ inv_zy = DIV_ROUND_UP(src_h, dst_h);
return (inv_zy <= lfw) ? true : false;
}
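/*
 * Worked example of the check above, using illustrative values: assuming a
 * 400 MHz hqvdp->clk, a mode with htotal = 2200 and clock = 148500 (kHz),
 * and a 1920-pixel wide source and destination, the integer arithmetic gives
 * lfw = 2200 * 400 / (1920 * 148500 / 1000) = 3.
 * Downscaling 1080 lines to 540 gives inv_zy = DIV_ROUND_UP(1080, 540) = 2,
 * so inv_zy <= lfw and the firmware can handle the scaling request.
 */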
/**
- * sti_hqvdp_prepare_layer
- * @layer: hqvdp layer
- * @first_prepare: true if it is the first time this function is called
+ * sti_hqvdp_disable
+ * @hqvdp: hqvdp pointer
*
- * Prepares a command for the firmware
- *
- * RETURNS:
- * 0 on success.
+ * Disables the HQVDP plane
*/
-static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
-{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
- struct sti_hqvdp_cmd *cmd;
- int scale_h, scale_v;
- int cmd_offset;
-
- dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
- /* prepare and commit VID plane */
- hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
- layer->crtc, layer->fb,
- layer->dst_x, layer->dst_y,
- layer->dst_w, layer->dst_h,
- layer->src_x, layer->src_y,
- layer->src_w, layer->src_h);
-
- cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
- if (cmd_offset == -1) {
- DRM_ERROR("No available hqvdp_cmd now\n");
- return -EBUSY;
- }
- cmd = hqvdp->hqvdp_cmd + cmd_offset;
-
- if (!sti_hqvdp_check_hw_scaling(layer)) {
- DRM_ERROR("Scaling beyond HW capabilities\n");
- return -EINVAL;
- }
-
- /* Static parameters, defaulting to progressive mode */
- cmd->top.config = TOP_CONFIG_PROGRESSIVE;
- cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
- cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
- cmd->csdi.config = CSDI_CONFIG_PROG;
-
- /* VC1RE, FMD bypassed : keep everything set to 0
- * IQI/P2I bypassed */
- cmd->iqi.config = IQI_CONFIG_DFLT;
- cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
- cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
- cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
-
- /* Buffer planes address */
- cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
- cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
-
- /* Pitches */
- cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
- layer->pitches[0];
- cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
- layer->pitches[1];
-
- /* Input / output size
- * Align to upper even value */
- layer->dst_w = ALIGN(layer->dst_w, 2);
- layer->dst_h = ALIGN(layer->dst_h, 2);
-
- if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
- (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
- (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
- (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
- DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
- layer->src_w, layer->src_h,
- layer->dst_w, layer->dst_h);
- return -EINVAL;
- }
- cmd->top.input_viewport_size = cmd->top.input_frame_size =
- layer->src_h << 16 | layer->src_w;
- cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
- cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
-
- /* Handle interlaced */
- if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
- /* Top field to display */
- cmd->top.config = TOP_CONFIG_INTER_TOP;
-
- /* Update pitches and vert size */
- cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
- layer->src_w;
- cmd->top.luma_processed_pitch *= 2;
- cmd->top.luma_src_pitch *= 2;
- cmd->top.chroma_processed_pitch *= 2;
- cmd->top.chroma_src_pitch *= 2;
-
- /* Enable directional deinterlacing processing */
- cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
- cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
- cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
- }
-
- /* Update hvsrc lut coef */
- scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
- sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
-
- scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
- sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
-
- if (first_prepare) {
- /* Prevent VTG shutdown */
- if (clk_prepare_enable(hqvdp->clk_pix_main)) {
- DRM_ERROR("Failed to prepare/enable pix main clk\n");
- return -ENXIO;
- }
-
- /* Register VTG Vsync callback to handle bottom fields */
- if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
- sti_vtg_register_client(hqvdp->vtg,
- &hqvdp->vtg_nb, layer->mixer_id)) {
- DRM_ERROR("Cannot register VTG notifier\n");
- return -ENXIO;
- }
- }
-
- return 0;
-}
-
-static int sti_hqvdp_commit_layer(struct sti_layer *layer)
+static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
- int cmd_offset;
-
- dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
- cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
- if (cmd_offset == -1) {
- DRM_ERROR("No available hqvdp_cmd now\n");
- return -EBUSY;
- }
-
- writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
- hqvdp->regs + HQVDP_MBX_NEXT_CMD);
-
- hqvdp->curr_field_count++;
-
- /* Interlaced : get ready to display the bottom field at next Vsync */
- if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
- hqvdp->btm_field_pending = true;
-
- dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
- __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
-
- return 0;
-}
-
-static int sti_hqvdp_disable_layer(struct sti_layer *layer)
-{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
int i;
- DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+ DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
/* Unregister VTG Vsync callback */
- if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
- sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
+ if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
/* Set next cmd to NULL */
@@ -691,15 +539,10 @@ static int sti_hqvdp_disable_layer(struct sti_layer *layer)
/* VTG can stop now */
clk_disable_unprepare(hqvdp->clk_pix_main);
- if (i == POLL_MAX_ATTEMPT) {
+ if (i == POLL_MAX_ATTEMPT)
DRM_ERROR("XP70 could not revert to idle\n");
- return -ENXIO;
- }
-
- /* disable VID plane */
- hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
- return 0;
+ hqvdp->plane.status = STI_PLANE_DISABLED;
}
/**
@@ -724,6 +567,14 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
return 0;
}
+ if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
+ /* disabling must be synchronized with the vsync event */
+ DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+ sti_plane_to_str(&hqvdp->plane));
+
+ sti_hqvdp_disable(hqvdp);
+ }
+
if (hqvdp->btm_field_pending) {
/* Create the btm field command from the current one */
btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
@@ -758,32 +609,10 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
return 0;
}
-static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
+static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
{
- struct drm_plane *plane;
-
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- struct sti_layer *layer = to_sti_layer(plane);
-
- if (layer->desc == id)
- return plane;
- }
-
- return NULL;
-}
-
-static void sti_hqvd_init(struct sti_layer *layer)
-{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
int size;
- /* find the plane macthing with vid 0 */
- hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
- if (!hqvdp->vid_plane) {
- DRM_ERROR("Cannot find Main video layer\n");
- return;
- }
-
hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
/* Allocate memory for the VDP commands */
@@ -799,24 +628,213 @@ static void sti_hqvd_init(struct sti_layer *layer)
memset(hqvdp->hqvdp_cmd, 0, size);
}
-static const struct sti_layer_funcs hqvdp_ops = {
- .get_formats = sti_hqvdp_get_formats,
- .get_nb_formats = sti_hqvdp_get_nb_formats,
- .init = sti_hqvd_init,
- .prepare = sti_hqvdp_prepare_layer,
- .commit = sti_hqvdp_commit_layer,
- .disable = sti_hqvdp_disable_layer,
+static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode = &crtc->mode;
+ int dst_x = state->crtc_x;
+ int dst_y = state->crtc_y;
+ int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x, src_y, src_w and src_h are expressed in 16.16 fixed point */
+ int src_x = state->src_x >> 16;
+ int src_y = state->src_y >> 16;
+ int src_w = state->src_w >> 16;
+ int src_h = state->src_h >> 16;
+ bool first_prepare = (plane->status == STI_PLANE_DISABLED);
+ struct drm_gem_cma_object *cma_obj;
+ struct sti_hqvdp_cmd *cmd;
+ int scale_h, scale_v;
+ int cmd_offset;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_plane_to_str(plane),
+ dst_w, dst_h, dst_x, dst_y,
+ src_w, src_h, src_x, src_y);
+
+ cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ DRM_ERROR("No available hqvdp_cmd now\n");
+ return;
+ }
+ cmd = hqvdp->hqvdp_cmd + cmd_offset;
+
+ if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+ src_w, src_h,
+ dst_w, dst_h)) {
+ DRM_ERROR("Scaling beyond HW capabilities\n");
+ return;
+ }
+
+ /* Static parameters, defaulting to progressive mode */
+ cmd->top.config = TOP_CONFIG_PROGRESSIVE;
+ cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
+ cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
+ cmd->csdi.config = CSDI_CONFIG_PROG;
+
+ /* VC1RE, FMD bypassed : keep everything set to 0
+ * IQI/P2I bypassed */
+ cmd->iqi.config = IQI_CONFIG_DFLT;
+ cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
+ cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
+ cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+ (char *)&fb->pixel_format,
+ (unsigned long)cma_obj->paddr);
+
+ /* Buffer planes address */
+ cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
+ cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
+
+ /* Pitches */
+ cmd->top.luma_processed_pitch = fb->pitches[0];
+ cmd->top.luma_src_pitch = fb->pitches[0];
+ cmd->top.chroma_processed_pitch = fb->pitches[1];
+ cmd->top.chroma_src_pitch = fb->pitches[1];
+
+ /* Input / output size
+ * Align to upper even value */
+ dst_w = ALIGN(dst_w, 2);
+ dst_h = ALIGN(dst_h, 2);
+
+ if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+ (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+ (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+ (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+ DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+ src_w, src_h,
+ dst_w, dst_h);
+ return;
+ }
+
+ cmd->top.input_viewport_size = src_h << 16 | src_w;
+ cmd->top.input_frame_size = src_h << 16 | src_w;
+ cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
+ cmd->top.input_viewport_ori = src_y << 16 | src_x;
+
+ /* Handle interlaced */
+ if (fb->flags & DRM_MODE_FB_INTERLACED) {
+ /* Top field to display */
+ cmd->top.config = TOP_CONFIG_INTER_TOP;
+
+ /* Update pitches and vert size */
+ cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
+ cmd->top.luma_processed_pitch *= 2;
+ cmd->top.luma_src_pitch *= 2;
+ cmd->top.chroma_processed_pitch *= 2;
+ cmd->top.chroma_src_pitch *= 2;
+
+ /* Enable directional deinterlacing processing */
+ cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
+ cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
+ cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
+ }
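+ /*
+ * For example (values for illustration only): a 1920x1080 framebuffer
+ * flagged DRM_MODE_FB_INTERLACED is programmed as a 1920x540 input frame
+ * with doubled luma/chroma pitches, since each field holds only every
+ * other line of the buffer; the bottom field is then queued from the
+ * VTG vsync callback.
+ */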
+
+ /* Update hvsrc lut coef */
+ scale_h = SCALE_FACTOR * dst_w / src_w;
+ sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
+
+ scale_v = SCALE_FACTOR * dst_h / src_h;
+ sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
+
+ if (first_prepare) {
+ /* Prevent VTG shutdown */
+ if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Failed to prepare/enable pix main clk\n");
+ return;
+ }
+
+ /* Register VTG Vsync callback to handle bottom fields */
+ if (sti_vtg_register_client(hqvdp->vtg,
+ &hqvdp->vtg_nb,
+ mixer->id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return;
+ }
+ }
+
+ writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
+ hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ hqvdp->curr_field_count++;
+
+ /* Interlaced : get ready to display the bottom field at next Vsync */
+ if (fb->flags & DRM_MODE_FB_INTERLACED)
+ hqvdp->btm_field_pending = true;
+
+ dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+ __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+
+ plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+ if (!drm_plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+ drm_plane->base.id);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+
+ plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+ .atomic_update = sti_hqvdp_atomic_update,
+ .atomic_disable = sti_hqvdp_atomic_disable,
};
-struct sti_layer *sti_hqvdp_create(struct device *dev)
+static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
+ struct device *dev, int desc)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+ int res;
+
+ hqvdp->plane.desc = desc;
+ hqvdp->plane.status = STI_PLANE_DISABLED;
- hqvdp->layer.ops = &hqvdp_ops;
+ sti_hqvdp_init(hqvdp);
- return &hqvdp->layer;
+ res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
+ &sti_plane_helpers_funcs,
+ hqvdp_supported_formats,
+ ARRAY_SIZE(hqvdp_supported_formats),
+ DRM_PLANE_TYPE_OVERLAY);
+ if (res) {
+ DRM_ERROR("Failed to initialize universal plane\n");
+ return NULL;
+ }
+
+ drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
+
+ sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
+
+ return &hqvdp->plane.drm_plane;
}
-EXPORT_SYMBOL(sti_hqvdp_create);
static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
{
@@ -859,6 +877,12 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
} *header;
DRM_DEBUG_DRIVER("\n");
+
+ if (hqvdp->xp70_initialized) {
+ DRM_INFO("HQVDP XP70 already initialized\n");
+ return;
+ }
+
/* Check firmware parts */
if (!firmware) {
DRM_ERROR("Firmware not available\n");
@@ -946,7 +970,10 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
/* Launch Vsync */
writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
- DRM_INFO("HQVDP XP70 started\n");
+ DRM_INFO("HQVDP XP70 initialized\n");
+
+ hqvdp->xp70_initialized = true;
+
out:
release_firmware(firmware);
}
@@ -955,7 +982,7 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- struct sti_layer *layer;
+ struct drm_plane *plane;
int err;
DRM_DEBUG_DRIVER("\n");
@@ -971,13 +998,10 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
return err;
}
- layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
- if (!layer) {
+ /* Create HQVDP plane once xp70 is initialized */
+ plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
+ if (!plane)
DRM_ERROR("Can't create HQVDP plane\n");
- return -ENOMEM;
- }
-
- sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
deleted file mode 100644
index cd5ecd0a6dea..000000000000
--- a/drivers/gpu/drm/sti/sti_hqvdp.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_HQVDP_H_
-#define _STI_HQVDP_H_
-
-struct sti_layer *sti_hqvdp_create(struct device *dev);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
deleted file mode 100644
index 899104f9d4bc..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- * Fabien Dessenne <fabien.dessenne@st.com>
- * for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_cursor.h"
-#include "sti_gdp.h"
-#include "sti_hqvdp.h"
-#include "sti_layer.h"
-#include "sti_vid.h"
-
-const char *sti_layer_to_str(struct sti_layer *layer)
-{
- switch (layer->desc) {
- case STI_GDP_0:
- return "GDP0";
- case STI_GDP_1:
- return "GDP1";
- case STI_GDP_2:
- return "GDP2";
- case STI_GDP_3:
- return "GDP3";
- case STI_VID_0:
- return "VID0";
- case STI_VID_1:
- return "VID1";
- case STI_CURSOR:
- return "CURSOR";
- case STI_HQVDP_0:
- return "HQVDP0";
- default:
- return "<UNKNOWN LAYER>";
- }
-}
-EXPORT_SYMBOL(sti_layer_to_str);
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
- void __iomem *baseaddr)
-{
-
- struct sti_layer *layer = NULL;
-
- switch (desc & STI_LAYER_TYPE_MASK) {
- case STI_GDP:
- layer = sti_gdp_create(dev, desc);
- break;
- case STI_VID:
- layer = sti_vid_create(dev);
- break;
- case STI_CUR:
- layer = sti_cursor_create(dev);
- break;
- case STI_VDP:
- layer = sti_hqvdp_create(dev);
- break;
- }
-
- if (!layer) {
- DRM_ERROR("Failed to create layer\n");
- return NULL;
- }
-
- layer->desc = desc;
- layer->dev = dev;
- layer->regs = baseaddr;
-
- layer->ops->init(layer);
-
- DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
-
- return layer;
-}
-EXPORT_SYMBOL(sti_layer_create);
-
-int sti_layer_prepare(struct sti_layer *layer,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode, int mixer_id,
- int dest_x, int dest_y, int dest_w, int dest_h,
- int src_x, int src_y, int src_w, int src_h)
-{
- int ret;
- unsigned int i;
- struct drm_gem_cma_object *cma_obj;
-
- if (!layer || !fb || !mode) {
- DRM_ERROR("Null fb, layer or mode\n");
- return 1;
- }
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
- return 1;
- }
-
- layer->crtc = crtc;
- layer->fb = fb;
- layer->mode = mode;
- layer->mixer_id = mixer_id;
- layer->dst_x = dest_x;
- layer->dst_y = dest_y;
- layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
- layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
- layer->src_x = src_x;
- layer->src_y = src_y;
- layer->src_w = src_w;
- layer->src_h = src_h;
- layer->format = fb->pixel_format;
- layer->vaddr = cma_obj->vaddr;
- layer->paddr = cma_obj->paddr;
- for (i = 0; i < 4; i++) {
- layer->pitches[i] = fb->pitches[i];
- layer->offsets[i] = fb->offsets[i];
- }
-
- DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
- sti_layer_to_str(layer),
- layer->mixer_id);
- DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
- sti_layer_to_str(layer),
- layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
- layer->src_w, layer->src_h, layer->src_x,
- layer->src_y);
-
- DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
- (char *)&layer->format, (unsigned long)layer->paddr);
-
- if (!layer->ops->prepare)
- goto err_no_prepare;
-
- ret = layer->ops->prepare(layer, !layer->enabled);
- if (!ret)
- layer->enabled = true;
-
- return ret;
-
-err_no_prepare:
- DRM_ERROR("Cannot prepare\n");
- return 1;
-}
-
-int sti_layer_commit(struct sti_layer *layer)
-{
- if (!layer)
- return 1;
-
- if (!layer->ops->commit)
- goto err_no_commit;
-
- return layer->ops->commit(layer);
-
-err_no_commit:
- DRM_ERROR("Cannot commit\n");
- return 1;
-}
-
-int sti_layer_disable(struct sti_layer *layer)
-{
- int ret;
-
- DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
- if (!layer)
- return 1;
-
- if (!layer->enabled)
- return 0;
-
- if (!layer->ops->disable)
- goto err_no_disable;
-
- ret = layer->ops->disable(layer);
- if (!ret)
- layer->enabled = false;
- else
- DRM_ERROR("Disable failed\n");
-
- return ret;
-
-err_no_disable:
- DRM_ERROR("Cannot disable\n");
- return 1;
-}
-
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
-{
- if (!layer)
- return NULL;
-
- if (!layer->ops->get_formats)
- return NULL;
-
- return layer->ops->get_formats(layer);
-}
-
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
-{
- if (!layer)
- return 0;
-
- if (!layer->ops->get_nb_formats)
- return 0;
-
- return layer->ops->get_nb_formats(layer);
-}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
deleted file mode 100644
index ceff497f557e..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- * Fabien Dessenne <fabien.dessenne@st.com>
- * for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_LAYER_H_
-#define _STI_LAYER_H_
-
-#include <drm/drmP.h>
-
-#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
-
-#define STI_LAYER_TYPE_SHIFT 8
-#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
-
-struct sti_layer;
-
-enum sti_layer_type {
- STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
- STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
- STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
- STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
- STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
-};
-
-enum sti_layer_id_of_type {
- STI_ID_0 = 0,
- STI_ID_1 = 1,
- STI_ID_2 = 2,
- STI_ID_3 = 3
-};
-
-enum sti_layer_desc {
- STI_GDP_0 = STI_GDP | STI_ID_0,
- STI_GDP_1 = STI_GDP | STI_ID_1,
- STI_GDP_2 = STI_GDP | STI_ID_2,
- STI_GDP_3 = STI_GDP | STI_ID_3,
- STI_VID_0 = STI_VID | STI_ID_0,
- STI_VID_1 = STI_VID | STI_ID_1,
- STI_HQVDP_0 = STI_VDP | STI_ID_0,
- STI_CURSOR = STI_CUR,
- STI_BACK = STI_BCK
-};
-
-/**
- * STI layer functions structure
- *
- * @get_formats: get layer supported formats
- * @get_nb_formats: get number of format supported
- * @init: initialize the layer
- * @prepare: prepare layer before rendering
- * @commit: set layer for rendering
- * @disable: disable layer
- */
-struct sti_layer_funcs {
- const uint32_t* (*get_formats)(struct sti_layer *layer);
- unsigned int (*get_nb_formats)(struct sti_layer *layer);
- void (*init)(struct sti_layer *layer);
- int (*prepare)(struct sti_layer *layer, bool first_prepare);
- int (*commit)(struct sti_layer *layer);
- int (*disable)(struct sti_layer *layer);
-};
-
-/**
- * STI layer structure
- *
- * @plane: drm plane it is bound to (if any)
- * @fb: drm fb it is bound to
- * @crtc: crtc it is bound to
- * @mode: display mode
- * @desc: layer type & id
- * @device: driver device
- * @regs: layer registers
- * @ops: layer functions
- * @zorder: layer z-order
- * @mixer_id: id of the mixer used to display the layer
- * @enabled: to know if the layer is active or not
- * @src_x src_y: coordinates of the input (fb) area
- * @src_w src_h: size of the input (fb) area
- * @dst_x dst_y: coordinates of the output (crtc) area
- * @dst_w dst_h: size of the output (crtc) area
- * @format: format
- * @pitches: pitch of 'planes' (eg: Y, U, V)
- * @offsets: offset of 'planes'
- * @vaddr: virtual address of the input buffer
- * @paddr: physical address of the input buffer
- */
-struct sti_layer {
- struct drm_plane plane;
- struct drm_framebuffer *fb;
- struct drm_crtc *crtc;
- struct drm_display_mode *mode;
- enum sti_layer_desc desc;
- struct device *dev;
- void __iomem *regs;
- const struct sti_layer_funcs *ops;
- int zorder;
- int mixer_id;
- bool enabled;
- int src_x, src_y;
- int src_w, src_h;
- int dst_x, dst_y;
- int dst_w, dst_h;
- uint32_t format;
- unsigned int pitches[4];
- unsigned int offsets[4];
- void *vaddr;
- dma_addr_t paddr;
-};
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
- void __iomem *baseaddr);
-int sti_layer_prepare(struct sti_layer *layer,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- int mixer_id,
- int dest_x, int dest_y,
- int dest_w, int dest_h,
- int src_x, int src_y,
- int src_w, int src_h);
-int sti_layer_commit(struct sti_layer *layer);
-int sti_layer_disable(struct sti_layer *layer);
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
-const char *sti_layer_to_str(struct sti_layer *layer);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 13a4b84deab6..0182e9365004 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -58,6 +58,7 @@ const char *sti_mixer_to_str(struct sti_mixer *mixer)
return "<UNKNOWN MIXER>";
}
}
+EXPORT_SYMBOL(sti_mixer_to_str);
static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
{
@@ -101,52 +102,57 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
}
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
{
- int layer_id = 0, depth = layer->zorder;
+ int plane_id, depth = plane->zorder;
+ unsigned int i;
u32 mask, val;
- if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
+ if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
return 1;
- switch (layer->desc) {
+ switch (plane->desc) {
case STI_GDP_0:
- layer_id = GAM_DEPTH_GDP0_ID;
+ plane_id = GAM_DEPTH_GDP0_ID;
break;
case STI_GDP_1:
- layer_id = GAM_DEPTH_GDP1_ID;
+ plane_id = GAM_DEPTH_GDP1_ID;
break;
case STI_GDP_2:
- layer_id = GAM_DEPTH_GDP2_ID;
+ plane_id = GAM_DEPTH_GDP2_ID;
break;
case STI_GDP_3:
- layer_id = GAM_DEPTH_GDP3_ID;
+ plane_id = GAM_DEPTH_GDP3_ID;
break;
- case STI_VID_0:
case STI_HQVDP_0:
- layer_id = GAM_DEPTH_VID0_ID;
- break;
- case STI_VID_1:
- layer_id = GAM_DEPTH_VID1_ID;
+ plane_id = GAM_DEPTH_VID0_ID;
break;
case STI_CURSOR:
/* no need to set depth for cursor */
return 0;
default:
- DRM_ERROR("Unknown layer %d\n", layer->desc);
+ DRM_ERROR("Unknown plane %d\n", plane->desc);
return 1;
}
- mask = GAM_DEPTH_MASK_ID << (3 * depth);
- layer_id = layer_id << (3 * depth);
+
+ /* Check whether a depth was already assigned to the plane */
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
+ for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
+ mask = GAM_DEPTH_MASK_ID << (3 * i);
+ if ((val & mask) == plane_id << (3 * i))
+ break;
+ }
+
+ mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
+ plane_id = plane_id << (3 * (depth - 1));
DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
- sti_layer_to_str(layer), depth);
+ sti_plane_to_str(plane), depth);
dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
- layer_id, mask);
+ plane_id, mask);
- val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
val &= ~mask;
- val |= layer_id;
+ val |= plane_id;
sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
@@ -176,9 +182,9 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
return 0;
}
-static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
+static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
{
- switch (layer->desc) {
+ switch (plane->desc) {
case STI_BACK:
return GAM_CTL_BACK_MASK;
case STI_GDP_0:
@@ -189,11 +195,8 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
return GAM_CTL_GDP2_MASK;
case STI_GDP_3:
return GAM_CTL_GDP3_MASK;
- case STI_VID_0:
case STI_HQVDP_0:
return GAM_CTL_VID0_MASK;
- case STI_VID_1:
- return GAM_CTL_VID1_MASK;
case STI_CURSOR:
return GAM_CTL_CURSOR_MASK;
default:
@@ -201,17 +204,17 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
}
}
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
- struct sti_layer *layer, bool status)
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+ struct sti_plane *plane, bool status)
{
u32 mask, val;
DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
- sti_mixer_to_str(mixer), sti_layer_to_str(layer));
+ sti_mixer_to_str(mixer), sti_plane_to_str(plane));
- mask = sti_mixer_get_layer_mask(layer);
+ mask = sti_mixer_get_plane_mask(plane);
if (!mask) {
- DRM_ERROR("Can not find layer mask\n");
+ DRM_ERROR("Can't find layer mask\n");
return -EINVAL;
}
@@ -223,15 +226,6 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
return 0;
}
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
-{
- u32 val;
-
- DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
- val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
- sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
-}
-
void sti_mixer_set_matrix(struct sti_mixer *mixer)
{
unsigned int i;
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index b97282182908..efb1a9a5ba86 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -11,10 +11,16 @@
#include <drm/drmP.h>
-#include "sti_layer.h"
+#include "sti_plane.h"
#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
+enum sti_mixer_status {
+ STI_MIXER_READY,
+ STI_MIXER_DISABLING,
+ STI_MIXER_DISABLED,
+};
+
/**
* STI Mixer subdevice structure
*
@@ -23,33 +29,32 @@
* @id: id of the mixer
* @drm_crtc: crtc object link to the mixer
* @pending_event: set if a flip event is pending on crtc
- * @enabled: to know if the mixer is active or not
+ * @status: current status of the mixer
*/
struct sti_mixer {
struct device *dev;
void __iomem *regs;
int id;
- struct drm_crtc drm_crtc;
+ struct drm_crtc drm_crtc;
struct drm_pending_vblank_event *pending_event;
- bool enabled;
+ enum sti_mixer_status status;
};
const char *sti_mixer_to_str(struct sti_mixer *mixer);
struct sti_mixer *sti_mixer_create(struct device *dev, int id,
- void __iomem *baseaddr);
+ void __iomem *baseaddr);
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
- struct sti_layer *layer, bool status);
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+ struct sti_plane *plane, bool status);
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
int sti_mixer_active_video_area(struct sti_mixer *mixer,
- struct drm_display_mode *mode);
+ struct drm_display_mode *mode);
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
/* depth in Cross-bar control = z order */
-#define GAM_MIXER_NB_DEPTH_LEVEL 7
+#define GAM_MIXER_NB_DEPTH_LEVEL 6
#define STI_MIXER_MAIN 0
#define STI_MIXER_AUX 1
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
new file mode 100644
index 000000000000..d5c5e91f2956
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_drv.h"
+#include "sti_plane.h"
+
+/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (Foreground) */
+enum sti_plane_desc sti_plane_default_zorder[] = {
+ STI_GDP_0,
+ STI_GDP_1,
+ STI_HQVDP_0,
+ STI_GDP_2,
+ STI_GDP_3,
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane)
+{
+ switch (plane->desc) {
+ case STI_GDP_0:
+ return "GDP0";
+ case STI_GDP_1:
+ return "GDP1";
+ case STI_GDP_2:
+ return "GDP2";
+ case STI_GDP_3:
+ return "GDP3";
+ case STI_HQVDP_0:
+ return "HQVDP0";
+ case STI_CURSOR:
+ return "CURSOR";
+ default:
+ return "<UNKNOWN PLANE>";
+ }
+}
+EXPORT_SYMBOL(sti_plane_to_str);
+
+static void sti_plane_destroy(struct drm_plane *drm_plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_plane_helper_disable(drm_plane);
+ drm_plane_cleanup(drm_plane);
+}
+
+static int sti_plane_set_property(struct drm_plane *drm_plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct sti_private *private = dev->dev_private;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (property == private->plane_zorder_property) {
+ plane->zorder = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct sti_private *private = dev->dev_private;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct drm_property *prop;
+
+ prop = private->plane_zorder_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zpos", 1,
+ GAM_MIXER_NB_DEPTH_LEVEL);
+ if (!prop)
+ return;
+
+ private->plane_zorder_property = prop;
+ }
+
+ drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
+}
+
+void sti_plane_init_property(struct sti_plane *plane,
+ enum drm_plane_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
+ if (sti_plane_default_zorder[i] == plane->desc)
+ break;
+
+ plane->zorder = i + 1;
+
+ if (type == DRM_PLANE_TYPE_OVERLAY)
+ sti_plane_attach_zorder_property(&plane->drm_plane);
+
+ DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
+ plane->drm_plane.base.id,
+ sti_plane_to_str(plane), plane->zorder);
+}
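+/*
+ * Example derived from the default zorder table above: STI_HQVDP_0 sits at
+ * index 2 of sti_plane_default_zorder[], so it gets a default zorder of 3.
+ * Overlay planes additionally expose this value through the "zpos" property,
+ * which userspace may set anywhere in the 1..GAM_MIXER_NB_DEPTH_LEVEL range.
+ */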
+EXPORT_SYMBOL(sti_plane_init_property);
+
+struct drm_plane_funcs sti_plane_helpers_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sti_plane_destroy,
+ .set_property = sti_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+EXPORT_SYMBOL(sti_plane_helpers_funcs);
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
new file mode 100644
index 000000000000..86f1e6fc81b9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_PLANE_H_
+#define _STI_PLANE_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+extern struct drm_plane_funcs sti_plane_helpers_funcs;
+
+#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
+
+#define STI_PLANE_TYPE_SHIFT 8
+#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
+
+enum sti_plane_type {
+ STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
+ STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
+ STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
+ STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
+};
+
+enum sti_plane_id_of_type {
+ STI_ID_0 = 0,
+ STI_ID_1 = 1,
+ STI_ID_2 = 2,
+ STI_ID_3 = 3
+};
+
+enum sti_plane_desc {
+ STI_GDP_0 = STI_GDP | STI_ID_0,
+ STI_GDP_1 = STI_GDP | STI_ID_1,
+ STI_GDP_2 = STI_GDP | STI_ID_2,
+ STI_GDP_3 = STI_GDP | STI_ID_3,
+ STI_HQVDP_0 = STI_VDP | STI_ID_0,
+ STI_CURSOR = STI_CUR,
+ STI_BACK = STI_BCK
+};
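+/*
+ * Illustration of the encoding above: STI_GDP_2 = STI_GDP | STI_ID_2
+ * = (1 << STI_PLANE_TYPE_SHIFT) | 2 = 0x102, so the plane type lives in the
+ * bits covered by STI_PLANE_TYPE_MASK and the instance id in the low byte.
+ */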
+
+enum sti_plane_status {
+ STI_PLANE_READY,
+ STI_PLANE_UPDATED,
+ STI_PLANE_DISABLING,
+ STI_PLANE_FLUSHING,
+ STI_PLANE_DISABLED,
+};
+
+/**
+ * STI plane structure
+ *
+ * @drm_plane: drm plane object
+ * @desc: plane type & id
+ * @status: current status of the plane
+ * @zorder: plane z-order
+ */
+struct sti_plane {
+ struct drm_plane drm_plane;
+ enum sti_plane_desc desc;
+ enum sti_plane_status status;
+ int zorder;
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane);
+void sti_plane_init_property(struct sti_plane *plane,
+ enum drm_plane_type type);
+#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5cc53116508e..c1aac8e66fb5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,7 +16,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
@@ -473,7 +473,7 @@ static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+ tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
@@ -523,7 +523,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+ tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -575,7 +575,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+ tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -644,7 +644,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
struct sti_tvout *tvout = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
unsigned int i;
- int ret;
tvout->drm_dev = drm_dev;
@@ -658,17 +657,15 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
sti_tvout_create_encoders(drm_dev, tvout);
- ret = component_bind_all(dev, drm_dev);
- if (ret)
- sti_tvout_destroy_encoders(tvout);
-
- return ret;
+ return 0;
}
static void sti_tvout_unbind(struct device *dev, struct device *master,
void *data)
{
- /* do nothing */
+ struct sti_tvout *tvout = dev_get_drvdata(dev);
+
+ sti_tvout_destroy_encoders(tvout);
}
static const struct component_ops sti_tvout_ops = {
@@ -676,34 +673,12 @@ static const struct component_ops sti_tvout_ops = {
.unbind = sti_tvout_unbind,
};
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static int sti_tvout_master_bind(struct device *dev)
-{
- return 0;
-}
-
-static void sti_tvout_master_unbind(struct device *dev)
-{
- /* do nothing */
-}
-
-static const struct component_master_ops sti_tvout_master_ops = {
- .bind = sti_tvout_master_bind,
- .unbind = sti_tvout_master_unbind,
-};
-
static int sti_tvout_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
struct resource *res;
- struct device_node *child_np;
- struct component_match *match = NULL;
DRM_INFO("%s\n", __func__);
@@ -734,24 +709,11 @@ static int sti_tvout_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tvout);
- of_platform_populate(node, NULL, NULL, dev);
-
- child_np = of_get_next_available_child(node, NULL);
-
- while (child_np) {
- component_match_add(dev, &match, compare_of, child_np);
- of_node_put(child_np);
- child_np = of_get_next_available_child(node, child_np);
- }
-
- component_master_add_with_match(dev, &sti_tvout_master_ops, match);
-
return component_add(dev, &sti_tvout_ops);
}
static int sti_tvout_remove(struct platform_device *pdev)
{
- component_master_del(&pdev->dev, &sti_tvout_master_ops);
component_del(&pdev->dev, &sti_tvout_ops);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 10ced6a479f4..a8254cc362a1 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -6,7 +6,7 @@
#include <drm/drmP.h>
-#include "sti_layer.h"
+#include "sti_plane.h"
#include "sti_vid.h"
#include "sti_vtg.h"
@@ -43,35 +43,37 @@
#define VID_MPR2_BT709 0x07150545
#define VID_MPR3_BT709 0x00000AE8
-static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
+void sti_vid_commit(struct sti_vid *vid,
+ struct drm_plane_state *state)
{
- u32 val;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_display_mode *mode = &crtc->mode;
+ int dst_x = state->crtc_x;
+ int dst_y = state->crtc_y;
+ int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ u32 val, ydo, xdo, yds, xds;
+
+ /* Input / output size
+ * Align to upper even value */
+ dst_w = ALIGN(dst_w, 2);
+ dst_h = ALIGN(dst_h, 2);
/* Unmask */
val = readl(vid->regs + VID_CTL);
val &= ~VID_CTL_IGNORE;
writel(val, vid->regs + VID_CTL);
- return 0;
-}
-
-static int sti_vid_commit_layer(struct sti_layer *vid)
-{
- struct drm_display_mode *mode = vid->mode;
- u32 ydo, xdo, yds, xds;
-
- ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
- yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
- xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
- xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
+ ydo = sti_vtg_get_line_number(*mode, dst_y);
+ yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
writel((ydo << 16) | xdo, vid->regs + VID_VPO);
writel((yds << 16) | xds, vid->regs + VID_VPS);
-
- return 0;
}
-static int sti_vid_disable_layer(struct sti_layer *vid)
+void sti_vid_disable(struct sti_vid *vid)
{
u32 val;
@@ -79,21 +81,9 @@ static int sti_vid_disable_layer(struct sti_layer *vid)
val = readl(vid->regs + VID_CTL);
val |= VID_CTL_IGNORE;
writel(val, vid->regs + VID_CTL);
-
- return 0;
}
-static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
-{
- return NULL;
-}
-
-static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
-{
- return 0;
-}
-
-static void sti_vid_init(struct sti_layer *vid)
+static void sti_vid_init(struct sti_vid *vid)
{
/* Enable PSI, Mask layer */
writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@@ -113,18 +103,10 @@ static void sti_vid_init(struct sti_layer *vid)
writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
}
-static const struct sti_layer_funcs vid_ops = {
- .get_formats = sti_vid_get_formats,
- .get_nb_formats = sti_vid_get_nb_formats,
- .init = sti_vid_init,
- .prepare = sti_vid_prepare_layer,
- .commit = sti_vid_commit_layer,
- .disable = sti_vid_disable_layer,
-};
-
-struct sti_layer *sti_vid_create(struct device *dev)
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+ void __iomem *baseaddr)
{
- struct sti_layer *vid;
+ struct sti_vid *vid;
vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
if (!vid) {
@@ -132,7 +114,11 @@ struct sti_layer *sti_vid_create(struct device *dev)
return NULL;
}
- vid->ops = &vid_ops;
+ vid->dev = dev;
+ vid->regs = baseaddr;
+ vid->id = id;
+
+ sti_vid_init(vid);
return vid;
}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 2c0aecd63294..5dea4791f1d6 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -7,6 +7,23 @@
#ifndef _STI_VID_H_
#define _STI_VID_H_
-struct sti_layer *sti_vid_create(struct device *dev);
+/**
+ * STI VID structure
+ *
+ * @dev: driver device
+ * @regs: vid registers
+ * @id: id of the vid
+ */
+struct sti_vid {
+ struct device *dev;
+ void __iomem *regs;
+ int id;
+};
+
+void sti_vid_commit(struct sti_vid *vid,
+ struct drm_plane_state *state);
+void sti_vid_disable(struct sti_vid *vid);
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+ void __iomem *baseaddr);
#endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index a287e4fec865..ddefb85dc4f7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -76,6 +76,14 @@ to_tegra_plane_state(struct drm_plane_state *state)
return NULL;
}
+static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
+{
+ stats->frames = 0;
+ stats->vblank = 0;
+ stats->underflow = 0;
+ stats->overflow = 0;
+}
+
/*
* Reads the active copy of a register. This takes the dc->lock spinlock to
* prevent races with the VBLANK processing which also needs access to the
@@ -759,7 +767,6 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
/* position the cursor */
value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
-
}
static void tegra_cursor_atomic_disable(struct drm_plane *plane,
@@ -809,9 +816,11 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
return ERR_PTR(-ENOMEM);
/*
- * We'll treat the cursor as an overlay plane with index 6 here so
- * that the update and activation request bits in DC_CMD_STATE_CONTROL
- * match up.
+ * This index is kind of fake. The cursor isn't a regular plane, but
+ * its update and activation request bits in DC_CMD_STATE_CONTROL do
+ * use the same programming. Setting this fake index here allows the
+ * code in tegra_add_plane_state() to do the right thing without the
+ * need to special-case the cursor plane.
*/
plane->index = 6;
@@ -1015,6 +1024,8 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
crtc->state = &state->base;
crtc->state->crtc = crtc;
}
+
+ drm_crtc_vblank_reset(crtc);
}
static struct drm_crtc_state *
@@ -1052,90 +1063,6 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
.atomic_destroy_state = tegra_crtc_atomic_destroy_state,
};
-static void tegra_dc_stop(struct tegra_dc *dc)
-{
- u32 value;
-
- /* stop the display controller */
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
- tegra_dc_commit(dc);
-}
-
-static bool tegra_dc_idle(struct tegra_dc *dc)
-{
- u32 value;
-
- value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
-
- return (value & DISP_CTRL_MODE_MASK) == 0;
-}
-
-static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
-{
- timeout = jiffies + msecs_to_jiffies(timeout);
-
- while (time_before(jiffies, timeout)) {
- if (tegra_dc_idle(dc))
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
- return -ETIMEDOUT;
-}
-
-static void tegra_crtc_disable(struct drm_crtc *crtc)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
- u32 value;
-
- if (!tegra_dc_idle(dc)) {
- tegra_dc_stop(dc);
-
- /*
- * Ignore the return value, there isn't anything useful to do
- * in case this fails.
- */
- tegra_dc_wait_idle(dc, 100);
- }
-
- /*
- * This should really be part of the RGB encoder driver, but clearing
- * these bits has the side-effect of stopping the display controller.
- * When that happens no VBLANK interrupts will be raised. At the same
- * time the encoder is disabled before the display controller, so the
- * above code is always going to timeout waiting for the controller
- * to go idle.
- *
- * Given the close coupling between the RGB encoder and the display
- * controller doing it here is still kind of okay. None of the other
- * encoder drivers require these bits to be cleared.
- *
- * XXX: Perhaps given that the display controller is switched off at
- * this point anyway maybe clearing these bits isn't even useful for
- * the RGB encoder?
- */
- if (dc->rgb) {
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
- }
-
- drm_crtc_vblank_off(crtc);
-}
-
-static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
-{
- return true;
-}
-
static int tegra_dc_set_timings(struct tegra_dc *dc,
struct drm_display_mode *mode)
{
@@ -1229,7 +1156,85 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
}
-static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void tegra_dc_stop(struct tegra_dc *dc)
+{
+ u32 value;
+
+ /* stop the display controller */
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_commit(dc);
+}
+
+static bool tegra_dc_idle(struct tegra_dc *dc)
+{
+ u32 value;
+
+ value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
+
+ return (value & DISP_CTRL_MODE_MASK) == 0;
+}
+
+static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
+{
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ if (tegra_dc_idle(dc))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
+ return -ETIMEDOUT;
+}
+
+static void tegra_crtc_disable(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ if (!tegra_dc_idle(dc)) {
+ tegra_dc_stop(dc);
+
+ /*
+ * Ignore the return value, there isn't anything useful to do
+ * in case this fails.
+ */
+ tegra_dc_wait_idle(dc, 100);
+ }
+
+ /*
+ * This should really be part of the RGB encoder driver, but clearing
+ * these bits has the side-effect of stopping the display controller.
+ * When that happens no VBLANK interrupts will be raised. At the same
+ * time the encoder is disabled before the display controller, so the
+ * above code is always going to timeout waiting for the controller
+ * to go idle.
+ *
+ * Given the close coupling between the RGB encoder and the display
+ * controller doing it here is still kind of okay. None of the other
+ * encoder drivers require these bits to be cleared.
+ *
+ * XXX: Perhaps given that the display controller is switched off at
+ * this point anyway maybe clearing these bits isn't even useful for
+ * the RGB encoder?
+ */
+ if (dc->rgb) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ }
+
+ tegra_dc_stats_reset(&dc->stats);
+ drm_crtc_vblank_off(crtc);
+}
+
+static void tegra_crtc_enable(struct drm_crtc *crtc)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct tegra_dc_state *state = to_dc_state(crtc->state);
@@ -1259,15 +1264,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
tegra_dc_commit(dc);
-}
-
-static void tegra_crtc_prepare(struct drm_crtc *crtc)
-{
- drm_crtc_vblank_off(crtc);
-}
-static void tegra_crtc_commit(struct drm_crtc *crtc)
-{
drm_crtc_vblank_on(crtc);
}
@@ -1277,7 +1274,8 @@ static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -1291,7 +1289,8 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
}
}
-static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -1302,10 +1301,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
.disable = tegra_crtc_disable,
- .mode_fixup = tegra_crtc_mode_fixup,
- .mode_set_nofb = tegra_crtc_mode_set_nofb,
- .prepare = tegra_crtc_prepare,
- .commit = tegra_crtc_commit,
+ .enable = tegra_crtc_enable,
.atomic_check = tegra_crtc_atomic_check,
.atomic_begin = tegra_crtc_atomic_begin,
.atomic_flush = tegra_crtc_atomic_flush,
@@ -1323,6 +1319,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): frame end\n", __func__);
*/
+ dc->stats.frames++;
}
if (status & VBLANK_INT) {
@@ -1331,12 +1328,21 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
*/
drm_crtc_handle_vblank(&dc->base);
tegra_dc_finish_page_flip(dc);
+ dc->stats.vblank++;
}
if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
/*
dev_dbg(dc->dev, "%s(): underflow\n", __func__);
*/
+ dc->stats.underflow++;
+ }
+
+ if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) {
+ /*
+ dev_dbg(dc->dev, "%s(): overflow\n", __func__);
+ */
+ dc->stats.overflow++;
}
return IRQ_HANDLED;
@@ -1346,6 +1352,14 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dc *dc = node->info_ent->data;
+ int err = 0;
+
+ drm_modeset_lock_crtc(&dc->base, NULL);
+
+ if (!dc->base.state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
#define DUMP_REG(name) \
seq_printf(s, "%-40s %#05x %08x\n", #name, name, \
@@ -1566,11 +1580,59 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
+unlock:
+ drm_modeset_unlock_crtc(&dc->base);
+ return err;
+}
+
+static int tegra_dc_show_crc(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+ int err = 0;
+ u32 value;
+
+ drm_modeset_lock_crtc(&dc->base, NULL);
+
+ if (!dc->base.state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
+ tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
+ tegra_dc_commit(dc);
+
+ drm_crtc_wait_one_vblank(&dc->base);
+ drm_crtc_wait_one_vblank(&dc->base);
+
+ value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
+ seq_printf(s, "%08x\n", value);
+
+ tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
+
+unlock:
+ drm_modeset_unlock_crtc(&dc->base);
+ return err;
+}
+
+static int tegra_dc_show_stats(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+
+ seq_printf(s, "frames: %lu\n", dc->stats.frames);
+ seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
+ seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
+ seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
+
return 0;
}
static struct drm_info_list debugfs_files[] = {
{ "regs", tegra_dc_show_regs, 0, NULL },
+ { "crc", tegra_dc_show_crc, 0, NULL },
+ { "stats", tegra_dc_show_stats, 0, NULL },
};
static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
@@ -1716,7 +1778,8 @@ static int tegra_dc_init(struct host1x_client *client)
tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
}
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
@@ -1732,15 +1795,19 @@ static int tegra_dc_init(struct host1x_client *client)
WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
if (dc->soc->supports_border_color)
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+ tegra_dc_stats_reset(&dc->stats);
+
return 0;
cleanup:
@@ -1826,8 +1893,20 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.has_powergate = true,
};
+static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
+ .supports_border_color = false,
+ .supports_interlacing = true,
+ .supports_cursor = true,
+ .supports_block_linear = true,
+ .pitch_align = 64,
+ .has_powergate = true,
+};
+
static const struct of_device_id tegra_dc_of_match[] = {
{
+ .compatible = "nvidia,tegra210-dc",
+ .data = &tegra210_dc_soc_info,
+ }, {
.compatible = "nvidia,tegra124-dc",
.data = &tegra124_dc_soc_info,
}, {
@@ -1957,6 +2036,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
return -ENXIO;
}
+ dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
+ if (!dc->syncpt)
+ dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
+
INIT_LIST_HEAD(&dc->client.list);
dc->client.ops = &dc_client_ops;
dc->client.dev = &pdev->dev;
@@ -1974,10 +2057,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
- dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
- if (!dc->syncpt)
- dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
-
platform_set_drvdata(pdev, dc);
return 0;
@@ -2016,7 +2095,6 @@ static int tegra_dc_remove(struct platform_device *pdev)
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
- .owner = THIS_MODULE,
.of_match_table = tegra_dc_of_match,
},
.probe = tegra_dc_probe,
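
Reviewer note: with the dc.c changes above each display controller gains "crc" and "stats" debugfs entries next to the existing "regs" file; "crc" programs an active-data CRC, waits two vblanks and prints the latched checksum, and both register-touching files now return -EBUSY while the CRTC is inactive. A minimal userspace smoke test is sketched below; the debugfs mount point, DRM minor number and file layout are assumptions and may need adjusting on a given board.

	/* Illustrative only: dump the new Tegra DC debugfs entries. */
	#include <stdio.h>

	static void dump(const char *path)
	{
		char line[128];
		FILE *fp = fopen(path, "r");

		if (!fp) {
			perror(path);
			return;
		}

		while (fgets(line, sizeof(line), fp))
			printf("%s: %s", path, line);

		if (ferror(fp))
			perror(path);	/* e.g. EBUSY while the CRTC is inactive */

		fclose(fp);
	}

	int main(void)
	{
		/* hypothetical paths -- adjust for the actual debugfs layout */
		dump("/sys/kernel/debug/dri/0/crc");
		dump("/sys/kernel/debug/dri/0/stats");
		return 0;
	}
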
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 55792daabbb5..4a268635749b 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -86,6 +86,11 @@
#define DC_CMD_REG_ACT_CONTROL 0x043
#define DC_COM_CRC_CONTROL 0x300
+#define DC_COM_CRC_CONTROL_ALWAYS (1 << 3)
+#define DC_COM_CRC_CONTROL_FULL_FRAME (0 << 2)
+#define DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2)
+#define DC_COM_CRC_CONTROL_WAIT (1 << 1)
+#define DC_COM_CRC_CONTROL_ENABLE (1 << 0)
#define DC_COM_CRC_CHECKSUM 0x301
#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
@@ -114,15 +119,17 @@
#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
-#define H_PULSE_0_ENABLE (1 << 8)
-#define H_PULSE_1_ENABLE (1 << 10)
-#define H_PULSE_2_ENABLE (1 << 12)
+#define H_PULSE0_ENABLE (1 << 8)
+#define H_PULSE1_ENABLE (1 << 10)
+#define H_PULSE2_ENABLE (1 << 12)
#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
#define DC_DISP_DISP_WIN_OPTIONS 0x402
#define HDMI_ENABLE (1 << 30)
#define DSI_ENABLE (1 << 29)
+#define SOR1_TIMING_CYA (1 << 27)
+#define SOR1_ENABLE (1 << 26)
#define SOR_ENABLE (1 << 25)
#define CURSOR_ENABLE (1 << 16)
@@ -242,9 +249,20 @@
#define BASE_COLOR_SIZE565 (6 << 0)
#define BASE_COLOR_SIZE332 (7 << 0)
#define BASE_COLOR_SIZE888 (8 << 0)
+#define DITHER_CONTROL_MASK (3 << 8)
#define DITHER_CONTROL_DISABLE (0 << 8)
#define DITHER_CONTROL_ORDERED (2 << 8)
#define DITHER_CONTROL_ERRDIFF (3 << 8)
+#define BASE_COLOR_SIZE_MASK (0xf << 0)
+#define BASE_COLOR_SIZE_666 (0 << 0)
+#define BASE_COLOR_SIZE_111 (1 << 0)
+#define BASE_COLOR_SIZE_222 (2 << 0)
+#define BASE_COLOR_SIZE_333 (3 << 0)
+#define BASE_COLOR_SIZE_444 (4 << 0)
+#define BASE_COLOR_SIZE_555 (5 << 0)
+#define BASE_COLOR_SIZE_565 (6 << 0)
+#define BASE_COLOR_SIZE_332 (7 << 0)
+#define BASE_COLOR_SIZE_888 (8 << 0)
#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
#define SC1_H_QUALIFIER_NONE (1 << 16)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 07b26972f487..224a7dc8e4ed 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -294,26 +294,41 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
}
dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
- if (IS_ERR(dpaux->rst))
+ if (IS_ERR(dpaux->rst)) {
+ dev_err(&pdev->dev, "failed to get reset control: %ld\n",
+ PTR_ERR(dpaux->rst));
return PTR_ERR(dpaux->rst);
+ }
dpaux->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dpaux->clk))
+ if (IS_ERR(dpaux->clk)) {
+ dev_err(&pdev->dev, "failed to get module clock: %ld\n",
+ PTR_ERR(dpaux->clk));
return PTR_ERR(dpaux->clk);
+ }
err = clk_prepare_enable(dpaux->clk);
- if (err < 0)
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable module clock: %d\n",
+ err);
return err;
+ }
reset_control_deassert(dpaux->rst);
dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
- if (IS_ERR(dpaux->clk_parent))
+ if (IS_ERR(dpaux->clk_parent)) {
+ dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
+ PTR_ERR(dpaux->clk_parent));
return PTR_ERR(dpaux->clk_parent);
+ }
err = clk_prepare_enable(dpaux->clk_parent);
- if (err < 0)
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
+ err);
return err;
+ }
err = clk_set_rate(dpaux->clk_parent, 270000000);
if (err < 0) {
@@ -323,8 +338,11 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
}
dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(dpaux->vdd))
+ if (IS_ERR(dpaux->vdd)) {
+ dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
+ PTR_ERR(dpaux->vdd));
return PTR_ERR(dpaux->vdd);
+ }
err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
dev_name(dpaux->dev), dpaux);
@@ -334,6 +352,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return err;
}
+ disable_irq(dpaux->irq);
+
dpaux->aux.transfer = tegra_dpaux_transfer;
dpaux->aux.dev = &pdev->dev;
@@ -341,6 +361,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (err < 0)
return err;
+ /*
+ * Assume that by default the DPAUX/I2C pads will be used for HDMI,
+ * so power them up and configure them in I2C mode.
+ *
+ * The DPAUX code paths reconfigure the pads in AUX mode, but there
+ * is no possibility to perform the I2C mode configuration in the
+ * HDMI path.
+ */
+ value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+ value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+ value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
+ value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_I2C;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+
/* enable and clear all interrupts */
value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -359,6 +397,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
static int tegra_dpaux_remove(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+ u32 value;
+
+ /* make sure pads are powered down when not in use */
+ value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+ value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
drm_dp_aux_unregister(&dpaux->aux);
@@ -376,6 +420,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
}
static const struct of_device_id tegra_dpaux_of_match[] = {
+ { .compatible = "nvidia,tegra210-dpaux", },
{ .compatible = "nvidia,tegra124-dpaux", },
{ },
};
@@ -425,8 +470,10 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
enum drm_connector_status status;
status = tegra_dpaux_detect(dpaux);
- if (status == connector_status_connected)
+ if (status == connector_status_connected) {
+ enable_irq(dpaux->irq);
return 0;
+ }
usleep_range(1000, 2000);
}
@@ -439,6 +486,8 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
unsigned long timeout;
int err;
+ disable_irq(dpaux->irq);
+
err = regulator_disable(dpaux->vdd);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
index 806e245ca787..20783d9f4728 100644
--- a/drivers/gpu/drm/tegra/dpaux.h
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -57,6 +57,8 @@
#define DPAUX_DP_AUX_CONFIG 0x45
#define DPAUX_HYBRID_PADCTL 0x49
+#define DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV (1 << 15)
+#define DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV (1 << 14)
#define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12)
#define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8)
#define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 427f50c6803c..6d88cf1fcd1c 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -171,8 +171,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
goto fbdev;
- drm_mode_config_reset(drm);
-
/*
* We don't use the drm_irq_install() helpers provided by the DRM
* core, so we need to set this manually in order to allow the
@@ -182,11 +180,14 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
/* syncpoints are used for full 32-bit hardware VBLANK counters */
drm->max_vblank_count = 0xffffffff;
+ drm->vblank_disable_allowed = true;
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
goto device;
+ drm_mode_config_reset(drm);
+
err = tegra_drm_fb_init(drm);
if (err < 0)
goto vblank;
@@ -1037,9 +1038,8 @@ static int host1x_drm_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops host1x_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
-};
+static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
+ host1x_drm_resume);
static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra20-dc", },
@@ -1056,6 +1056,12 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra124-dc", },
{ .compatible = "nvidia,tegra124-sor", },
{ .compatible = "nvidia,tegra124-hdmi", },
+ { .compatible = "nvidia,tegra124-dsi", },
+ { .compatible = "nvidia,tegra132-dsi", },
+ { .compatible = "nvidia,tegra210-dc", },
+ { .compatible = "nvidia,tegra210-dsi", },
+ { .compatible = "nvidia,tegra210-sor", },
+ { .compatible = "nvidia,tegra210-sor1", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 659b2fcc986d..ec49275ffb24 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
#include <uapi/drm/tegra_drm.h>
#include <linux/host1x.h>
+#include <linux/of_gpio.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -104,6 +105,13 @@ int tegra_drm_exit(struct tegra_drm *tegra);
struct tegra_dc_soc_info;
struct tegra_output;
+struct tegra_dc_stats {
+ unsigned long frames;
+ unsigned long vblank;
+ unsigned long underflow;
+ unsigned long overflow;
+};
+
struct tegra_dc {
struct host1x_client client;
struct host1x_syncpt *syncpt;
@@ -121,6 +129,7 @@ struct tegra_dc {
struct tegra_output *rgb;
+ struct tegra_dc_stats stats;
struct list_head list;
struct drm_info_list *debugfs_files;
@@ -200,6 +209,7 @@ struct tegra_output {
const struct edid *edid;
unsigned int hpd_irq;
int hpd_gpio;
+ enum of_gpio_flags hpd_gpio_flags;
struct drm_encoder encoder;
struct drm_connector connector;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index ed970f622903..f0a138ef68ce 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -119,6 +119,16 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dsi *dsi = node->info_ent->data;
+ struct drm_crtc *crtc = dsi->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
+
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
#define DUMP_REG(name) \
seq_printf(s, "%-32s %#05x %08x\n", #name, name, \
@@ -208,7 +218,9 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
- return 0;
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
}
static struct drm_info_list debugfs_files[] = {
@@ -548,14 +560,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
/* horizontal sync width */
hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
- hsw -= 10;
/* horizontal back porch */
hbp = (mode->htotal - mode->hsync_end) * mul / div;
- hbp -= 14;
+
+ if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
+ hbp += hsw;
/* horizontal front porch */
hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+
+ /* subtract packet overhead */
+ hsw -= 10;
+ hbp -= 14;
hfp -= 8;
tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
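
Reviewer note: the reshuffling above changes the order of operations in the horizontal timing setup: the porches are first converted from pixel counts to byte counts, the sync width is folded into the back porch when the peripheral runs without sync pulses, and only then are the fixed 10/14/8-byte packet overheads subtracted. A standalone sketch of that arithmetic follows; the mode values and scale factors are made up for illustration, only the ordering mirrors the hunk.

	#include <stdbool.h>
	#include <stdio.h>

	/* Horizontal timings, in pixels, of a hypothetical mode. */
	struct hmode {
		unsigned int hdisplay, hsync_start, hsync_end, htotal;
	};

	static void dsi_h_timings(const struct hmode *m, unsigned int mul,
				  unsigned int div, bool sync_pulse)
	{
		/* convert pixel counts to byte counts on the link */
		unsigned int hsw = (m->hsync_end - m->hsync_start) * mul / div;
		unsigned int hbp = (m->htotal - m->hsync_end) * mul / div;
		unsigned int hfp = (m->hsync_start - m->hdisplay) * mul / div;

		/* without sync pulses the sync width travels in the back porch */
		if (!sync_pulse)
			hbp += hsw;

		/* subtract the fixed packet overhead last, as the patch now does */
		hsw -= 10;
		hbp -= 14;
		hfp -= 8;

		printf("hsw=%u hbp=%u hfp=%u (bytes)\n", hsw, hbp, hfp);
	}

	int main(void)
	{
		struct hmode m = { 1920, 2008, 2052, 2200 };	/* illustrative */

		dsi_h_timings(&m, 3, 4, false);	/* scale factors are placeholders */
		return 0;
	}
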
@@ -726,10 +743,6 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
tegra_dsi_soft_reset(dsi->slave);
}
-static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
static void tegra_dsi_connector_reset(struct drm_connector *connector)
{
struct tegra_dsi_state *state;
@@ -756,7 +769,7 @@ tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
}
static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
- .dpms = tegra_dsi_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.reset = tegra_dsi_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -782,22 +795,48 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
{
-}
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ u32 value;
+ int err;
-static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+ if (output->panel)
+ drm_panel_disable(output->panel);
-static void tegra_dsi_encoder_commit(struct drm_encoder *encoder)
-{
+ tegra_dsi_video_disable(dsi);
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+ }
+
+ err = tegra_dsi_wait_idle(dsi, 100);
+ if (err < 0)
+ dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+ tegra_dsi_soft_reset(dsi);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ tegra_dsi_disable(dsi);
+
+ return;
}
-static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_dsi *dsi = to_dsi(output);
@@ -835,45 +874,6 @@ static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
return;
}
-static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_dsi *dsi = to_dsi(output);
- u32 value;
- int err;
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- tegra_dsi_video_disable(dsi);
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~DSI_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-
- err = tegra_dsi_wait_idle(dsi, 100);
- if (err < 0)
- dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
-
- tegra_dsi_soft_reset(dsi);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-
- tegra_dsi_disable(dsi);
-
- return;
-}
-
static int
tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -956,11 +956,8 @@ tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
- .dpms = tegra_dsi_encoder_dpms,
- .prepare = tegra_dsi_encoder_prepare,
- .commit = tegra_dsi_encoder_commit,
- .mode_set = tegra_dsi_encoder_mode_set,
.disable = tegra_dsi_encoder_disable,
+ .enable = tegra_dsi_encoder_enable,
.atomic_check = tegra_dsi_encoder_atomic_check,
};
@@ -992,6 +989,10 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
DSI_PAD_OUT_CLK(0x0);
tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+ value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+ DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
return tegra_mipi_calibrate(dsi->mipi);
}
@@ -1621,6 +1622,9 @@ static int tegra_dsi_remove(struct platform_device *pdev)
}
static const struct of_device_id tegra_dsi_of_match[] = {
+ { .compatible = "nvidia,tegra210-dsi", },
+ { .compatible = "nvidia,tegra132-dsi", },
+ { .compatible = "nvidia,tegra124-dsi", },
{ .compatible = "nvidia,tegra114-dsi", },
{ },
};
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index bad1006a5150..219263615399 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -113,6 +113,10 @@
#define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12)
#define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16)
#define DSI_PAD_CONTROL_3 0x51
+#define DSI_PAD_PREEMP_PD_CLK(x) (((x) & 0x3) << 12)
+#define DSI_PAD_PREEMP_PU_CLK(x) (((x) & 0x3) << 8)
+#define DSI_PAD_PREEMP_PD(x) (((x) & 0x3) << 4)
+#define DSI_PAD_PREEMP_PU(x) (((x) & 0x3) << 0)
#define DSI_PAD_CONTROL_4 0x52
#define DSI_GANGED_MODE_CONTROL 0x53
#define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 397fb34d5d5b..07c844b746b4 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -184,9 +184,9 @@ unreference:
#ifdef CONFIG_DRM_TEGRA_FBDEV
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
@@ -224,11 +224,11 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
if (IS_ERR(bo))
return PTR_ERR(bo);
- info = framebuffer_alloc(0, drm->dev);
- if (!info) {
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
drm_gem_object_unreference_unlocked(&bo->gem);
- return -ENOMEM;
+ return PTR_ERR(info);
}
fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
@@ -248,12 +248,6 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &tegra_fb_ops;
- err = fb_alloc_cmap(&info->cmap, 256, 0);
- if (err < 0) {
- dev_err(drm->dev, "failed to allocate color map: %d\n", err);
- goto destroy;
- }
-
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
@@ -282,7 +276,7 @@ destroy:
drm_framebuffer_unregister_private(fb);
tegra_fb_destroy(fb);
release:
- framebuffer_release(info);
+ drm_fb_helper_release_fbi(helper);
return err;
}
@@ -347,20 +341,9 @@ fini:
static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
{
- struct fb_info *info = fbdev->base.fbdev;
-
- if (info) {
- int err;
- err = unregister_framebuffer(info);
- if (err < 0)
- DRM_DEBUG_KMS("failed to unregister framebuffer\n");
-
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&fbdev->base);
+ drm_fb_helper_release_fbi(&fbdev->base);
if (fbdev->fb) {
drm_framebuffer_unregister_private(&fbdev->fb->base);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 06ab1783bba1..52b32cbd9de6 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -772,13 +772,8 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
return drm_detect_hdmi_monitor(edid);
}
-static void tegra_hdmi_connector_dpms(struct drm_connector *connector,
- int mode)
-{
-}
-
static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
- .dpms = tegra_hdmi_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -818,22 +813,27 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
-}
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ u32 value;
-static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~HDMI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder)
-{
+ tegra_dc_commit(dc);
+ }
}
-static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
@@ -872,13 +872,13 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
tegra_dc_writel(dc, VSYNC_H_POSITION(1),
DC_DISP_DISP_TIMING_OPTIONS);
- tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+ tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888,
DC_DISP_DISP_COLOR_CONTROL);
/* video_preamble uses h_pulse2 */
pulse_start = 1 + h_sync_width + h_back_porch - 10;
- tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
PULSE_LAST_END_A;
@@ -1035,24 +1035,6 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
/* TODO: add HDCP support */
}
-static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- u32 value;
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~HDMI_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-}
-
static int
tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -1075,11 +1057,8 @@ tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
- .dpms = tegra_hdmi_encoder_dpms,
- .prepare = tegra_hdmi_encoder_prepare,
- .commit = tegra_hdmi_encoder_commit,
- .mode_set = tegra_hdmi_encoder_mode_set,
.disable = tegra_hdmi_encoder_disable,
+ .enable = tegra_hdmi_encoder_enable,
.atomic_check = tegra_hdmi_encoder_atomic_check,
};
@@ -1087,11 +1066,16 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_hdmi *hdmi = node->info_ent->data;
- int err;
+ struct drm_crtc *crtc = hdmi->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
- err = clk_prepare_enable(hdmi->clk);
- if (err)
- return err;
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
#define DUMP_REG(name) \
seq_printf(s, "%-56s %#05x %08x\n", #name, name, \
@@ -1258,9 +1242,9 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
- clk_disable_unprepare(hdmi->clk);
-
- return 0;
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
}
static struct drm_info_list debugfs_files[] = {
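
Reviewer note: dsi.c, hdmi.c and (below) sor.c all move their register-dump debugfs callbacks to the same guard: hold the modeset locks, return -EBUSY while no active CRTC drives the output, and drop the old clock bracketing around the register reads. A condensed form of that pattern is sketched here; the helper name and the explicit crtc parameter are inventions of this sketch, the driver derives the CRTC from the output's encoder.

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>
	#include <linux/seq_file.h>

	static int show_regs_guarded(struct seq_file *s, struct drm_device *drm,
				     struct drm_crtc *crtc)
	{
		int err = 0;

		drm_modeset_lock_all(drm);

		/* the register window is only reachable while the head is active */
		if (!crtc || !crtc->state->active) {
			err = -EBUSY;
			goto unlock;
		}

		seq_puts(s, "DUMP_REG() output would go here\n");

	unlock:
		drm_modeset_unlock_all(drm);
		return err;
	}
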
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 37db47975d48..46664b622270 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,8 +7,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/of_gpio.h>
-
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
#include "drm.h"
@@ -59,10 +57,17 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status = connector_status_unknown;
if (gpio_is_valid(output->hpd_gpio)) {
- if (gpio_get_value(output->hpd_gpio) == 0)
- status = connector_status_disconnected;
- else
- status = connector_status_connected;
+ if (output->hpd_gpio_flags & OF_GPIO_ACTIVE_LOW) {
+ if (gpio_get_value(output->hpd_gpio) != 0)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ } else {
+ if (gpio_get_value(output->hpd_gpio) == 0)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ }
} else {
if (!output->panel)
status = connector_status_disconnected;
@@ -97,7 +102,6 @@ static irqreturn_t hpd_irq(int irq, void *data)
int tegra_output_probe(struct tegra_output *output)
{
struct device_node *ddc, *panel;
- enum of_gpio_flags flags;
int err, size;
if (!output->of_node)
@@ -128,7 +132,7 @@ int tegra_output_probe(struct tegra_output *output)
output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
"nvidia,hpd-gpio", 0,
- &flags);
+ &output->hpd_gpio_flags);
if (gpio_is_valid(output->hpd_gpio)) {
unsigned long flags;
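
Reviewer note: the output.c hunk above makes hot-plug detection honour the GPIO polarity from the device tree instead of assuming active-high. The nested if/else reduces to XOR-ing the raw level with the active-low flag; a compact equivalent reading of that logic follows (names are illustrative, not a proposed rewrite).

	#include <stdbool.h>
	#include <stdio.h>

	/* "connected" when the raw level, corrected for polarity, is asserted */
	static bool hpd_connected(int raw_level, bool active_low)
	{
		return (raw_level != 0) ^ active_low;
	}

	int main(void)
	{
		printf("%d %d %d %d\n",
		       hpd_connected(1, false),	/* high, active-high -> connected */
		       hpd_connected(0, false),	/* low,  active-high -> disconnected */
		       hpd_connected(0, true),	/* low,  active-low  -> connected */
		       hpd_connected(1, true));	/* high, active-low  -> disconnected */
		return 0;
	}
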
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 7cd833f5b5b5..bc9735b4ad60 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -18,7 +18,6 @@
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
- bool enabled;
struct clk *clk_parent;
struct clk *clk;
@@ -88,13 +87,8 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
tegra_dc_writel(dc, table[i].value, table[i].offset);
}
-static void tegra_rgb_connector_dpms(struct drm_connector *connector,
- int mode)
-{
-}
-
static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
- .dpms = tegra_rgb_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -125,21 +119,22 @@ static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
{
-}
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_rgb *rgb = to_rgb(output);
-static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+ if (output->panel)
+ drm_panel_disable(output->panel);
-static void tegra_rgb_encoder_commit(struct drm_encoder *encoder)
-{
+ tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ tegra_dc_commit(rgb->dc);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
}
-static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_rgb *rgb = to_rgb(output);
@@ -174,21 +169,6 @@ static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
drm_panel_enable(output->panel);
}
-static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_rgb *rgb = to_rgb(output);
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
- tegra_dc_commit(rgb->dc);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-}
-
static int
tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -231,11 +211,8 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
- .dpms = tegra_rgb_encoder_dpms,
- .prepare = tegra_rgb_encoder_prepare,
- .commit = tegra_rgb_encoder_commit,
- .mode_set = tegra_rgb_encoder_mode_set,
.disable = tegra_rgb_encoder_disable,
+ .enable = tegra_rgb_encoder_enable,
.atomic_check = tegra_rgb_encoder_atomic_check,
};
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7591d8901f9a..da1715ebdd71 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -10,7 +10,9 @@
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
@@ -23,11 +25,146 @@
#include "drm.h"
#include "sor.h"
+#define SOR_REKEY 0x38
+
+struct tegra_sor_hdmi_settings {
+ unsigned long frequency;
+
+ u8 vcocap;
+ u8 ichpmp;
+ u8 loadadj;
+ u8 termadj;
+ u8 tx_pu;
+ u8 bg_vref;
+
+ u8 drive_current[4];
+ u8 preemphasis[4];
+};
+
+#if 1
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+ {
+ .frequency = 54000000,
+ .vcocap = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x10,
+ .bg_vref = 0x8,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 75000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x40,
+ .bg_vref = 0x8,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0x8,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0xa,
+ .drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+ .preemphasis = { 0x00, 0x17, 0x17, 0x17 },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0x8,
+ .drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ },
+};
+#else
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+ {
+ .frequency = 75000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x40,
+ .bg_vref = 0x8,
+ .drive_current = { 0x29, 0x29, 0x29, 0x29 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0x8,
+ .drive_current = { 0x30, 0x37, 0x37, 0x37 },
+ .preemphasis = { 0x01, 0x02, 0x02, 0x02 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x6,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0xf,
+ .drive_current = { 0x30, 0x37, 0x37, 0x37 },
+ .preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 0x3,
+ .ichpmp = 0xa,
+ .loadadj = 0x3,
+ .termadj = 0xb,
+ .tx_pu = 0x66,
+ .bg_vref = 0xe,
+ .drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
+ .preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
+ },
+};
+#endif
+
+struct tegra_sor_soc {
+ bool supports_edp;
+ bool supports_lvds;
+ bool supports_hdmi;
+ bool supports_dp;
+
+ const struct tegra_sor_hdmi_settings *settings;
+ unsigned int num_settings;
+};
+
+struct tegra_sor;
+
+struct tegra_sor_ops {
+ const char *name;
+ int (*probe)(struct tegra_sor *sor);
+ int (*remove)(struct tegra_sor *sor);
+};
+
struct tegra_sor {
struct host1x_client client;
struct tegra_output output;
struct device *dev;
+ const struct tegra_sor_soc *soc;
void __iomem *regs;
struct reset_control *rst;
@@ -38,12 +175,19 @@ struct tegra_sor {
struct tegra_dpaux *dpaux;
- struct mutex lock;
- bool enabled;
-
struct drm_info_list *debugfs_files;
struct drm_minor *minor;
struct dentry *debugfs;
+
+ const struct tegra_sor_ops *ops;
+
+ /* for HDMI 2.0 */
+ struct tegra_sor_hdmi_settings *settings;
+ unsigned int num_settings;
+
+ struct regulator *avdd_io_supply;
+ struct regulator *vdd_pll_supply;
+ struct regulator *hdmi_supply;
};
struct tegra_sor_config {
@@ -94,40 +238,40 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
- tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0);
+ tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
SOR_LANE_PREEMPHASIS_LANE0(0x0f);
- tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0);
+ tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
- value = SOR_LANE_POST_CURSOR_LANE3(0x00) |
- SOR_LANE_POST_CURSOR_LANE2(0x00) |
- SOR_LANE_POST_CURSOR_LANE1(0x00) |
- SOR_LANE_POST_CURSOR_LANE0(0x00);
- tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0);
+ value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
+ SOR_LANE_POSTCURSOR_LANE2(0x00) |
+ SOR_LANE_POSTCURSOR_LANE1(0x00) |
+ SOR_LANE_POSTCURSOR_LANE0(0x00);
+ tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
/* disable LVDS mode */
tegra_sor_writel(sor, 0, SOR_LVDS);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
usleep_range(10, 100);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
if (err < 0)
@@ -148,11 +292,11 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
if (err < 0)
return err;
- value = tegra_sor_readl(sor, SOR_DP_SPARE_0);
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value |= SOR_DP_SPARE_SEQ_ENABLE;
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
value |= SOR_DP_SPARE_MACRO_SOR_CLK;
- tegra_sor_writel(sor, value, SOR_DP_SPARE_0);
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
for (i = 0, value = 0; i < link->num_lanes; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -187,18 +331,59 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
return 0;
}
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+ u32 mask = 0x08, adj = 0, value;
+
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, SOR_PLL1);
+
+ while (mask) {
+ adj |= mask;
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, SOR_PLL1);
+
+ usleep_range(100, 200);
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ if (value & SOR_PLL1_TERM_COMPOUT)
+ adj &= ~mask;
+
+ mask >>= 1;
+ }
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, SOR_PLL1);
+
+ /* disable pad calibration logic */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+}
+
static void tegra_sor_super_update(struct tegra_sor *sor)
{
- tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
- tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0);
- tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
+ tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
+ tegra_sor_writel(sor, 1, SOR_SUPER_STATE0);
+ tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
}
static void tegra_sor_update(struct tegra_sor *sor)
{
- tegra_sor_writel(sor, 0, SOR_STATE_0);
- tegra_sor_writel(sor, 1, SOR_STATE_0);
- tegra_sor_writel(sor, 0, SOR_STATE_0);
+ tegra_sor_writel(sor, 0, SOR_STATE0);
+ tegra_sor_writel(sor, 1, SOR_STATE0);
+ tegra_sor_writel(sor, 0, SOR_STATE0);
}
static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
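
Reviewer note: tegra_sor_dp_term_calibrate() added above is a successive-approximation search over the 4-bit TMDS termination adjustment: each bit is set speculatively, the comparator bit (SOR_PLL1_TERM_COMPOUT) is sampled after a short settle time, and the bit is cleared again if the comparator tripped. The register-free sketch below shows the same search; the comparator callback, bit width and toy target are assumptions made for illustration.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Successive approximation over an N-bit adjustment: try each bit from
	 * MSB to LSB and keep it only while the comparator does not trip.
	 */
	static uint32_t calibrate(unsigned int bits, bool (*comparator)(uint32_t))
	{
		uint32_t mask = 1u << (bits - 1);
		uint32_t adj = 0;

		while (mask) {
			adj |= mask;		/* tentatively set this bit */

			if (comparator(adj))	/* overshoot: drop it again */
				adj &= ~mask;

			mask >>= 1;
		}

		return adj;
	}

	/* Toy comparator: "trips" once the adjustment exceeds a hidden target. */
	static bool above_target(uint32_t adj)
	{
		return adj > 9;
	}

	int main(void)
	{
		printf("calibrated value: %u\n", calibrate(4, above_target));
		return 0;
	}
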
@@ -235,16 +420,16 @@ static int tegra_sor_attach(struct tegra_sor *sor)
unsigned long value, timeout;
/* wake up in normal mode */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
value |= SOR_SUPER_STATE_MODE_NORMAL;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
/* attach */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value |= SOR_SUPER_STATE_ATTACHED;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
@@ -385,7 +570,7 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
}
static int tegra_sor_calc_config(struct tegra_sor *sor,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct tegra_sor_config *config,
struct drm_dp_link *link)
{
@@ -481,9 +666,9 @@ static int tegra_sor_detach(struct tegra_sor *sor)
unsigned long value, timeout;
/* switch to safe mode */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_MODE_NORMAL;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
@@ -498,15 +683,15 @@ static int tegra_sor_detach(struct tegra_sor *sor)
return -ETIMEDOUT;
/* go to sleep */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
/* detach */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_ATTACHED;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
@@ -552,10 +737,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
/* stop lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
@@ -575,39 +760,26 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
return -ETIMEDOUT;
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value |= SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_PLL_0);
- value |= SOR_PLL_0_POWER_OFF;
- value |= SOR_PLL_0_VCOPD;
- tegra_sor_writel(sor, value, SOR_PLL_0);
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, SOR_PLL0);
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_SEQ_PLLCAPPD;
- value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(20, 100);
return 0;
}
-static int tegra_sor_crc_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
-
- return 0;
-}
-
-static int tegra_sor_crc_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
{
u32 value;
@@ -615,8 +787,8 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_CRC_A);
- if (value & SOR_CRC_A_VALID)
+ value = tegra_sor_readl(sor, SOR_CRCA);
+ if (value & SOR_CRCA_VALID)
return 0;
usleep_range(100, 200);
@@ -625,24 +797,25 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
return -ETIMEDOUT;
}
-static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
- size_t size, loff_t *ppos)
+static int tegra_sor_show_crc(struct seq_file *s, void *data)
{
- struct tegra_sor *sor = file->private_data;
- ssize_t num, err;
- char buf[10];
+ struct drm_info_node *node = s->private;
+ struct tegra_sor *sor = node->info_ent->data;
+ struct drm_crtc *crtc = sor->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
u32 value;
- mutex_lock(&sor->lock);
+ drm_modeset_lock_all(drm);
- if (!sor->enabled) {
- err = -EAGAIN;
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
goto unlock;
}
- value = tegra_sor_readl(sor, SOR_STATE_1);
+ value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
- tegra_sor_writel(sor, value, SOR_STATE_1);
+ tegra_sor_writel(sor, value, SOR_STATE1);
value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
value |= SOR_CRC_CNTRL_ENABLE;
@@ -656,65 +829,66 @@ static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
if (err < 0)
goto unlock;
- tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
- value = tegra_sor_readl(sor, SOR_CRC_B);
+ tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA);
+ value = tegra_sor_readl(sor, SOR_CRCB);
- num = scnprintf(buf, sizeof(buf), "%08x\n", value);
-
- err = simple_read_from_buffer(buffer, size, ppos, buf, num);
+ seq_printf(s, "%08x\n", value);
unlock:
- mutex_unlock(&sor->lock);
+ drm_modeset_unlock_all(drm);
return err;
}
-static const struct file_operations tegra_sor_crc_fops = {
- .owner = THIS_MODULE,
- .open = tegra_sor_crc_open,
- .read = tegra_sor_crc_read,
- .release = tegra_sor_crc_release,
-};
-
static int tegra_sor_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_sor *sor = node->info_ent->data;
+ struct drm_crtc *crtc = sor->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
+
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
#define DUMP_REG(name) \
seq_printf(s, "%-38s %#05x %08x\n", #name, name, \
tegra_sor_readl(sor, name))
DUMP_REG(SOR_CTXSW);
- DUMP_REG(SOR_SUPER_STATE_0);
- DUMP_REG(SOR_SUPER_STATE_1);
- DUMP_REG(SOR_STATE_0);
- DUMP_REG(SOR_STATE_1);
- DUMP_REG(SOR_HEAD_STATE_0(0));
- DUMP_REG(SOR_HEAD_STATE_0(1));
- DUMP_REG(SOR_HEAD_STATE_1(0));
- DUMP_REG(SOR_HEAD_STATE_1(1));
- DUMP_REG(SOR_HEAD_STATE_2(0));
- DUMP_REG(SOR_HEAD_STATE_2(1));
- DUMP_REG(SOR_HEAD_STATE_3(0));
- DUMP_REG(SOR_HEAD_STATE_3(1));
- DUMP_REG(SOR_HEAD_STATE_4(0));
- DUMP_REG(SOR_HEAD_STATE_4(1));
- DUMP_REG(SOR_HEAD_STATE_5(0));
- DUMP_REG(SOR_HEAD_STATE_5(1));
+ DUMP_REG(SOR_SUPER_STATE0);
+ DUMP_REG(SOR_SUPER_STATE1);
+ DUMP_REG(SOR_STATE0);
+ DUMP_REG(SOR_STATE1);
+ DUMP_REG(SOR_HEAD_STATE0(0));
+ DUMP_REG(SOR_HEAD_STATE0(1));
+ DUMP_REG(SOR_HEAD_STATE1(0));
+ DUMP_REG(SOR_HEAD_STATE1(1));
+ DUMP_REG(SOR_HEAD_STATE2(0));
+ DUMP_REG(SOR_HEAD_STATE2(1));
+ DUMP_REG(SOR_HEAD_STATE3(0));
+ DUMP_REG(SOR_HEAD_STATE3(1));
+ DUMP_REG(SOR_HEAD_STATE4(0));
+ DUMP_REG(SOR_HEAD_STATE4(1));
+ DUMP_REG(SOR_HEAD_STATE5(0));
+ DUMP_REG(SOR_HEAD_STATE5(1));
DUMP_REG(SOR_CRC_CNTRL);
DUMP_REG(SOR_DP_DEBUG_MVID);
DUMP_REG(SOR_CLK_CNTRL);
DUMP_REG(SOR_CAP);
DUMP_REG(SOR_PWR);
DUMP_REG(SOR_TEST);
- DUMP_REG(SOR_PLL_0);
- DUMP_REG(SOR_PLL_1);
- DUMP_REG(SOR_PLL_2);
- DUMP_REG(SOR_PLL_3);
+ DUMP_REG(SOR_PLL0);
+ DUMP_REG(SOR_PLL1);
+ DUMP_REG(SOR_PLL2);
+ DUMP_REG(SOR_PLL3);
DUMP_REG(SOR_CSTM);
DUMP_REG(SOR_LVDS);
- DUMP_REG(SOR_CRC_A);
- DUMP_REG(SOR_CRC_B);
+ DUMP_REG(SOR_CRCA);
+ DUMP_REG(SOR_CRCB);
DUMP_REG(SOR_BLANK);
DUMP_REG(SOR_SEQ_CTL);
DUMP_REG(SOR_LANE_SEQ_CTL);
@@ -736,86 +910,89 @@ static int tegra_sor_show_regs(struct seq_file *s, void *data)
DUMP_REG(SOR_SEQ_INST(15));
DUMP_REG(SOR_PWM_DIV);
DUMP_REG(SOR_PWM_CTL);
- DUMP_REG(SOR_VCRC_A_0);
- DUMP_REG(SOR_VCRC_A_1);
- DUMP_REG(SOR_VCRC_B_0);
- DUMP_REG(SOR_VCRC_B_1);
- DUMP_REG(SOR_CCRC_A_0);
- DUMP_REG(SOR_CCRC_A_1);
- DUMP_REG(SOR_CCRC_B_0);
- DUMP_REG(SOR_CCRC_B_1);
- DUMP_REG(SOR_EDATA_A_0);
- DUMP_REG(SOR_EDATA_A_1);
- DUMP_REG(SOR_EDATA_B_0);
- DUMP_REG(SOR_EDATA_B_1);
- DUMP_REG(SOR_COUNT_A_0);
- DUMP_REG(SOR_COUNT_A_1);
- DUMP_REG(SOR_COUNT_B_0);
- DUMP_REG(SOR_COUNT_B_1);
- DUMP_REG(SOR_DEBUG_A_0);
- DUMP_REG(SOR_DEBUG_A_1);
- DUMP_REG(SOR_DEBUG_B_0);
- DUMP_REG(SOR_DEBUG_B_1);
+ DUMP_REG(SOR_VCRC_A0);
+ DUMP_REG(SOR_VCRC_A1);
+ DUMP_REG(SOR_VCRC_B0);
+ DUMP_REG(SOR_VCRC_B1);
+ DUMP_REG(SOR_CCRC_A0);
+ DUMP_REG(SOR_CCRC_A1);
+ DUMP_REG(SOR_CCRC_B0);
+ DUMP_REG(SOR_CCRC_B1);
+ DUMP_REG(SOR_EDATA_A0);
+ DUMP_REG(SOR_EDATA_A1);
+ DUMP_REG(SOR_EDATA_B0);
+ DUMP_REG(SOR_EDATA_B1);
+ DUMP_REG(SOR_COUNT_A0);
+ DUMP_REG(SOR_COUNT_A1);
+ DUMP_REG(SOR_COUNT_B0);
+ DUMP_REG(SOR_COUNT_B1);
+ DUMP_REG(SOR_DEBUG_A0);
+ DUMP_REG(SOR_DEBUG_A1);
+ DUMP_REG(SOR_DEBUG_B0);
+ DUMP_REG(SOR_DEBUG_B1);
DUMP_REG(SOR_TRIG);
DUMP_REG(SOR_MSCHECK);
DUMP_REG(SOR_XBAR_CTRL);
DUMP_REG(SOR_XBAR_POL);
- DUMP_REG(SOR_DP_LINKCTL_0);
- DUMP_REG(SOR_DP_LINKCTL_1);
- DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
- DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
- DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
- DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
- DUMP_REG(SOR_LANE_PREEMPHASIS_0);
- DUMP_REG(SOR_LANE_PREEMPHASIS_1);
- DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
- DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
- DUMP_REG(SOR_LANE_POST_CURSOR_0);
- DUMP_REG(SOR_LANE_POST_CURSOR_1);
- DUMP_REG(SOR_DP_CONFIG_0);
- DUMP_REG(SOR_DP_CONFIG_1);
- DUMP_REG(SOR_DP_MN_0);
- DUMP_REG(SOR_DP_MN_1);
- DUMP_REG(SOR_DP_PADCTL_0);
- DUMP_REG(SOR_DP_PADCTL_1);
- DUMP_REG(SOR_DP_DEBUG_0);
- DUMP_REG(SOR_DP_DEBUG_1);
- DUMP_REG(SOR_DP_SPARE_0);
- DUMP_REG(SOR_DP_SPARE_1);
+ DUMP_REG(SOR_DP_LINKCTL0);
+ DUMP_REG(SOR_DP_LINKCTL1);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT0);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT1);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT0);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT1);
+ DUMP_REG(SOR_LANE_PREEMPHASIS0);
+ DUMP_REG(SOR_LANE_PREEMPHASIS1);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS0);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS1);
+ DUMP_REG(SOR_LANE_POSTCURSOR0);
+ DUMP_REG(SOR_LANE_POSTCURSOR1);
+ DUMP_REG(SOR_DP_CONFIG0);
+ DUMP_REG(SOR_DP_CONFIG1);
+ DUMP_REG(SOR_DP_MN0);
+ DUMP_REG(SOR_DP_MN1);
+ DUMP_REG(SOR_DP_PADCTL0);
+ DUMP_REG(SOR_DP_PADCTL1);
+ DUMP_REG(SOR_DP_DEBUG0);
+ DUMP_REG(SOR_DP_DEBUG1);
+ DUMP_REG(SOR_DP_SPARE0);
+ DUMP_REG(SOR_DP_SPARE1);
DUMP_REG(SOR_DP_AUDIO_CTRL);
DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK0);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK1);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK2);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK3);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK4);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK5);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK6);
DUMP_REG(SOR_DP_TPG);
DUMP_REG(SOR_DP_TPG_CONFIG);
- DUMP_REG(SOR_DP_LQ_CSTM_0);
- DUMP_REG(SOR_DP_LQ_CSTM_1);
- DUMP_REG(SOR_DP_LQ_CSTM_2);
+ DUMP_REG(SOR_DP_LQ_CSTM0);
+ DUMP_REG(SOR_DP_LQ_CSTM1);
+ DUMP_REG(SOR_DP_LQ_CSTM2);
#undef DUMP_REG
- return 0;
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
}
static const struct drm_info_list debugfs_files[] = {
+ { "crc", tegra_sor_show_crc, 0, NULL },
{ "regs", tegra_sor_show_regs, 0, NULL },
};
static int tegra_sor_debugfs_init(struct tegra_sor *sor,
struct drm_minor *minor)
{
- struct dentry *entry;
+ const char *name = sor->soc->supports_dp ? "sor1" : "sor";
unsigned int i;
- int err = 0;
+ int err;
- sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
+ sor->debugfs = debugfs_create_dir(name, minor->debugfs_root);
if (!sor->debugfs)
return -ENOMEM;
@@ -835,14 +1012,9 @@ static int tegra_sor_debugfs_init(struct tegra_sor *sor,
if (err < 0)
goto free;
- entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
- &tegra_sor_crc_fops);
- if (!entry) {
- err = -ENOMEM;
- goto free;
- }
+ sor->minor = minor;
- return err;
+ return 0;
free:
kfree(sor->debugfs_files);
@@ -860,14 +1032,10 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
sor->minor = NULL;
kfree(sor->debugfs_files);
- sor->debugfs = NULL;
-
- debugfs_remove_recursive(sor->debugfs);
sor->debugfs_files = NULL;
-}
-static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
-{
+ debugfs_remove_recursive(sor->debugfs);
+ sor->debugfs = NULL;
}
static enum drm_connector_status
@@ -879,11 +1047,11 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
if (sor->dpaux)
return tegra_dpaux_detect(sor->dpaux);
- return connector_status_unknown;
+ return tegra_output_connector_detect(connector, force);
}
static const struct drm_connector_funcs tegra_sor_connector_funcs = {
- .dpms = tegra_sor_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_sor_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -926,22 +1094,102 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_sor_edp_disable(struct drm_encoder *encoder)
{
-}
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
-static void tegra_sor_encoder_prepare(struct drm_encoder *encoder)
-{
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+ }
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ if (sor->dpaux) {
+ err = tegra_dpaux_disable(sor->dpaux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to disable DP: %d\n", err);
+ }
+
+ err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ reset_control_assert(sor->rst);
+ clk_disable_unprepare(sor->clk);
}
-static void tegra_sor_encoder_commit(struct drm_encoder *encoder)
+#if 0
+static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
+ unsigned int *value)
{
+ unsigned int hfp, hsw, hbp, a = 0, b;
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
+
+ b = hfp - 1;
+
+ pr_info("a: %u, b: %u\n", a, b);
+ pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
+
+ if (a + hsw + hbp <= 11) {
+ a = 1 + 11 - hsw - hbp;
+ pr_info("a: %u\n", a);
+ }
+
+ if (a > b)
+ return -EINVAL;
+
+ if (hsw < 1)
+ return -EINVAL;
+
+ if (mode->hdisplay < 16)
+ return -EINVAL;
+
+ if (value) {
+ if (b > a && a % 2)
+ *value = a + 1;
+ else
+ *value = a;
+ }
+
+ return 0;
}
+#endif
-static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void tegra_sor_edp_enable(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
@@ -952,14 +1200,9 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
int err = 0;
u32 value;
- mutex_lock(&sor->lock);
-
- if (sor->enabled)
- goto unlock;
-
err = clk_prepare_enable(sor->clk);
if (err < 0)
- goto unlock;
+ dev_err(sor->dev, "failed to enable clock: %d\n", err);
reset_control_deassert(sor->rst);
@@ -978,7 +1221,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
if (err < 0) {
dev_err(sor->dev, "failed to probe eDP link: %d\n",
err);
- goto unlock;
+ return;
}
}
@@ -999,40 +1242,40 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_PLL_3);
- value |= SOR_PLL_3_PLL_VDD_MODE_V3_3;
- tegra_sor_writel(sor, value, SOR_PLL_3);
+ value = tegra_sor_readl(sor, SOR_PLL3);
+ value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, SOR_PLL3);
- value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST |
- SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT;
- tegra_sor_writel(sor, value, SOR_PLL_0);
+ value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
+ SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, SOR_PLL0);
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_SEQ_PLLCAPPD;
- value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
- value |= SOR_PLL_2_LVDS_ENABLE;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL2_LVDS_ENABLE;
+ tegra_sor_writel(sor, value, SOR_PLL2);
- value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM;
- tegra_sor_writel(sor, value, SOR_PLL_1);
+ value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, SOR_PLL1);
while (true) {
- value = tegra_sor_readl(sor, SOR_PLL_2);
- if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0)
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
break;
usleep_range(250, 1000);
}
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE;
- value &= ~SOR_PLL_2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
/*
* power up
@@ -1045,51 +1288,49 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
/* step 1 */
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN |
- SOR_PLL_2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
+ SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
- value = tegra_sor_readl(sor, SOR_PLL_0);
- value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF;
- tegra_sor_writel(sor, value, SOR_PLL_0);
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, SOR_PLL0);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
/* step 2 */
err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
- goto unlock;
- }
usleep_range(5, 100);
/* step 3 */
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(20, 100);
/* step 4 */
- value = tegra_sor_readl(sor, SOR_PLL_0);
- value &= ~SOR_PLL_0_POWER_OFF;
- value &= ~SOR_PLL_0_VCOPD;
- tegra_sor_writel(sor, value, SOR_PLL_0);
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value &= ~SOR_PLL0_VCOPD;
+ value &= ~SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, SOR_PLL0);
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(200, 1000);
/* step 5 */
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
/* switch to DP clock */
err = clk_set_parent(sor->clk, sor->clk_dp);
@@ -1097,7 +1338,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
/* power DP lanes */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
if (link.num_lanes <= 2)
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
@@ -1114,12 +1355,12 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
else
value |= SOR_DP_PADCTL_PD_TXD_0;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
/* start lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
@@ -1141,14 +1382,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
/* set linkctl */
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value |= SOR_DP_LINKCTL_ENABLE;
value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
for (i = 0, value = 0; i < 4; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -1159,7 +1400,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_sor_writel(sor, value, SOR_DP_TPG);
- value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
+ value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
@@ -1176,7 +1417,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
- tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
+ tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
@@ -1189,33 +1430,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
/* enable pad calibration logic */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
if (sor->dpaux) {
u8 rate, lanes;
err = drm_dp_link_probe(aux, &link);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to probe eDP link: %d\n",
err);
- goto unlock;
- }
err = drm_dp_link_power_up(aux, &link);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to power up eDP link: %d\n",
err);
- goto unlock;
- }
err = drm_dp_link_configure(aux, &link);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to configure eDP link: %d\n",
err);
- goto unlock;
- }
rate = drm_dp_link_rate_to_bw_code(link.rate);
lanes = link.num_lanes;
@@ -1225,14 +1460,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
/* disable training pattern generator */
@@ -1249,17 +1484,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
if (err < 0) {
dev_err(sor->dev, "DP fast link training failed: %d\n",
err);
- goto unlock;
}
dev_dbg(sor->dev, "fast link training succeeded\n");
}
err = tegra_sor_power_up(sor, 250);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- goto unlock;
- }
/*
* configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
@@ -1295,7 +1527,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
break;
}
- tegra_sor_writel(sor, value, SOR_STATE_1);
+ tegra_sor_writel(sor, value, SOR_STATE1);
/*
* TODO: The video timing programming below doesn't seem to match the
@@ -1303,25 +1535,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
*/
value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0));
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
vse = mode->vsync_end - mode->vsync_start - 1;
hse = mode->hsync_end - mode->hsync_start - 1;
value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0));
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
vbe = vse + (mode->vsync_start - mode->vdisplay);
hbe = hse + (mode->hsync_start - mode->hdisplay);
value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0));
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
vbs = vbe + mode->vdisplay;
hbs = hbe + mode->hdisplay;
value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+ tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
/* CSTM (LVDS, link A/B, upper) */
value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
@@ -1330,10 +1564,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
/* PWM setup */
err = tegra_sor_setup_pwm(sor, 250);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to setup PWM: %d\n", err);
- goto unlock;
- }
tegra_sor_update(sor);
@@ -1344,147 +1576,610 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_dc_commit(dc);
err = tegra_sor_attach(sor);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
- goto unlock;
- }
err = tegra_sor_wakeup(sor);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to enable DC: %d\n", err);
- goto unlock;
- }
if (output->panel)
drm_panel_enable(output->panel);
-
- sor->enabled = true;
-
-unlock:
- mutex_unlock(&sor->lock);
}
-static void tegra_sor_encoder_disable(struct drm_encoder *encoder)
+static int
+tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_sor *sor = to_sor(output);
- u32 value;
int err;
- mutex_lock(&sor->lock);
+ err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
+ pclk, 0);
+ if (err < 0) {
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
+ return err;
+ }
- if (!sor->enabled)
- goto unlock;
+ return 0;
+}
- if (output->panel)
- drm_panel_disable(output->panel);
+static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
+ .disable = tegra_sor_edp_disable,
+ .enable = tegra_sor_edp_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
- err = tegra_sor_detach(sor);
- if (err < 0) {
- dev_err(sor->dev, "failed to detach SOR: %d\n", err);
- goto unlock;
+static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
+{
+ u32 value = 0;
+ size_t i;
+
+ for (i = size; i > 0; i--)
+ value = (value << 8) | ptr[i - 1];
+
+ return value;
+}
+
+static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
+ const void *data, size_t size)
+{
+ const u8 *ptr = data;
+ unsigned long offset;
+ size_t i, j;
+ u32 value;
+
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
+ break;
+
+ default:
+ dev_err(sor->dev, "unsupported infoframe type: %02x\n",
+ ptr[0]);
+ return;
}
- tegra_sor_writel(sor, 0, SOR_STATE_1);
- tegra_sor_update(sor);
+ value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+ INFOFRAME_HEADER_VERSION(ptr[1]) |
+ INFOFRAME_HEADER_LEN(ptr[2]);
+ tegra_sor_writel(sor, value, offset);
+ offset++;
/*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
+ * Each subpack contains 7 bytes, divided into:
+ * - subpack_low: bytes 0 - 3
+ * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ size_t rem = size - i, num = min_t(size_t, rem, 4);
- tegra_dc_commit(dc);
- }
+ value = tegra_sor_hdmi_subpack(&ptr[i], num);
+ tegra_sor_writel(sor, value, offset++);
- err = tegra_sor_power_down(sor);
- if (err < 0) {
- dev_err(sor->dev, "failed to power down SOR: %d\n", err);
- goto unlock;
+ num = min_t(size_t, rem - num, 3);
+
+ value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
+ tegra_sor_writel(sor, value, offset++);
}
+}
- if (sor->dpaux) {
- err = tegra_dpaux_disable(sor->dpaux);
- if (err < 0) {
- dev_err(sor->dev, "failed to disable DP: %d\n", err);
- goto unlock;
- }
+static int
+tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
+ const struct drm_display_mode *mode)
+{
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ struct hdmi_avi_infoframe frame;
+ u32 value;
+ int err;
+
+ /* disable AVI infoframe */
+ value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_SINGLE;
+ value &= ~INFOFRAME_CTRL_OTHER;
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+ return err;
}
- err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
- dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
- goto unlock;
+ dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
+ return err;
}
- if (output->panel)
- drm_panel_unprepare(output->panel);
+ tegra_sor_hdmi_write_infopack(sor, buffer, err);
- clk_disable_unprepare(sor->clk);
- reset_control_assert(sor->rst);
+ /* enable AVI infoframe */
+ value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
- sor->enabled = false;
+ return 0;
+}
-unlock:
- mutex_unlock(&sor->lock);
+static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
}
-static int
-tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static struct tegra_sor_hdmi_settings *
+tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
+{
+ unsigned int i;
+
+ for (i = 0; i < sor->num_settings; i++)
+ if (frequency <= sor->settings[i].frequency)
+ return &sor->settings[i];
+
+ return NULL;
+}
+
+static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
- unsigned long pclk = crtc_state->mode.clock * 1000;
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor *sor = to_sor(output);
+ u32 value;
int err;
- err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
- pclk, 0);
- if (err < 0) {
- dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
- return err;
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* disable display to SOR clock */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR1_TIMING_CYA;
+ value &= ~SOR1_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_rail_power_off(TEGRA_IO_RAIL_HDMI);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
+
+ reset_control_assert(sor->rst);
+ usleep_range(1000, 2000);
+ clk_disable_unprepare(sor->clk);
+}
+
+static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
+ struct tegra_sor_hdmi_settings *settings;
+ struct tegra_sor *sor = to_sor(output);
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+ u32 value;
+ int err;
+
+ mode = &encoder->crtc->state->adjusted_mode;
+ info = &output->connector.display_info;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable clock: %d\n", err);
+
+ usleep_range(1000, 2000);
+
+ reset_control_deassert(sor->rst);
+
+ err = clk_set_parent(sor->clk, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ div = clk_get_rate(sor->clk) / 1000000 * 4;
+
+ err = tegra_io_rail_power_on(TEGRA_IO_RAIL_HDMI);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on HDMI rail: %d\n", err);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_PLL3);
+ value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, SOR_PLL3);
+
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value &= ~SOR_PLL0_VCOPD;
+ value &= ~SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, SOR_PLL0);
+
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ while (true) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
+ break;
+
+ usleep_range(250, 1000);
}
- return 0;
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ while (true) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ if (mode->clock < 340000)
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
+ else
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
+
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ value |= SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
+ SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
+ tegra_sor_writel(sor, value, SOR_SEQ_CTL);
+
+ value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
+ tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
+ tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
+
+ /* program the reference clock */
+ value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
+ tegra_sor_writel(sor, value, SOR_REFCLK);
+
+ /* XXX don't hardcode */
+ value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
+ SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
+ SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
+ SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
+ SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
+ SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
+ SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
+ SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
+ SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
+ SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+
+ value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
+
+ /* XXX is this the proper check? */
+ if (mode->clock < 75000)
+ value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
+
+ tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
+
+ max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
+
+ value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
+ SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
+ tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
+
+ /* H_PULSE2 setup */
+ pulse_start = h_ref_to_sync + (mode->hsync_end - mode->hsync_start) +
+ (mode->htotal - mode->hsync_end) - 10;
+
+ value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
+ PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+ value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ value |= H_PULSE2_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+ /* infoframe setup */
+ err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+
+ /* XXX HDMI audio support not implemented yet */
+ tegra_sor_hdmi_disable_audio_infoframe(sor);
+
+ /* use single TMDS protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* power up pad calibration */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ /* production settings */
+ settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
+ if (!settings) {
+ dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
+ mode->clock * 1000);
+ return;
+ }
+
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(settings->ichpmp);
+ value |= SOR_PLL0_VCOCAP(settings->vcocap);
+ tegra_sor_writel(sor, value, SOR_PLL0);
+
+ tegra_sor_dp_term_calibrate(sor);
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+ value |= SOR_PLL1_LOADADJ(settings->loadadj);
+ tegra_sor_writel(sor, value, SOR_PLL1);
+
+ value = tegra_sor_readl(sor, SOR_PLL3);
+ value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
+ value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref);
+ tegra_sor_writel(sor, value, SOR_PLL3);
+
+ value = settings->drive_current[0] << 24 |
+ settings->drive_current[1] << 16 |
+ settings->drive_current[2] << 8 |
+ settings->drive_current[3] << 0;
+ tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+
+ value = settings->preemphasis[0] << 24 |
+ settings->preemphasis[1] << 16 |
+ settings->preemphasis[2] << 8 |
+ settings->preemphasis[3] << 0;
+ tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ /* power down pad calibration */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ /* miscellaneous display controller settings */
+ value = VSYNC_H_POSITION(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
+ value &= ~DITHER_CONTROL_MASK;
+ value &= ~BASE_COLOR_SIZE_MASK;
+
+ switch (info->bpc) {
+ case 6:
+ value |= BASE_COLOR_SIZE_666;
+ break;
+
+ case 8:
+ value |= BASE_COLOR_SIZE_888;
+ break;
+
+ default:
+ WARN(1, "%u bits-per-color not supported\n", info->bpc);
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* configure mode */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+ value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+ SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
+ switch (info->bpc) {
+ case 8:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+
+ case 6:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+ value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
+ value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+ value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+ value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
+ value |= SOR_HEAD_STATE_COLORSPACE_RGB;
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+ /*
+ * TODO: The video timing programming below doesn't seem to match the
+ * register definitions.
+ */
+
+ value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+ /* sync end = sync width - 1 */
+ vse = mode->vsync_end - mode->vsync_start - 1;
+ hse = mode->hsync_end - mode->hsync_start - 1;
+
+ value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+ /* blank end = sync end + back porch */
+ vbe = vse + (mode->vtotal - mode->vsync_end);
+ hbe = hse + (mode->htotal - mode->hsync_end);
+
+ value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+ /* blank start = blank end + active */
+ vbs = vbe + mode->vdisplay;
+ hbs = hbe + mode->hdisplay;
+
+ value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+ tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ /* enable display to SOR clock and generate HDMI preamble */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR1_ENABLE | SOR1_TIMING_CYA;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
}
-static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = {
- .dpms = tegra_sor_encoder_dpms,
- .prepare = tegra_sor_encoder_prepare,
- .commit = tegra_sor_encoder_commit,
- .mode_set = tegra_sor_encoder_mode_set,
- .disable = tegra_sor_encoder_disable,
+static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
+ .disable = tegra_sor_hdmi_disable,
+ .enable = tegra_sor_hdmi_enable,
.atomic_check = tegra_sor_encoder_atomic_check,
};
static int tegra_sor_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
+ const struct drm_encoder_helper_funcs *helpers = NULL;
struct tegra_sor *sor = host1x_client_to_sor(client);
+ int connector = DRM_MODE_CONNECTOR_Unknown;
+ int encoder = DRM_MODE_ENCODER_NONE;
int err;
- if (!sor->dpaux)
- return -ENODEV;
+ if (!sor->dpaux) {
+ if (sor->soc->supports_hdmi) {
+ connector = DRM_MODE_CONNECTOR_HDMIA;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_hdmi_helpers;
+ } else if (sor->soc->supports_lvds) {
+ connector = DRM_MODE_CONNECTOR_LVDS;
+ encoder = DRM_MODE_ENCODER_LVDS;
+ }
+ } else {
+ if (sor->soc->supports_edp) {
+ connector = DRM_MODE_CONNECTOR_eDP;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_edp_helpers;
+ } else if (sor->soc->supports_dp) {
+ connector = DRM_MODE_CONNECTOR_DisplayPort;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ }
+ }
sor->output.dev = sor->dev;
drm_connector_init(drm, &sor->output.connector,
&tegra_sor_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
+ connector);
drm_connector_helper_add(&sor->output.connector,
&tegra_sor_connector_helper_funcs);
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&sor->output.encoder,
- &tegra_sor_encoder_helper_funcs);
+ encoder);
+ drm_encoder_helper_add(&sor->output.encoder, helpers);
drm_mode_connector_attach_encoder(&sor->output.connector,
&sor->output.encoder);
@@ -1577,18 +2272,130 @@ static const struct host1x_client_ops sor_client_ops = {
.exit = tegra_sor_exit,
};
+static const struct tegra_sor_ops tegra_sor_edp_ops = {
+ .name = "eDP",
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+ if (IS_ERR(sor->avdd_io_supply)) {
+ dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+ PTR_ERR(sor->avdd_io_supply));
+ return PTR_ERR(sor->avdd_io_supply);
+ }
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+ if (IS_ERR(sor->vdd_pll_supply)) {
+ dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+ PTR_ERR(sor->vdd_pll_supply));
+ return PTR_ERR(sor->vdd_pll_supply);
+ }
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+ if (IS_ERR(sor->hdmi_supply)) {
+ dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+ PTR_ERR(sor->hdmi_supply));
+ return PTR_ERR(sor->hdmi_supply);
+ }
+
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->hdmi_supply);
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+ .name = "HDMI",
+ .probe = tegra_sor_hdmi_probe,
+ .remove = tegra_sor_hdmi_remove,
+};
+
+static const struct tegra_sor_soc tegra124_sor = {
+ .supports_edp = true,
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor = {
+ .supports_edp = true,
+ .supports_lvds = false,
+ .supports_hdmi = false,
+ .supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor1 = {
+ .supports_edp = false,
+ .supports_lvds = false,
+ .supports_hdmi = true,
+ .supports_dp = true,
+
+ .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
+ .settings = tegra210_sor_hdmi_defaults,
+};
+
+static const struct of_device_id tegra_sor_of_match[] = {
+ { .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
+ { .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+ { .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
+
static int tegra_sor_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct device_node *np;
struct tegra_sor *sor;
struct resource *regs;
int err;
+ match = of_match_device(tegra_sor_of_match, &pdev->dev);
+
sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
if (!sor)
return -ENOMEM;
sor->output.dev = sor->dev = &pdev->dev;
+ sor->soc = match->data;
+
+ sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
+ sor->soc->num_settings *
+ sizeof(*sor->settings),
+ GFP_KERNEL);
+ if (!sor->settings)
+ return -ENOMEM;
+
+ sor->num_settings = sor->soc->num_settings;
np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
if (np) {
@@ -1599,51 +2406,106 @@ static int tegra_sor_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
+ if (!sor->dpaux) {
+ if (sor->soc->supports_hdmi) {
+ sor->ops = &tegra_sor_hdmi_ops;
+ } else if (sor->soc->supports_lvds) {
+ dev_err(&pdev->dev, "LVDS not supported yet\n");
+ return -ENODEV;
+ } else {
+ dev_err(&pdev->dev, "unknown (non-DP) support\n");
+ return -ENODEV;
+ }
+ } else {
+ if (sor->soc->supports_edp) {
+ sor->ops = &tegra_sor_edp_ops;
+ } else if (sor->soc->supports_dp) {
+ dev_err(&pdev->dev, "DisplayPort not supported yet\n");
+ return -ENODEV;
+ } else {
+ dev_err(&pdev->dev, "unknown (DP) support\n");
+ return -ENODEV;
+ }
+ }
+
err = tegra_output_probe(&sor->output);
- if (err < 0)
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to probe output: %d\n", err);
return err;
+ }
+
+ if (sor->ops && sor->ops->probe) {
+ err = sor->ops->probe(sor);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to probe %s: %d\n",
+ sor->ops->name, err);
+ goto output;
+ }
+ }
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sor->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(sor->regs))
- return PTR_ERR(sor->regs);
+ if (IS_ERR(sor->regs)) {
+ err = PTR_ERR(sor->regs);
+ goto remove;
+ }
sor->rst = devm_reset_control_get(&pdev->dev, "sor");
- if (IS_ERR(sor->rst))
- return PTR_ERR(sor->rst);
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+ dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
+ goto remove;
+ }
sor->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sor->clk))
- return PTR_ERR(sor->clk);
+ if (IS_ERR(sor->clk)) {
+ err = PTR_ERR(sor->clk);
+ dev_err(&pdev->dev, "failed to get module clock: %d\n", err);
+ goto remove;
+ }
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
- if (IS_ERR(sor->clk_parent))
- return PTR_ERR(sor->clk_parent);
+ if (IS_ERR(sor->clk_parent)) {
+ err = PTR_ERR(sor->clk_parent);
+ dev_err(&pdev->dev, "failed to get parent clock: %d\n", err);
+ goto remove;
+ }
sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
- if (IS_ERR(sor->clk_safe))
- return PTR_ERR(sor->clk_safe);
+ if (IS_ERR(sor->clk_safe)) {
+ err = PTR_ERR(sor->clk_safe);
+ dev_err(&pdev->dev, "failed to get safe clock: %d\n", err);
+ goto remove;
+ }
sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
- if (IS_ERR(sor->clk_dp))
- return PTR_ERR(sor->clk_dp);
+ if (IS_ERR(sor->clk_dp)) {
+ err = PTR_ERR(sor->clk_dp);
+ dev_err(&pdev->dev, "failed to get DP clock: %d\n", err);
+ goto remove;
+ }
INIT_LIST_HEAD(&sor->client.list);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
- mutex_init(&sor->lock);
-
err = host1x_client_register(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto remove;
}
platform_set_drvdata(pdev, sor);
return 0;
+
+remove:
+ if (sor->ops && sor->ops->remove)
+ sor->ops->remove(sor);
+output:
+ tegra_output_remove(&sor->output);
+ return err;
}
static int tegra_sor_remove(struct platform_device *pdev)
@@ -1658,17 +2520,17 @@ static int tegra_sor_remove(struct platform_device *pdev)
return err;
}
+ if (sor->ops && sor->ops->remove) {
+ err = sor->ops->remove(sor);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
+ }
+
tegra_output_remove(&sor->output);
return 0;
}
-static const struct of_device_id tegra_sor_of_match[] = {
- { .compatible = "nvidia,tegra124-sor", },
- { },
-};
-MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
-
struct platform_driver tegra_sor_driver = {
.driver = {
.name = "tegra-sor",
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index a5f8853fedb5..2d31d027e3f6 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -11,9 +11,9 @@
#define SOR_CTXSW 0x00
-#define SOR_SUPER_STATE_0 0x01
+#define SOR_SUPER_STATE0 0x01
-#define SOR_SUPER_STATE_1 0x02
+#define SOR_SUPER_STATE1 0x02
#define SOR_SUPER_STATE_ATTACHED (1 << 3)
#define SOR_SUPER_STATE_MODE_NORMAL (1 << 2)
#define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0)
@@ -21,9 +21,9 @@
#define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0)
#define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0)
-#define SOR_STATE_0 0x03
+#define SOR_STATE0 0x03
-#define SOR_STATE_1 0x04
+#define SOR_STATE1 0x04
#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
@@ -33,19 +33,27 @@
#define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8)
#define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8)
#define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
#define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8)
#define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6)
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_OWNER_MASK 0xf
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
-#define SOR_HEAD_STATE_0(x) (0x05 + (x))
-#define SOR_HEAD_STATE_1(x) (0x07 + (x))
-#define SOR_HEAD_STATE_2(x) (0x09 + (x))
-#define SOR_HEAD_STATE_3(x) (0x0b + (x))
-#define SOR_HEAD_STATE_4(x) (0x0d + (x))
-#define SOR_HEAD_STATE_5(x) (0x0f + (x))
+#define SOR_HEAD_STATE0(x) (0x05 + (x))
+#define SOR_HEAD_STATE_RANGECOMPRESS_MASK (0x1 << 3)
+#define SOR_HEAD_STATE_DYNRANGE_MASK (0x1 << 2)
+#define SOR_HEAD_STATE_DYNRANGE_VESA (0 << 2)
+#define SOR_HEAD_STATE_DYNRANGE_CEA (1 << 2)
+#define SOR_HEAD_STATE_COLORSPACE_MASK (0x3 << 0)
+#define SOR_HEAD_STATE_COLORSPACE_RGB (0 << 0)
+#define SOR_HEAD_STATE1(x) (0x07 + (x))
+#define SOR_HEAD_STATE2(x) (0x09 + (x))
+#define SOR_HEAD_STATE3(x) (0x0b + (x))
+#define SOR_HEAD_STATE4(x) (0x0d + (x))
+#define SOR_HEAD_STATE5(x) (0x0f + (x))
#define SOR_CRC_CNTRL 0x11
#define SOR_CRC_CNTRL_ENABLE (1 << 0)
#define SOR_DP_DEBUG_MVID 0x12
@@ -75,62 +83,101 @@
#define SOR_TEST_HEAD_MODE_MASK (3 << 8)
#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8)
-#define SOR_PLL_0 0x17
-#define SOR_PLL_0_ICHPMP_MASK (0xf << 24)
-#define SOR_PLL_0_ICHPMP(x) (((x) & 0xf) << 24)
-#define SOR_PLL_0_VCOCAP_MASK (0xf << 8)
-#define SOR_PLL_0_VCOCAP(x) (((x) & 0xf) << 8)
-#define SOR_PLL_0_VCOCAP_RST SOR_PLL_0_VCOCAP(3)
-#define SOR_PLL_0_PLLREG_MASK (0x3 << 6)
-#define SOR_PLL_0_PLLREG_LEVEL(x) (((x) & 0x3) << 6)
-#define SOR_PLL_0_PLLREG_LEVEL_V25 SOR_PLL_0_PLLREG_LEVEL(0)
-#define SOR_PLL_0_PLLREG_LEVEL_V15 SOR_PLL_0_PLLREG_LEVEL(1)
-#define SOR_PLL_0_PLLREG_LEVEL_V35 SOR_PLL_0_PLLREG_LEVEL(2)
-#define SOR_PLL_0_PLLREG_LEVEL_V45 SOR_PLL_0_PLLREG_LEVEL(3)
-#define SOR_PLL_0_PULLDOWN (1 << 5)
-#define SOR_PLL_0_RESISTOR_EXT (1 << 4)
-#define SOR_PLL_0_VCOPD (1 << 2)
-#define SOR_PLL_0_POWER_OFF (1 << 0)
-
-#define SOR_PLL_1 0x18
+#define SOR_PLL0 0x17
+#define SOR_PLL0_ICHPMP_MASK (0xf << 24)
+#define SOR_PLL0_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL0_VCOCAP_MASK (0xf << 8)
+#define SOR_PLL0_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL0_VCOCAP_RST SOR_PLL0_VCOCAP(3)
+#define SOR_PLL0_PLLREG_MASK (0x3 << 6)
+#define SOR_PLL0_PLLREG_LEVEL(x) (((x) & 0x3) << 6)
+#define SOR_PLL0_PLLREG_LEVEL_V25 SOR_PLL0_PLLREG_LEVEL(0)
+#define SOR_PLL0_PLLREG_LEVEL_V15 SOR_PLL0_PLLREG_LEVEL(1)
+#define SOR_PLL0_PLLREG_LEVEL_V35 SOR_PLL0_PLLREG_LEVEL(2)
+#define SOR_PLL0_PLLREG_LEVEL_V45 SOR_PLL0_PLLREG_LEVEL(3)
+#define SOR_PLL0_PULLDOWN (1 << 5)
+#define SOR_PLL0_RESISTOR_EXT (1 << 4)
+#define SOR_PLL0_VCOPD (1 << 2)
+#define SOR_PLL0_PWR (1 << 0)
+
+#define SOR_PLL1 0x18
/* XXX: read-only bit? */
-#define SOR_PLL_1_TERM_COMPOUT (1 << 15)
-#define SOR_PLL_1_TMDS_TERM (1 << 8)
-
-#define SOR_PLL_2 0x19
-#define SOR_PLL_2_LVDS_ENABLE (1 << 25)
-#define SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE (1 << 24)
-#define SOR_PLL_2_PORT_POWERDOWN (1 << 23)
-#define SOR_PLL_2_BANDGAP_POWERDOWN (1 << 22)
-#define SOR_PLL_2_POWERDOWN_OVERRIDE (1 << 18)
-#define SOR_PLL_2_SEQ_PLLCAPPD (1 << 17)
-
-#define SOR_PLL_3 0x1a
-#define SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13)
-#define SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13)
+#define SOR_PLL1_LOADADJ_MASK (0xf << 20)
+#define SOR_PLL1_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL1_TERM_COMPOUT (1 << 15)
+#define SOR_PLL1_TMDS_TERMADJ_MASK (0xf << 9)
+#define SOR_PLL1_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL1_TMDS_TERM (1 << 8)
+
+#define SOR_PLL2 0x19
+#define SOR_PLL2_LVDS_ENABLE (1 << 25)
+#define SOR_PLL2_SEQ_PLLCAPPD_ENFORCE (1 << 24)
+#define SOR_PLL2_PORT_POWERDOWN (1 << 23)
+#define SOR_PLL2_BANDGAP_POWERDOWN (1 << 22)
+#define SOR_PLL2_POWERDOWN_OVERRIDE (1 << 18)
+#define SOR_PLL2_SEQ_PLLCAPPD (1 << 17)
+#define SOR_PLL2_SEQ_PLL_PULLDOWN (1 << 16)
+
+#define SOR_PLL3 0x1a
+#define SOR_PLL3_BG_VREF_LEVEL_MASK (0xf << 24)
+#define SOR_PLL3_BG_VREF_LEVEL(x) (((x) & 0xf) << 24)
+#define SOR_PLL3_PLL_VDD_MODE_1V8 (0 << 13)
+#define SOR_PLL3_PLL_VDD_MODE_3V3 (1 << 13)
#define SOR_CSTM 0x1b
+#define SOR_CSTM_ROTCLK_MASK (0xf << 24)
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
#define SOR_CSTM_LVDS (1 << 16)
#define SOR_CSTM_LINK_ACT_B (1 << 15)
#define SOR_CSTM_LINK_ACT_A (1 << 14)
#define SOR_CSTM_UPPER (1 << 11)
#define SOR_LVDS 0x1c
-#define SOR_CRC_A 0x1d
-#define SOR_CRC_A_VALID (1 << 0)
-#define SOR_CRC_A_RESET (1 << 0)
-#define SOR_CRC_B 0x1e
+#define SOR_CRCA 0x1d
+#define SOR_CRCA_VALID (1 << 0)
+#define SOR_CRCA_RESET (1 << 0)
+#define SOR_CRCB 0x1e
#define SOR_BLANK 0x1f
#define SOR_SEQ_CTL 0x20
+#define SOR_SEQ_CTL_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_CTL_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_CTL_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
#define SOR_LANE_SEQ_CTL 0x21
#define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31)
+#define SOR_LANE_SEQ_CTL_STATE_BUSY (1 << 28)
#define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20)
#define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20)
#define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16)
#define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16)
+#define SOR_LANE_SEQ_CTL_DELAY(x) (((x) & 0xf) << 12)
#define SOR_SEQ_INST(x) (0x22 + (x))
+#define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+#define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define SOR_SEQ_INST_ASSERT_PLL_RESET (1 << 29)
+#define SOR_SEQ_INST_BLANK_V (1 << 28)
+#define SOR_SEQ_INST_BLANK_H (1 << 27)
+#define SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_SEQUENCE_UP (0 << 19)
+#define SOR_SEQ_INST_SEQUENCE_DOWN (1 << 19)
+#define SOR_SEQ_INST_LANE_SEQ_STOP (0 << 18)
+#define SOR_SEQ_INST_LANE_SEQ_RUN (1 << 18)
+#define SOR_SEQ_INST_PORT_POWERDOWN (1 << 17)
+#define SOR_SEQ_INST_PLL_POWERDOWN (1 << 16)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_WAIT_US (0 << 12)
+#define SOR_SEQ_INST_WAIT_MS (1 << 12)
+#define SOR_SEQ_INST_WAIT_VSYNC (2 << 12)
+#define SOR_SEQ_INST_WAIT(x) (((x) & 0x3ff) << 0)
#define SOR_PWM_DIV 0x32
#define SOR_PWM_DIV_MASK 0xffffff
@@ -140,32 +187,36 @@
#define SOR_PWM_CTL_CLK_SEL (1 << 30)
#define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff
-#define SOR_VCRC_A_0 0x34
-#define SOR_VCRC_A_1 0x35
-#define SOR_VCRC_B_0 0x36
-#define SOR_VCRC_B_1 0x37
-#define SOR_CCRC_A_0 0x38
-#define SOR_CCRC_A_1 0x39
-#define SOR_CCRC_B_0 0x3a
-#define SOR_CCRC_B_1 0x3b
-#define SOR_EDATA_A_0 0x3c
-#define SOR_EDATA_A_1 0x3d
-#define SOR_EDATA_B_0 0x3e
-#define SOR_EDATA_B_1 0x3f
-#define SOR_COUNT_A_0 0x40
-#define SOR_COUNT_A_1 0x41
-#define SOR_COUNT_B_0 0x42
-#define SOR_COUNT_B_1 0x43
-#define SOR_DEBUG_A_0 0x44
-#define SOR_DEBUG_A_1 0x45
-#define SOR_DEBUG_B_0 0x46
-#define SOR_DEBUG_B_1 0x47
+#define SOR_VCRC_A0 0x34
+#define SOR_VCRC_A1 0x35
+#define SOR_VCRC_B0 0x36
+#define SOR_VCRC_B1 0x37
+#define SOR_CCRC_A0 0x38
+#define SOR_CCRC_A1 0x39
+#define SOR_CCRC_B0 0x3a
+#define SOR_CCRC_B1 0x3b
+#define SOR_EDATA_A0 0x3c
+#define SOR_EDATA_A1 0x3d
+#define SOR_EDATA_B0 0x3e
+#define SOR_EDATA_B1 0x3f
+#define SOR_COUNT_A0 0x40
+#define SOR_COUNT_A1 0x41
+#define SOR_COUNT_B0 0x42
+#define SOR_COUNT_B1 0x43
+#define SOR_DEBUG_A0 0x44
+#define SOR_DEBUG_A1 0x45
+#define SOR_DEBUG_B0 0x46
+#define SOR_DEBUG_B1 0x47
#define SOR_TRIG 0x48
#define SOR_MSCHECK 0x49
#define SOR_XBAR_CTRL 0x4a
+#define SOR_XBAR_CTRL_LINK1_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 17)
+#define SOR_XBAR_CTRL_LINK0_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 2)
+#define SOR_XBAR_CTRL_LINK_SWAP (1 << 1)
+#define SOR_XBAR_CTRL_BYPASS (1 << 0)
#define SOR_XBAR_POL 0x4b
-#define SOR_DP_LINKCTL_0 0x4c
+#define SOR_DP_LINKCTL0 0x4c
#define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16)
#define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16)
#define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14)
@@ -173,34 +224,34 @@
#define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2)
#define SOR_DP_LINKCTL_ENABLE (1 << 0)
-#define SOR_DP_LINKCTL_1 0x4d
+#define SOR_DP_LINKCTL1 0x4d
-#define SOR_LANE_DRIVE_CURRENT_0 0x4e
-#define SOR_LANE_DRIVE_CURRENT_1 0x4f
-#define SOR_LANE4_DRIVE_CURRENT_0 0x50
-#define SOR_LANE4_DRIVE_CURRENT_1 0x51
+#define SOR_LANE_DRIVE_CURRENT0 0x4e
+#define SOR_LANE_DRIVE_CURRENT1 0x4f
+#define SOR_LANE4_DRIVE_CURRENT0 0x50
+#define SOR_LANE4_DRIVE_CURRENT1 0x51
#define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
#define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
#define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
#define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
-#define SOR_LANE_PREEMPHASIS_0 0x52
-#define SOR_LANE_PREEMPHASIS_1 0x53
-#define SOR_LANE4_PREEMPHASIS_0 0x54
-#define SOR_LANE4_PREEMPHASIS_1 0x55
+#define SOR_LANE_PREEMPHASIS0 0x52
+#define SOR_LANE_PREEMPHASIS1 0x53
+#define SOR_LANE4_PREEMPHASIS0 0x54
+#define SOR_LANE4_PREEMPHASIS1 0x55
#define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
#define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
#define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
#define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
-#define SOR_LANE_POST_CURSOR_0 0x56
-#define SOR_LANE_POST_CURSOR_1 0x57
-#define SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24)
-#define SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16)
-#define SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8)
-#define SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0)
+#define SOR_LANE_POSTCURSOR0 0x56
+#define SOR_LANE_POSTCURSOR1 0x57
+#define SOR_LANE_POSTCURSOR_LANE3(x) (((x) & 0xff) << 24)
+#define SOR_LANE_POSTCURSOR_LANE2(x) (((x) & 0xff) << 16)
+#define SOR_LANE_POSTCURSOR_LANE1(x) (((x) & 0xff) << 8)
+#define SOR_LANE_POSTCURSOR_LANE0(x) (((x) & 0xff) << 0)
-#define SOR_DP_CONFIG_0 0x58
+#define SOR_DP_CONFIG0 0x58
#define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31)
#define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26)
#define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24)
@@ -211,11 +262,11 @@
#define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0)
#define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0)
-#define SOR_DP_CONFIG_1 0x59
-#define SOR_DP_MN_0 0x5a
-#define SOR_DP_MN_1 0x5b
+#define SOR_DP_CONFIG1 0x59
+#define SOR_DP_MN0 0x5a
+#define SOR_DP_MN1 0x5b
-#define SOR_DP_PADCTL_0 0x5c
+#define SOR_DP_PADCTL0 0x5c
#define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23)
#define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22)
#define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8)
@@ -229,17 +280,18 @@
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
-#define SOR_DP_PADCTL_1 0x5d
+#define SOR_DP_PADCTL1 0x5d
-#define SOR_DP_DEBUG_0 0x5e
-#define SOR_DP_DEBUG_1 0x5f
+#define SOR_DP_DEBUG0 0x5e
+#define SOR_DP_DEBUG1 0x5f
-#define SOR_DP_SPARE_0 0x60
-#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2)
-#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1)
-#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0)
+#define SOR_DP_SPARE0 0x60
+#define SOR_DP_SPARE_DISP_VIDEO_PREAMBLE (1 << 3)
+#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2)
+#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1)
+#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0)
-#define SOR_DP_SPARE_1 0x61
+#define SOR_DP_SPARE1 0x61
#define SOR_DP_AUDIO_CTRL 0x62
#define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
@@ -249,13 +301,13 @@
#define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
#define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK0 0x66
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK1 0x67
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK2 0x68
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK3 0x69
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK4 0x6a
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK5 0x6b
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK6 0x6c
#define SOR_DP_TPG 0x6d
#define SOR_DP_TPG_CHANNEL_CODING (1 << 6)
@@ -275,8 +327,44 @@
#define SOR_DP_TPG_PATTERN_NONE (0x0 << 0)
#define SOR_DP_TPG_CONFIG 0x6e
-#define SOR_DP_LQ_CSTM_0 0x6f
-#define SOR_DP_LQ_CSTM_1 0x70
-#define SOR_DP_LQ_CSTM_2 0x71
+#define SOR_DP_LQ_CSTM0 0x6f
+#define SOR_DP_LQ_CSTM1 0x70
+#define SOR_DP_LQ_CSTM2 0x71
+
+#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a
+#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b
+#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c
+
+#define SOR_HDMI_AVI_INFOFRAME_CTRL 0x9f
+#define INFOFRAME_CTRL_CHECKSUM_ENABLE (1 << 9)
+#define INFOFRAME_CTRL_SINGLE (1 << 8)
+#define INFOFRAME_CTRL_OTHER (1 << 4)
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_STATUS 0xa0
+#define INFOFRAME_STATUS_DONE (1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_HEADER 0xa1
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0xff) << 16)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+
+#define SOR_HDMI_CTRL 0xc0
+#define SOR_HDMI_CTRL_ENABLE (1 << 30)
+#define SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
+#define SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+
+#define SOR_REFCLK 0xe6
+#define SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
+
+#define SOR_INPUT_CONTROL 0xe8
+#define SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
+#define SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+
+#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
+#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
+#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad272..ac6fe40b99f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+ defined(__powerpc__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3077f1554099..624d941aaad1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
} else {
pool->npages_free += count;
list_splice(&ttm_dma->pages_list, &pool->free_list);
- npages = count;
- if (pool->npages_free > _manager->options.max_size) {
+ /*
+ * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
+ * to free in order to minimize calls to set_memory_wb().
+ */
+ if (pool->npages_free >= (_manager->options.max_size +
+ NUM_PAGES_TO_ALLOC))
npages = pool->npages_free - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf080abc86d1..4e19d0f9cc30 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -340,7 +340,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
0);
- if (unlikely(IS_ERR(swap_storage))) {
+ if (IS_ERR(swap_storage)) {
pr_err("Failed allocating swap storage\n");
return PTR_ERR(swap_storage);
}
@@ -354,7 +354,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
if (unlikely(from_page == NULL))
continue;
to_page = shmem_read_mapping_page(swap_space, i);
- if (unlikely(IS_ERR(to_page))) {
+ if (IS_ERR(to_page)) {
ret = PTR_ERR(to_page);
goto out_err;
}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 5fc16cecd3ba..62c7b1dafaa4 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -288,7 +288,7 @@ static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect
{
struct udl_fbdev *ufbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
rect->height);
@@ -298,7 +298,7 @@ static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *regi
{
struct udl_fbdev *ufbdev = info->par;
- sys_copyarea(info, region);
+ drm_fb_helper_sys_copyarea(info, region);
udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
region->height);
@@ -308,7 +308,7 @@ static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct udl_fbdev *ufbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
image->height);
@@ -476,7 +476,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
container_of(helper, struct udl_fbdev, helper);
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
- struct device *device = dev->dev;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
struct udl_gem_object *obj;
@@ -506,21 +505,20 @@ static int udlfb_create(struct drm_fb_helper *helper,
goto out_gfree;
}
- info = framebuffer_alloc(0, device);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_gfree;
}
info->par = ufbdev;
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
if (ret)
- goto out_gfree;
+ goto out_destroy_fbi;
fb = &ufbdev->ufb.base;
ufbdev->helper.fb = fb;
- ufbdev->helper.fbdev = info;
strcpy(info->fix.id, "udldrmfb");
@@ -533,18 +531,13 @@ static int udlfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_gfree;
- }
-
-
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
fb->width, fb->height,
ufbdev->ufb.obj->vmapping);
return ret;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_gfree:
drm_gem_object_unreference(&ufbdev->ufb.obj->base);
out:
@@ -558,14 +551,8 @@ static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
static void udl_fbdev_destroy(struct drm_device *dev,
struct udl_fbdev *ufbdev)
{
- struct fb_info *info;
- if (ufbdev->helper.fbdev) {
- info = ufbdev->helper.fbdev;
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&ufbdev->helper);
+ drm_fb_helper_release_fbi(&ufbdev->helper);
drm_fb_helper_fini(&ufbdev->helper);
drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
@@ -631,11 +618,7 @@ void udl_fbdev_unplug(struct drm_device *dev)
return;
ufbdev = udl->fbdev;
- if (ufbdev->helper.fbdev) {
- struct fb_info *info;
- info = ufbdev->helper.fbdev;
- unlink_framebuffer(info);
- }
+ drm_fb_helper_unlink_fbi(&ufbdev->helper);
}
struct drm_framebuffer *
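
The udl conversion above (and the virtio one that follows) moves fbdev setup and teardown onto the generic helpers: drm_fb_helper_alloc_fbi() takes over framebuffer_alloc() plus fb_alloc_cmap(), and the unregister/release pair replaces the open-coded unregister_framebuffer()/fb_dealloc_cmap()/framebuffer_release() sequence. A minimal sketch of the resulting pattern for a hypothetical driver (the my_* names are placeholders, not from the patch):

	#include <drm/drm_fb_helper.h>

	static int my_fbdev_create(struct drm_fb_helper *helper,
				   struct drm_fb_helper_surface_size *sizes)
	{
		struct fb_info *info;

		/* Allocates the fb_info and its cmap and ties it to the helper. */
		info = drm_fb_helper_alloc_fbi(helper);
		if (IS_ERR(info))
			return PTR_ERR(info);

		/* ... create the driver framebuffer, set helper->fb, then
		 * describe it to fbdev ... */
		drm_fb_helper_fill_fix(info, helper->fb->pitches[0], helper->fb->depth);
		drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);

		/* Any later failure unwinds with drm_fb_helper_release_fbi(helper). */
		return 0;
	}

	static void my_fbdev_destroy(struct drm_fb_helper *helper)
	{
		drm_fb_helper_unregister_fbi(helper);	/* was unregister_framebuffer() */
		drm_fb_helper_release_fbi(helper);	/* frees the fb_info and cmap */
		drm_fb_helper_fini(helper);
	}
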
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index df198d9e770c..6a81e084593b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -173,7 +173,7 @@ static void virtio_gpu_3d_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
rect->width, rect->height);
schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -183,7 +183,7 @@ static void virtio_gpu_3d_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
area->width, area->height);
schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -193,7 +193,7 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
image->width, image->height);
schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -230,7 +230,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct virtio_gpu_object *obj;
- struct device *device = vgdev->dev;
uint32_t resid, format, size;
int ret;
@@ -317,18 +316,12 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
if (ret)
goto err_obj_attach;
- info = framebuffer_alloc(0, device);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto err_fb_alloc;
}
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto err_fb_alloc_cmap;
- }
-
info->par = helper;
ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
@@ -339,7 +332,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
fb = &vfbdev->vgfb.base;
vfbdev->helper.fb = fb;
- vfbdev->helper.fbdev = info;
strcpy(info->fix.id, "virtiodrmfb");
info->flags = FBINFO_DEFAULT;
@@ -357,9 +349,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
return 0;
err_fb_init:
- fb_dealloc_cmap(&info->cmap);
-err_fb_alloc_cmap:
- framebuffer_release(info);
+ drm_fb_helper_release_fbi(helper);
err_fb_alloc:
virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
err_obj_attach:
@@ -371,15 +361,11 @@ err_obj_vmap:
static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
struct virtio_gpu_fbdev *vgfbdev)
{
- struct fb_info *info;
struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
- if (vgfbdev->helper.fbdev) {
- info = vgfbdev->helper.fbdev;
+ drm_fb_helper_unregister_fbi(&vgfbdev->helper);
+ drm_fb_helper_release_fbi(&vgfbdev->helper);
- unregister_framebuffer(info);
- framebuffer_release(info);
- }
if (vgfb->obj)
vgfb->obj = NULL;
drm_fb_helper_fini(&vgfbdev->helper);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 5ac92874404d..44e6ecba3de7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -159,7 +159,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
if (dev_priv->has_mob) {
uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
- if (unlikely(IS_ERR(uctx->man))) {
+ if (IS_ERR(uctx->man)) {
ret = PTR_ERR(uctx->man);
uctx->man = NULL;
goto out_err;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 620bb5cf617c..6218a36cf01a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1054,7 +1054,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
return -EINVAL;
vmaster = vmw_master_check(dev, file_priv, flags);
- if (unlikely(IS_ERR(vmaster))) {
+ if (IS_ERR(vmaster)) {
ret = PTR_ERR(vmaster);
if (ret != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..97ad3bcb99a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
+ mutex_unlock(&dev_priv->binding_mutex);
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 07cda8cbbddb..2adc11bc0920 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1808,8 +1808,9 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
}
}
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
+ return 0;
}
void vmw_du_connector_save(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8d038c36bd57..f1a324cfb4c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -133,7 +133,7 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
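
The prototype change above tracks the drm core: the connector .dpms hook now returns an int so failures (e.g. from the atomic helpers) can be propagated. A minimal sketch of how the callback slots into a funcs table under that signature (my_connector_funcs is an illustrative name, not from the patch):

	static const struct drm_connector_funcs my_connector_funcs = {
		/* int (*dpms)(struct drm_connector *connector, int mode) */
		.dpms = vmw_du_connector_dpms,
		/* ... detect, fill_modes, destroy, etc. ... */
	};
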
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index fbc6ee6ca337..52a6fd224127 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -31,6 +31,9 @@
#include "dev.h"
#define MIPI_CAL_CTRL 0x00
+#define MIPI_CAL_CTRL_NOISE_FILTER(x) (((x) & 0xf) << 26)
+#define MIPI_CAL_CTRL_PRESCALE(x) (((x) & 0x3) << 24)
+#define MIPI_CAL_CTRL_CLKEN_OVR (1 << 4)
#define MIPI_CAL_CTRL_START (1 << 0)
#define MIPI_CAL_AUTOCAL_CTRL 0x01
@@ -44,15 +47,18 @@
#define MIPI_CAL_CONFIG_CSIC 0x07
#define MIPI_CAL_CONFIG_CSID 0x08
#define MIPI_CAL_CONFIG_CSIE 0x09
+#define MIPI_CAL_CONFIG_CSIF 0x0a
#define MIPI_CAL_CONFIG_DSIA 0x0e
#define MIPI_CAL_CONFIG_DSIB 0x0f
#define MIPI_CAL_CONFIG_DSIC 0x10
#define MIPI_CAL_CONFIG_DSID 0x11
-#define MIPI_CAL_CONFIG_DSIAB_CLK 0x19
-#define MIPI_CAL_CONFIG_DSICD_CLK 0x1a
+#define MIPI_CAL_CONFIG_DSIA_CLK 0x19
+#define MIPI_CAL_CONFIG_DSIB_CLK 0x1a
#define MIPI_CAL_CONFIG_CSIAB_CLK 0x1b
+#define MIPI_CAL_CONFIG_DSIC_CLK 0x1c
#define MIPI_CAL_CONFIG_CSICD_CLK 0x1c
+#define MIPI_CAL_CONFIG_DSID_CLK 0x1d
#define MIPI_CAL_CONFIG_CSIE_CLK 0x1d
/* for data and clock lanes */
@@ -73,8 +79,11 @@
#define MIPI_CAL_BIAS_PAD_CFG1 0x17
#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_DRV_UP_REF(x) (((x) & 0x7) << 8)
#define MIPI_CAL_BIAS_PAD_CFG2 0x18
+#define MIPI_CAL_BIAS_PAD_VCLAMP(x) (((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_VAUXP(x) (((x) & 0x7) << 4)
#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1)
struct tegra_mipi_pad {
@@ -86,13 +95,35 @@ struct tegra_mipi_soc {
bool has_clk_lane;
const struct tegra_mipi_pad *pads;
unsigned int num_pads;
+
+ bool clock_enable_override;
+ bool needs_vclamp_ref;
+
+ /* bias pad configuration settings */
+ u8 pad_drive_down_ref;
+ u8 pad_drive_up_ref;
+
+ u8 pad_vclamp_level;
+ u8 pad_vauxp_level;
+
+ /* calibration settings for data lanes */
+ u8 hspdos;
+ u8 hspuos;
+ u8 termos;
+
+ /* calibration settings for clock lanes */
+ u8 hsclkpdos;
+ u8 hsclkpuos;
};
struct tegra_mipi {
const struct tegra_mipi_soc *soc;
+ struct device *dev;
void __iomem *regs;
struct mutex lock;
struct clk *clk;
+
+ unsigned long usage_count;
};
struct tegra_mipi_device {
@@ -114,6 +145,67 @@ static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
writel(value, mipi->regs + (offset << 2));
}
+static int tegra_mipi_power_up(struct tegra_mipi *mipi)
+{
+ u32 value;
+ int err;
+
+ err = clk_enable(mipi->clk);
+ if (err < 0)
+ return err;
+
+ value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+ value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+
+ if (mipi->soc->needs_vclamp_ref)
+ value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+ tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+ value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+ value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+ tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+ clk_disable(mipi->clk);
+
+ return 0;
+}
+
+static int tegra_mipi_power_down(struct tegra_mipi *mipi)
+{
+ u32 value;
+ int err;
+
+ err = clk_enable(mipi->clk);
+ if (err < 0)
+ return err;
+
+ /*
+ * The MIPI_CAL_BIAS_PAD_PDVREG controls a voltage regulator that
+ * supplies the DSI pads. This must be kept enabled until none of the
+ * DSI lanes are used anymore.
+ */
+ value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+ value |= MIPI_CAL_BIAS_PAD_PDVREG;
+ tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+ /*
+ * MIPI_CAL_BIAS_PAD_PDVCLAMP and MIPI_CAL_BIAS_PAD_E_VCLAMP_REF
+ * control a regulator that supplies current to the pre-driver logic.
+ * Powering down this regulator causes DSI to fail, so it must remain
+ * powered on until none of the DSI lanes are used anymore.
+ */
+ value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+
+ if (mipi->soc->needs_vclamp_ref)
+ value &= ~MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+ value |= MIPI_CAL_BIAS_PAD_PDVCLAMP;
+ tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+ return 0;
+}
+
struct tegra_mipi_device *tegra_mipi_request(struct device *device)
{
struct device_node *np = device->of_node;
@@ -150,6 +242,20 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
dev->pads = args.args[0];
dev->device = device;
+ mutex_lock(&dev->mipi->lock);
+
+ if (dev->mipi->usage_count++ == 0) {
+ err = tegra_mipi_power_up(dev->mipi);
+ if (err < 0) {
+ dev_err(dev->mipi->dev,
+ "failed to power up MIPI bricks: %d\n",
+ err);
+ return ERR_PTR(err);
+ }
+ }
+
+ mutex_unlock(&dev->mipi->lock);
+
return dev;
put:
@@ -164,6 +270,25 @@ EXPORT_SYMBOL(tegra_mipi_request);
void tegra_mipi_free(struct tegra_mipi_device *device)
{
+ int err;
+
+ mutex_lock(&device->mipi->lock);
+
+ if (--device->mipi->usage_count == 0) {
+ err = tegra_mipi_power_down(device->mipi);
+ if (err < 0) {
+ /*
+ * Not much that can be done here, so an error message
+ * will have to do.
+ */
+ dev_err(device->mipi->dev,
+ "failed to power down MIPI bricks: %d\n",
+ err);
+ }
+ }
+
+ mutex_unlock(&device->mipi->lock);
+
platform_device_put(device->pdev);
kfree(device);
}
@@ -199,16 +324,15 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
mutex_lock(&device->mipi->lock);
- value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
- value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
- value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
- tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
-
- tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
- MIPI_CAL_BIAS_PAD_CFG1);
+ value = MIPI_CAL_BIAS_PAD_DRV_DN_REF(soc->pad_drive_down_ref) |
+ MIPI_CAL_BIAS_PAD_DRV_UP_REF(soc->pad_drive_up_ref);
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG1);
value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
- value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+ value &= ~MIPI_CAL_BIAS_PAD_VCLAMP(0x7);
+ value &= ~MIPI_CAL_BIAS_PAD_VAUXP(0x7);
+ value |= MIPI_CAL_BIAS_PAD_VCLAMP(soc->pad_vclamp_level);
+ value |= MIPI_CAL_BIAS_PAD_VAUXP(soc->pad_vauxp_level);
tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
for (i = 0; i < soc->num_pads; i++) {
@@ -216,21 +340,38 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
if (device->pads & BIT(i)) {
data = MIPI_CAL_CONFIG_SELECT |
- MIPI_CAL_CONFIG_HSPDOS(0) |
- MIPI_CAL_CONFIG_HSPUOS(4) |
- MIPI_CAL_CONFIG_TERMOS(5);
+ MIPI_CAL_CONFIG_HSPDOS(soc->hspdos) |
+ MIPI_CAL_CONFIG_HSPUOS(soc->hspuos) |
+ MIPI_CAL_CONFIG_TERMOS(soc->termos);
clk = MIPI_CAL_CONFIG_SELECT |
- MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
- MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+ MIPI_CAL_CONFIG_HSCLKPDOSD(soc->hsclkpdos) |
+ MIPI_CAL_CONFIG_HSCLKPUOSD(soc->hsclkpuos);
}
tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
- if (soc->has_clk_lane)
+ if (soc->has_clk_lane && soc->pads[i].clk != 0)
tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
}
value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+ value &= ~MIPI_CAL_CTRL_NOISE_FILTER(0xf);
+ value &= ~MIPI_CAL_CTRL_PRESCALE(0x3);
+ value |= MIPI_CAL_CTRL_NOISE_FILTER(0xa);
+ value |= MIPI_CAL_CTRL_PRESCALE(0x2);
+
+ if (!soc->clock_enable_override)
+ value &= ~MIPI_CAL_CTRL_CLKEN_OVR;
+ else
+ value |= MIPI_CAL_CTRL_CLKEN_OVR;
+
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
+
+ /* clear any pending status bits */
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_STATUS);
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_STATUS);
+
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
value |= MIPI_CAL_CTRL_START;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
@@ -259,6 +400,17 @@ static const struct tegra_mipi_soc tegra114_mipi_soc = {
.has_clk_lane = false,
.pads = tegra114_mipi_pads,
.num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+ .clock_enable_override = true,
+ .needs_vclamp_ref = true,
+ .pad_drive_down_ref = 0x2,
+ .pad_drive_up_ref = 0x0,
+ .pad_vclamp_level = 0x0,
+ .pad_vauxp_level = 0x0,
+ .hspdos = 0x0,
+ .hspuos = 0x4,
+ .termos = 0x5,
+ .hsclkpdos = 0x0,
+ .hsclkpuos = 0x4,
};
static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
@@ -266,20 +418,80 @@ static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
{ .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
{ .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
{ .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
- { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
- { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
- { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+ { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
};
static const struct tegra_mipi_soc tegra124_mipi_soc = {
.has_clk_lane = true,
.pads = tegra124_mipi_pads,
.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+ .clock_enable_override = true,
+ .needs_vclamp_ref = true,
+ .pad_drive_down_ref = 0x2,
+ .pad_drive_up_ref = 0x0,
+ .pad_vclamp_level = 0x0,
+ .pad_vauxp_level = 0x0,
+ .hspdos = 0x0,
+ .hspuos = 0x0,
+ .termos = 0x0,
+ .hsclkpdos = 0x1,
+ .hsclkpuos = 0x2,
+};
+
+static const struct tegra_mipi_soc tegra132_mipi_soc = {
+ .has_clk_lane = true,
+ .pads = tegra124_mipi_pads,
+ .num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+ .clock_enable_override = false,
+ .needs_vclamp_ref = false,
+ .pad_drive_down_ref = 0x0,
+ .pad_drive_up_ref = 0x3,
+ .pad_vclamp_level = 0x0,
+ .pad_vauxp_level = 0x0,
+ .hspdos = 0x0,
+ .hspuos = 0x0,
+ .termos = 0x0,
+ .hsclkpdos = 0x3,
+ .hsclkpuos = 0x2,
+};
+
+static const struct tegra_mipi_pad tegra210_mipi_pads[] = {
+ { .data = MIPI_CAL_CONFIG_CSIA, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSIB, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSIC, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSID, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSIE, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSIF, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIC, .clk = MIPI_CAL_CONFIG_DSIC_CLK },
+ { .data = MIPI_CAL_CONFIG_DSID, .clk = MIPI_CAL_CONFIG_DSID_CLK },
+};
+
+static const struct tegra_mipi_soc tegra210_mipi_soc = {
+ .has_clk_lane = true,
+ .pads = tegra210_mipi_pads,
+ .num_pads = ARRAY_SIZE(tegra210_mipi_pads),
+ .clock_enable_override = true,
+ .needs_vclamp_ref = false,
+ .pad_drive_down_ref = 0x0,
+ .pad_drive_up_ref = 0x3,
+ .pad_vclamp_level = 0x1,
+ .pad_vauxp_level = 0x1,
+ .hspdos = 0x0,
+ .hspuos = 0x2,
+ .termos = 0x0,
+ .hsclkpdos = 0x0,
+ .hsclkpuos = 0x2,
};
-static struct of_device_id tegra_mipi_of_match[] = {
+static const struct of_device_id tegra_mipi_of_match[] = {
{ .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
{ .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+ { .compatible = "nvidia,tegra132-mipi", .data = &tegra132_mipi_soc },
+ { .compatible = "nvidia,tegra210-mipi", .data = &tegra210_mipi_soc },
{ },
};
@@ -299,6 +511,7 @@ static int tegra_mipi_probe(struct platform_device *pdev)
return -ENOMEM;
mipi->soc = match->data;
+ mipi->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi->regs = devm_ioremap_resource(&pdev->dev, res);
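
The host1x changes above give the MIPI calibration block per-SoC pad settings and reference-counted power handling: the first tegra_mipi_request() powers the calibration bricks up, the last tegra_mipi_free() powers them down, and tegra_mipi_calibrate() programs the SoC-specific bias and drive values. A minimal usage sketch from the consumer side, assuming a hypothetical DSI/CSI client (my_* names are placeholders):

	#include <linux/err.h>
	#include <linux/host1x.h>

	static int my_output_calibrate(struct device *dev)
	{
		struct tegra_mipi_device *mipi;
		int err;

		/* First requester bumps usage_count and powers up the pads. */
		mipi = tegra_mipi_request(dev);
		if (IS_ERR(mipi))
			return PTR_ERR(mipi);

		/* Writes the per-SoC bias/drive settings and starts calibration. */
		err = tegra_mipi_calibrate(mipi);

		/* Last user drops usage_count and powers the pads back down. */
		tegra_mipi_free(mipi);
		return err;
	}

A real client would normally keep the tegra_mipi_device handle for its lifetime and only free it at teardown; the early free here just keeps the sketch short.
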
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6d2f39d36e44..00f2058944e5 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu)
return ret;
}
+ for (i = 0; i < IPU_NUM_IRQS; i += 32)
+ ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
gc = irq_get_domain_generic_chip(ipu->domain, i);
gc->reg_base = ipu->cm_reg;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 37ac7b5dbd06..21060668fd25 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -6,17 +6,19 @@
* Licensed under GPLv2
*
* vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
-
- Switcher interface - methods require for ATPX and DCM
- - switchto - this throws the output MUX switch
- - discrete_set_power - sets the power state for the discrete card
-
- GPU driver interface
- - set_gpu_state - this should do the equiv of s/r for the card
- - this should *not* set the discrete power state
- - switch_check - check if the device is in a position to switch now
+ *
+ * Switcher interface - methods require for ATPX and DCM
+ * - switchto - this throws the output MUX switch
+ * - discrete_set_power - sets the power state for the discrete card
+ *
+ * GPU driver interface
+ * - set_gpu_state - this should do the equiv of s/r for the card
+ * - this should *not* set the discrete power state
+ * - switch_check - check if the device is in a position to switch now
*/
+#define pr_fmt(fmt) "vga_switcheroo: " fmt
+
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -111,7 +113,7 @@ int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
vgasr_priv.handler = handler;
if (vga_switcheroo_ready()) {
- printk(KERN_INFO "vga_switcheroo: enabled\n");
+ pr_info("enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
@@ -124,7 +126,7 @@ void vga_switcheroo_unregister_handler(void)
mutex_lock(&vgasr_mutex);
vgasr_priv.handler = NULL;
if (vgasr_priv.active) {
- pr_info("vga_switcheroo: disabled\n");
+ pr_info("disabled\n");
vga_switcheroo_debugfs_fini(&vgasr_priv);
vgasr_priv.active = false;
}
@@ -155,7 +157,7 @@ static int register_client(struct pci_dev *pdev,
vgasr_priv.registered_clients++;
if (vga_switcheroo_ready()) {
- printk(KERN_INFO "vga_switcheroo: enabled\n");
+ pr_info("enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
@@ -167,7 +169,8 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
bool driver_power_control)
{
return register_client(pdev, ops, -1,
- pdev == vga_default_device(), driver_power_control);
+ pdev == vga_default_device(),
+ driver_power_control);
}
EXPORT_SYMBOL(vga_switcheroo_register_client);
@@ -183,6 +186,7 @@ static struct vga_switcheroo_client *
find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
+
list_for_each_entry(client, head, list)
if (client->pdev == pdev)
return client;
@@ -193,6 +197,7 @@ static struct vga_switcheroo_client *
find_client_from_id(struct list_head *head, int client_id)
{
struct vga_switcheroo_client *client;
+
list_for_each_entry(client, head, list)
if (client->id == client_id)
return client;
@@ -203,6 +208,7 @@ static struct vga_switcheroo_client *
find_active_client(struct list_head *head)
{
struct vga_switcheroo_client *client;
+
list_for_each_entry(client, head, list)
if (client->active && client_is_vga(client))
return client;
@@ -235,7 +241,7 @@ void vga_switcheroo_unregister_client(struct pci_dev *pdev)
kfree(client);
}
if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
- printk(KERN_INFO "vga_switcheroo: disabled\n");
+ pr_info("disabled\n");
vga_switcheroo_debugfs_fini(&vgasr_priv);
vgasr_priv.active = false;
}
@@ -260,10 +266,12 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
{
struct vga_switcheroo_client *client;
int i = 0;
+
mutex_lock(&vgasr_mutex);
list_for_each_entry(client, &vgasr_priv.clients, list) {
seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
- client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+ client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" :
+ "IGD",
client_is_vga(client) ? "" : "-Audio",
client->active ? '+' : ' ',
client->driver_power_control ? "Dyn" : "",
@@ -347,6 +355,7 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->fb_info) {
struct fb_event event;
+
console_lock();
event.info = new_client->fb_info;
fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
@@ -375,7 +384,7 @@ static bool check_can_switch(void)
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (!client->ops->can_switch(client->pdev)) {
- printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+ pr_err("client %x refused switch\n", client->id);
return false;
}
}
@@ -484,20 +493,20 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (can_switch) {
ret = vga_switchto_stage1(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+ pr_err("switching failed stage 1 %d\n", ret);
ret = vga_switchto_stage2(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+ pr_err("switching failed stage 2 %d\n", ret);
} else {
- printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
+ pr_info("setting delayed switch to client %d\n", client->id);
vgasr_priv.delayed_switch_active = true;
vgasr_priv.delayed_client_id = client_id;
ret = vga_switchto_stage1(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
+ pr_err("delayed switching stage 1 failed %d\n", ret);
}
out:
@@ -516,32 +525,32 @@ static const struct file_operations vga_switcheroo_debugfs_fops = {
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
{
- if (priv->switch_file) {
- debugfs_remove(priv->switch_file);
- priv->switch_file = NULL;
- }
- if (priv->debugfs_root) {
- debugfs_remove(priv->debugfs_root);
- priv->debugfs_root = NULL;
- }
+ debugfs_remove(priv->switch_file);
+ priv->switch_file = NULL;
+
+ debugfs_remove(priv->debugfs_root);
+ priv->debugfs_root = NULL;
}
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
{
+ static const char mp[] = "/sys/kernel/debug";
+
/* already initialised */
if (priv->debugfs_root)
return 0;
priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
if (!priv->debugfs_root) {
- printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
+ pr_err("Cannot create %s/vgaswitcheroo\n", mp);
goto fail;
}
priv->switch_file = debugfs_create_file("switch", 0644,
- priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
+ priv->debugfs_root, NULL,
+ &vga_switcheroo_debugfs_fops);
if (!priv->switch_file) {
- printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
+ pr_err("cannot create %s/vgaswitcheroo/switch\n", mp);
goto fail;
}
return 0;
@@ -560,7 +569,8 @@ int vga_switcheroo_process_delayed_switch(void)
if (!vgasr_priv.delayed_switch_active)
goto err;
- printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
+ pr_info("processing delayed switch to %d\n",
+ vgasr_priv.delayed_client_id);
client = find_client_from_id(&vgasr_priv.clients,
vgasr_priv.delayed_client_id);
@@ -569,7 +579,7 @@ int vga_switcheroo_process_delayed_switch(void)
ret = vga_switchto_stage2(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
+ pr_err("delayed switching failed stage 2 %d\n", ret);
vgasr_priv.delayed_switch_active = false;
err = 0;
@@ -579,7 +589,8 @@ err:
}
EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
-static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void vga_switcheroo_power_switch(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
{
struct vga_switcheroo_client *client;
@@ -598,7 +609,8 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switchero
/* force a PCI device to a certain state - mainly to turn off audio clients */
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
+ enum vga_switcheroo_state dynamic)
{
struct vga_switcheroo_client *client;
@@ -644,7 +656,8 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
/* this version is for the case where the power switch is separate
to the device being powered down. */
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_ops(struct device *dev,
+ struct dev_pm_domain *domain)
{
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
@@ -675,7 +688,8 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
/* we need to check if we have to switch back on the video
device so the audio device can come back */
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
+ if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
+ client_is_vga(client)) {
found = client;
ret = pm_runtime_get_sync(&client->pdev->dev);
if (ret) {
@@ -695,12 +709,15 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
return ret;
}
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+int
+vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
+ struct dev_pm_domain *domain)
{
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
domain->ops = *dev->bus->pm;
- domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
+ domain->ops.runtime_resume =
+ vga_switcheroo_runtime_resume_hdmi_audio;
dev->pm_domain = domain;
return 0;
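
Both this file and vgaarb.c below switch to a pr_fmt() prefix so the "vga_switcheroo: "/"vgaarb: " tag no longer has to be repeated in every message. A minimal sketch of the pattern (mydrv is an illustrative name):

	/* Must be defined before linux/printk.h is pulled in, directly or via
	 * other headers; the pr_*() macros then expand to the prefixed string. */
	#define pr_fmt(fmt) "mydrv: " fmt

	#include <linux/printk.h>

	static void mydrv_announce(void)
	{
		pr_info("enabled\n");	/* emits "mydrv: enabled" */
	}
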
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 7bcbf863656e..a0b433456107 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -29,6 +29,8 @@
*
*/
+#define pr_fmt(fmt) "vgaarb: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -134,7 +136,6 @@ struct pci_dev *vga_default_device(void)
{
return vga_default;
}
-
EXPORT_SYMBOL_GPL(vga_default_device);
void vga_set_default_device(struct pci_dev *pdev)
@@ -298,9 +299,9 @@ enable_them:
pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
- if (!vgadev->bridge_has_one_vga) {
+ if (!vgadev->bridge_has_one_vga)
vga_irq_set_state(vgadev, true);
- }
+
vgadev->owns |= wants;
lock_them:
vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
@@ -452,15 +453,15 @@ bail:
}
EXPORT_SYMBOL(vga_put);
-/* Rules for using a bridge to control a VGA descendant decoding:
- if a bridge has only one VGA descendant then it can be used
- to control the VGA routing for that device.
- It should always use the bridge closest to the device to control it.
- If a bridge has a direct VGA descendant, but also have a sub-bridge
- VGA descendant then we cannot use that bridge to control the direct VGA descendant.
- So for every device we register, we need to iterate all its parent bridges
- so we can invalidate any devices using them properly.
-*/
+/*
+ * Rules for using a bridge to control a VGA descendant decoding: if a bridge
+ * has only one VGA descendant then it can be used to control the VGA routing
+ * for that device. It should always use the bridge closest to the device to
+ * control it. If a bridge has a direct VGA descendant, but also have a sub-
+ * bridge VGA descendant then we cannot use that bridge to control the direct
+ * VGA descendant. So for every device we register, we need to iterate all
+ * its parent bridges so we can invalidate any devices using them properly.
+ */
static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
{
struct vga_device *same_bridge_vgadev;
@@ -484,21 +485,26 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
/* see if the share a bridge with this device */
if (new_bridge == bridge) {
- /* if their direct parent bridge is the same
- as any bridge of this device then it can't be used
- for that device */
+ /*
+ * If their direct parent bridge is the same
+ * as any bridge of this device then it can't
+ * be used for that device.
+ */
same_bridge_vgadev->bridge_has_one_vga = false;
}
- /* now iterate the previous devices bridge hierarchy */
- /* if the new devices parent bridge is in the other devices
- hierarchy then we can't use it to control this device */
+ /*
+ * Now iterate the previous devices bridge hierarchy.
+ * If the new devices parent bridge is in the other
+ * devices hierarchy then we can't use it to control
+ * this device
+ */
while (bus) {
bridge = bus->self;
- if (bridge) {
- if (bridge == vgadev->pdev->bus->self)
- vgadev->bridge_has_one_vga = false;
- }
+
+ if (bridge && bridge == vgadev->pdev->bus->self)
+ vgadev->bridge_has_one_vga = false;
+
bus = bus->parent;
}
}
@@ -527,10 +533,10 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Allocate structure */
vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
if (vgadev == NULL) {
- pr_err("vgaarb: failed to allocate pci device\n");
- /* What to do on allocation failure ? For now, let's
- * just do nothing, I'm not sure there is anything saner
- * to be done
+ pr_err("failed to allocate pci device\n");
+ /*
+ * What to do on allocation failure ? For now, let's just do
+ * nothing, I'm not sure there is anything saner to be done.
*/
return false;
}
@@ -566,8 +572,8 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
bridge = bus->self;
if (bridge) {
u16 l;
- pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
- &l);
+
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l);
if (!(l & PCI_BRIDGE_CTL_VGA)) {
vgadev->owns = 0;
break;
@@ -581,8 +587,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
*/
if (vga_default == NULL &&
((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
- pr_info("vgaarb: setting as boot device: PCI:%s\n",
- pci_name(pdev));
+ pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
vga_set_default_device(pdev);
}
@@ -591,7 +596,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Add to the list */
list_add(&vgadev->list, &vga_list);
vga_count++;
- pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
+ pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
pci_name(pdev),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns),
@@ -651,7 +656,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
decodes_unlocked = vgadev->locks & decodes_removed;
vgadev->decodes = new_decodes;
- pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
+ pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
pci_name(vgadev->pdev),
vga_iostate_to_str(old_decodes),
vga_iostate_to_str(vgadev->decodes),
@@ -673,10 +678,12 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
new_decodes & VGA_RSRC_LEGACY_MASK)
vga_decode_count++;
- pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
+ pr_debug("decoding count now is: %d\n", vga_decode_count);
}
-static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes,
+ bool userspace)
{
struct vga_device *vgadev;
unsigned long flags;
@@ -712,7 +719,8 @@ EXPORT_SYMBOL(vga_set_legacy_decoding);
/* call with NULL to unregister */
int vga_client_register(struct pci_dev *pdev, void *cookie,
void (*irq_set_state)(void *cookie, bool state),
- unsigned int (*set_vga_decode)(void *cookie, bool decode))
+ unsigned int (*set_vga_decode)(void *cookie,
+ bool decode))
{
int ret = -ENODEV;
struct vga_device *vgadev;
@@ -832,7 +840,7 @@ static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
return 1;
}
-static ssize_t vga_arb_read(struct file *file, char __user * buf,
+static ssize_t vga_arb_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct vga_arb_private *priv = file->private_data;
@@ -899,7 +907,7 @@ done:
* TODO: To avoid parsing inside kernel and to improve the speed we may
* consider use ioctl here
*/
-static ssize_t vga_arb_write(struct file *file, const char __user * buf,
+static ssize_t vga_arb_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct vga_arb_private *priv = file->private_data;
@@ -1075,13 +1083,13 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
ret_val = -EPROTO;
goto done;
}
- pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+ pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
- pr_debug("vgaarb: pdev %p\n", pdev);
+ pr_debug("pdev %p\n", pdev);
if (!pdev) {
- pr_err("vgaarb: invalid PCI address %x:%x:%x\n",
+ pr_err("invalid PCI address %x:%x:%x\n",
domain, bus, devfn);
ret_val = -ENODEV;
goto done;
@@ -1089,10 +1097,13 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
}
vgadev = vgadev_find(pdev);
- pr_debug("vgaarb: vgadev %p\n", vgadev);
+ pr_debug("vgadev %p\n", vgadev);
if (vgadev == NULL) {
- pr_err("vgaarb: this pci device is not a vga device\n");
- pci_dev_put(pdev);
+ if (pdev) {
+ pr_err("this pci device is not a vga device\n");
+ pci_dev_put(pdev);
+ }
+
ret_val = -ENODEV;
goto done;
}
@@ -1109,7 +1120,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
}
}
if (i == MAX_USER_CARDS) {
- pr_err("vgaarb: maximum user cards (%d) number reached!\n",
+ pr_err("maximum user cards (%d) number reached!\n",
MAX_USER_CARDS);
pci_dev_put(pdev);
/* XXX: which value to return? */
@@ -1125,7 +1136,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
curr_pos += 8;
remaining -= 8;
- pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
+ pr_debug("client 0x%p called 'decodes'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@@ -1150,7 +1161,7 @@ done:
return ret_val;
}
-static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
+static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
{
struct vga_arb_private *priv = file->private_data;
@@ -1246,7 +1257,8 @@ static void vga_arbiter_notify_clients(void)
else
new_state = true;
if (vgadev->set_vga_decode) {
- new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
+ new_decodes = vgadev->set_vga_decode(vgadev->cookie,
+ new_state);
vga_update_device_decodes(vgadev, new_decodes);
}
}
@@ -1300,7 +1312,7 @@ static int __init vga_arb_device_init(void)
rc = misc_register(&vga_arb_device);
if (rc < 0)
- pr_err("vgaarb: error %d registering device\n", rc);
+ pr_err("error %d registering device\n", rc);
bus_register_notifier(&pci_bus_type, &pci_notifier);
@@ -1312,21 +1324,29 @@ static int __init vga_arb_device_init(void)
PCI_ANY_ID, pdev)) != NULL)
vga_arbiter_add_pci_device(pdev);
- pr_info("vgaarb: loaded\n");
+ pr_info("loaded\n");
list_for_each_entry(vgadev, &vga_list, list) {
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- /* Override I/O based detection done by vga_arbiter_add_pci_device()
- * as it may take the wrong device (e.g. on Apple system under EFI).
+ /*
+ * Override vga_arbiter_add_pci_device()'s I/O based detection
+ * as it may take the wrong device (e.g. on Apple system under
+ * EFI).
*
- * Select the device owning the boot framebuffer if there is one.
+ * Select the device owning the boot framebuffer if there is
+ * one.
*/
- resource_size_t start, end;
+ resource_size_t start, end, limit;
+ unsigned long flags;
int i;
+ limit = screen_info.lfb_base + screen_info.lfb_size;
+
/* Does firmware framebuffer belong to us? */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+ flags = pci_resource_flags(vgadev->pdev, i);
+
+ if ((flags & IORESOURCE_MEM) == 0)
continue;
start = pci_resource_start(vgadev->pdev, i);
@@ -1335,22 +1355,24 @@ static int __init vga_arb_device_init(void)
if (!start || !end)
continue;
- if (screen_info.lfb_base < start ||
- (screen_info.lfb_base + screen_info.lfb_size) >= end)
+ if (screen_info.lfb_base < start || limit >= end)
continue;
+
if (!vga_default_device())
- pr_info("vgaarb: setting as boot device: PCI:%s\n",
+ pr_info("setting as boot device: PCI:%s\n",
pci_name(vgadev->pdev));
else if (vgadev->pdev != vga_default_device())
- pr_info("vgaarb: overriding boot device: PCI:%s\n",
+ pr_info("overriding boot device: PCI:%s\n",
pci_name(vgadev->pdev));
vga_set_default_device(vgadev->pdev);
}
#endif
if (vgadev->bridge_has_one_vga)
- pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
+ pr_info("bridge control possible %s\n",
+ pci_name(vgadev->pdev));
else
- pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev));
+ pr_info("no bridge control possible %s\n",
+ pci_name(vgadev->pdev));
}
return rc;
}